github.com/Filosottile/go@v0.0.0-20170906193555-dbed9972d994/src/cmd/compile/internal/ssa/gen/AMD64.rules

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64  x y) -> (ADDQ  x y)
(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
(Add32  x y) -> (ADDL  x y)
(Add16  x y) -> (ADDL  x y)
(Add8   x y) -> (ADDL  x y)
(Add32F x y) -> (ADDSS x y)
(Add64F x y) -> (ADDSD x y)

(Sub64  x y) -> (SUBQ  x y)
(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
(Sub32  x y) -> (SUBL  x y)
(Sub16  x y) -> (SUBL  x y)
(Sub8   x y) -> (SUBL  x y)
(Sub32F x y) -> (SUBSS x y)
(Sub64F x y) -> (SUBSD x y)

(Mul64  x y) -> (MULQ  x y)
(Mul32  x y) -> (MULL  x y)
(Mul16  x y) -> (MULL  x y)
(Mul8   x y) -> (MULL  x y)
(Mul32F x y) -> (MULSS x y)
(Mul64F x y) -> (MULSD x y)

(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)

(Div64  x y) -> (Select0 (DIVQ  x y))
(Div64u x y) -> (Select0 (DIVQU x y))
(Div32  x y) -> (Select0 (DIVL  x y))
(Div32u x y) -> (Select0 (DIVLU x y))
(Div16  x y) -> (Select0 (DIVW  x y))
(Div16u x y) -> (Select0 (DIVWU x y))
(Div8   x y) -> (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
(Div8u  x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
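
// Note: 8-bit divides are widened to 16 bits above; this likely avoids the
// one-operand DIVB form, whose remainder lands in AH and which fits the
// Select0/Select1 (quotient, remainder) tuple shape poorly.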

(Hmul64  x y) -> (HMULQ  x y)
(Hmul64u x y) -> (HMULQU x y)
(Hmul32  x y) -> (HMULL  x y)
(Hmul32u x y) -> (HMULLU x y)

(Mul64uhilo x y) -> (MULQU2 x y)
(Div128u xhi xlo y) -> (DIVQU2 xhi xlo y)

(Avg64u x y) -> (AVGQU x y)

(Mod64  x y) -> (Select1 (DIVQ  x y))
(Mod64u x y) -> (Select1 (DIVQU x y))
(Mod32  x y) -> (Select1 (DIVL  x y))
(Mod32u x y) -> (Select1 (DIVLU x y))
(Mod16  x y) -> (Select1 (DIVW  x y))
(Mod16u x y) -> (Select1 (DIVWU x y))
(Mod8   x y) -> (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
(Mod8u  x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

(And64 x y) -> (ANDQ x y)
(And32 x y) -> (ANDL x y)
(And16 x y) -> (ANDL x y)
(And8  x y) -> (ANDL x y)

(Or64 x y) -> (ORQ x y)
(Or32 x y) -> (ORL x y)
(Or16 x y) -> (ORL x y)
(Or8  x y) -> (ORL x y)

(Xor64 x y) -> (XORQ x y)
(Xor32 x y) -> (XORL x y)
(Xor16 x y) -> (XORL x y)
(Xor8  x y) -> (XORL x y)

(Neg64  x) -> (NEGQ x)
(Neg32  x) -> (NEGL x)
(Neg16  x) -> (NEGL x)
(Neg8   x) -> (NEGL x)
(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))

(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
(Com16 x) -> (NOTL x)
(Com8  x) -> (NOTL x)

// Lowering boolean ops
(AndB x y) -> (ANDL x y)
(OrB x y) -> (ORL x y)
(Not x) -> (XORLconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)

// Lowering other arithmetic
(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x) -> (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
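
// BSF leaves its result undefined when the input is 0, so the Ctz64 rule uses
// CMOVQEQ on BSFQ's zero flag to substitute the constant 64. Ctz32 instead ORs
// in bit 32, guaranteeing a nonzero input whose lowest set bit is at most 32.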

(BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
(BitLen32 x) -> (BitLen64 (MOVLQZX <typ.UInt64> x))

(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)

(PopCount64 x) -> (POPCNTQ x)
(PopCount32 x) -> (POPCNTL x)
(PopCount16 x) -> (POPCNTL (MOVWQZX <typ.UInt32> x))
(PopCount8 x) -> (POPCNTL (MOVBQZX <typ.UInt32> x))

(Sqrt x) -> (SQRTSD x)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  x) -> (MOVBQSX x)
(SignExt8to32  x) -> (MOVBQSX x)
(SignExt8to64  x) -> (MOVBQSX x)
(SignExt16to32 x) -> (MOVWQSX x)
(SignExt16to64 x) -> (MOVWQSX x)
(SignExt32to64 x) -> (MOVLQSX x)

(ZeroExt8to16  x) -> (MOVBQZX x)
(ZeroExt8to32  x) -> (MOVBQZX x)
(ZeroExt8to64  x) -> (MOVBQZX x)
(ZeroExt16to32 x) -> (MOVWQZX x)
(ZeroExt16to64 x) -> (MOVWQZX x)
(ZeroExt32to64 x) -> (MOVLQZX x)

(Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63])

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
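// For example, (Trunc64to8 x) lowers to x itself; later 8-bit operations just
// read the low byte of the same register.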
(Trunc16to8  x) -> x
(Trunc32to8  x) -> x
(Trunc32to16 x) -> x
(Trunc64to8  x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Lowering float <-> int
(Cvt32to32F x) -> (CVTSL2SS x)
(Cvt32to64F x) -> (CVTSL2SD x)
(Cvt64to32F x) -> (CVTSQ2SS x)
(Cvt64to64F x) -> (CVTSQ2SD x)

(Cvt32Fto32 x) -> (CVTTSS2SL x)
(Cvt32Fto64 x) -> (CVTTSS2SQ x)
(Cvt64Fto32 x) -> (CVTTSD2SL x)
(Cvt64Fto64 x) -> (CVTTSD2SQ x)

(Cvt32Fto64F x) -> (CVTSS2SD x)
(Cvt64Fto32F x) -> (CVTSD2SS x)

(Round32F x) -> x
(Round64F x) -> x

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
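// A minimal Go sketch of the same semantics, for the 64-bit case:
//
//	func lsh64(x, y uint64) uint64 {
//		var mask uint64 // all ones if y < 64, else 0
//		if y < 64 {
//			mask = ^uint64(0)
//		}
//		return (x << (y & 63)) & mask // the hardware shift uses only the low 6 bits
//	}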
(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Lsh64x8  <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))

(Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh8x64 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh8x32 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh8x16 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh8x8  <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Rsh64Ux8  <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))

(Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
(Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))

(Rsh8Ux64 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
(Rsh8Ux32 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
(Rsh8Ux16 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
(Rsh8Ux8  <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))

// Signed right shift needs to return 0/-1 if the shift amount is >= the width of the shifted value.
// We implement this by setting the shift amount to -1 (all ones) if it is >= the width.
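// A minimal Go sketch of the saturation, for the 64-bit case:
//
//	func rsh64(x int64, y uint64) int64 {
//		if y >= 64 {
//			y = 63 // an arithmetic shift by >= width acts like a shift by width-1
//		}
//		return x >> y
//	}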
(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
(Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
(Rsh64x8  <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))

(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
(Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))

(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
(Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))

(Rsh8x64 <t> x y)  -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
(Rsh8x32 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
(Rsh8x16 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
(Rsh8x8  <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))

// Lowering comparisons
(Less64  x y) -> (SETL (CMPQ x y))
(Less32  x y) -> (SETL (CMPL x y))
(Less16  x y) -> (SETL (CMPW x y))
(Less8   x y) -> (SETL (CMPB x y))
(Less64U x y) -> (SETB (CMPQ x y))
(Less32U x y) -> (SETB (CMPL x y))
(Less16U x y) -> (SETB (CMPW x y))
(Less8U  x y) -> (SETB (CMPB x y))
// Use SETGF with reversed operands to dodge NaN case
(Less64F x y) -> (SETGF (UCOMISD y x))
(Less32F x y) -> (SETGF (UCOMISS y x))
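
// On unordered (NaN) operands UCOMIS sets ZF, PF, and CF, so the above-style
// test behind SETGF reports false; this matches Go, where x < y is false when
// either operand is NaN.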

(Leq64  x y) -> (SETLE (CMPQ x y))
(Leq32  x y) -> (SETLE (CMPL x y))
(Leq16  x y) -> (SETLE (CMPW x y))
(Leq8   x y) -> (SETLE (CMPB x y))
(Leq64U x y) -> (SETBE (CMPQ x y))
(Leq32U x y) -> (SETBE (CMPL x y))
(Leq16U x y) -> (SETBE (CMPW x y))
(Leq8U  x y) -> (SETBE (CMPB x y))
// Use SETGEF with reversed operands to dodge NaN case
(Leq64F x y) -> (SETGEF (UCOMISD y x))
(Leq32F x y) -> (SETGEF (UCOMISS y x))

(Greater64  x y) -> (SETG (CMPQ x y))
(Greater32  x y) -> (SETG (CMPL x y))
(Greater16  x y) -> (SETG (CMPW x y))
(Greater8   x y) -> (SETG (CMPB x y))
(Greater64U x y) -> (SETA (CMPQ x y))
(Greater32U x y) -> (SETA (CMPL x y))
(Greater16U x y) -> (SETA (CMPW x y))
(Greater8U  x y) -> (SETA (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here.
// The bug is accommodated when generating the assembly.
(Greater64F x y) -> (SETGF (UCOMISD x y))
(Greater32F x y) -> (SETGF (UCOMISS x y))

(Geq64  x y) -> (SETGE (CMPQ x y))
(Geq32  x y) -> (SETGE (CMPL x y))
(Geq16  x y) -> (SETGE (CMPW x y))
(Geq8   x y) -> (SETGE (CMPB x y))
(Geq64U x y) -> (SETAE (CMPQ x y))
(Geq32U x y) -> (SETAE (CMPL x y))
(Geq16U x y) -> (SETAE (CMPW x y))
(Geq8U  x y) -> (SETAE (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here.
// The bug is accommodated when generating the assembly.
(Geq64F x y) -> (SETGEF (UCOMISD x y))
(Geq32F x y) -> (SETGEF (UCOMISS x y))

(Eq64  x y) -> (SETEQ (CMPQ x y))
(Eq32  x y) -> (SETEQ (CMPL x y))
(Eq16  x y) -> (SETEQ (CMPW x y))
(Eq8   x y) -> (SETEQ (CMPB x y))
(EqB   x y) -> (SETEQ (CMPB x y))
(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))

(Neq64  x y) -> (SETNE (CMPQ x y))
(Neq32  x y) -> (SETNE (CMPL x y))
(Neq16  x y) -> (SETNE (CMPW x y))
(Neq8   x y) -> (SETNE (CMPB x y))
(NeqB   x y) -> (SETNE (CMPB x y))
(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))

(Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)

// Lowering stores
// These more specific FP versions of the Store pattern should come first.
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)

(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVQstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)

// Lowering moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
(Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
(Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) && config.useSSE -> (MOVOstore dst (MOVOload src mem) mem)
(Move [16] dst src mem) && !config.useSSE ->
	(MOVQstore [8] dst (MOVQload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [6] dst src mem) ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [7] dst src mem) ->
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [s] dst src mem) && s > 8 && s < 16 ->
	(MOVQstore [s-8] dst (MOVQload [s-8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))

// Adjust moves to be a multiple of 16 bytes.
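// For example, a 23-byte Move (s%16 == 7) emits one 8-byte copy at offset 0 and
// then a Move of the remaining 16 bytes starting at offset 7; the one-byte
// overlap is harmless because both copies write the same source bytes.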
(Move [s] dst src mem)
	&& s > 16 && s%16 != 0 && s%16 <= 8 ->
	(Move [s-s%16]
		(OffPtr <dst.Type> dst [s%16])
		(OffPtr <src.Type> src [s%16])
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem)
	&& s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE ->
	(Move [s-s%16]
		(OffPtr <dst.Type> dst [s%16])
		(OffPtr <src.Type> src [s%16])
		(MOVOstore dst (MOVOload src mem) mem))
(Move [s] dst src mem)
	&& s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE ->
	(Move [s-s%16]
		(OffPtr <dst.Type> dst [s%16])
		(OffPtr <src.Type> src [s%16])
		(MOVQstore [8] dst (MOVQload [8] src mem)
			(MOVQstore dst (MOVQload src mem) mem)))

// Medium copying uses a Duff's device.
(Move [s] dst src mem)
	&& s >= 32 && s <= 16*64 && s%16 == 0
	&& !config.noDuffDevice ->
	(DUFFCOPY [14*(64-s/16)] dst src mem)
// 14 and 64 are magic constants.  14 is the number of bytes to encode:
//	MOVUPS	(SI), X0
//	ADDQ	$16, SI
//	MOVUPS	X0, (DI)
//	ADDQ	$16, DI
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
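// For example, s == 512 needs 32 blocks, so the rule produces
// (DUFFCOPY [14*(64-32)] ...), i.e. an entry offset of 448 bytes into duffcopy.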

// Large copying uses REP MOVSQ.
(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 ->
	(REPMOVSQ dst src (MOVQconst [s/8]) mem)

// Lowering Zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)

(Zero [3] destptr mem) ->
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [5] destptr mem) ->
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [6] destptr mem) ->
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [7] destptr mem) ->
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [0] destptr mem))

// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE ->
	(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
		(MOVQstoreconst [0] destptr mem))

// Zero small numbers of words directly.
(Zero [16] destptr mem) && !config.useSSE ->
	(MOVQstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [0] destptr mem))
(Zero [24] destptr mem) && !config.useSSE ->
	(MOVQstoreconst [makeValAndOff(0,16)] destptr
		(MOVQstoreconst [makeValAndOff(0,8)] destptr
			(MOVQstoreconst [0] destptr mem)))
(Zero [32] destptr mem) && !config.useSSE ->
	(MOVQstoreconst [makeValAndOff(0,24)] destptr
		(MOVQstoreconst [makeValAndOff(0,16)] destptr
			(MOVQstoreconst [makeValAndOff(0,8)] destptr
				(MOVQstoreconst [0] destptr mem))))

(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE ->
	(MOVQstoreconst [makeValAndOff(0,s-8)] destptr
		(MOVQstoreconst [0] destptr mem))

// Adjust zeros to be a multiple of 16 bytes.
(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE ->
	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
		(MOVOstore destptr (MOVOconst [0]) mem))

(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE ->
	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
		(MOVQstoreconst [0] destptr mem))

(Zero [16] destptr mem) && config.useSSE ->
	(MOVOstore destptr (MOVOconst [0]) mem)
(Zero [32] destptr mem) && config.useSSE ->
	(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
		(MOVOstore destptr (MOVOconst [0]) mem))
(Zero [48] destptr mem) && config.useSSE ->
	(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
		(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
			(MOVOstore destptr (MOVOconst [0]) mem)))
(Zero [64] destptr mem) && config.useSSE ->
	(MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0])
		(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
			(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
				(MOVOstore destptr (MOVOconst [0]) mem))))

// Medium zeroing uses a Duff's device.
(Zero [s] destptr mem)
	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice ->
	(DUFFZERO [s] destptr (MOVOconst [0]) mem)

// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem)
	&& (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
	&& s%8 == 0 ->
	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)

// Lowering constants
(Const8   [val]) -> (MOVLconst [val])
(Const16  [val]) -> (MOVLconst [val])
(Const32  [val]) -> (MOVLconst [val])
(Const64  [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
(ConstBool [b]) -> (MOVLconst [b])

// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
(Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
(IsInBounds idx len) && config.PtrSize == 8 -> (SETB (CMPQ idx len))
(IsInBounds idx len) && config.PtrSize == 4 -> (SETB (CMPL idx len))
(IsSliceInBounds idx len) && config.PtrSize == 8 -> (SETBE (CMPQ idx len))
(IsSliceInBounds idx len) && config.PtrSize == 4 -> (SETBE (CMPL idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)

// block rewrites
(If (SETL  cmp) yes no) -> (LT  cmp yes no)
(If (SETLE cmp) yes no) -> (LE  cmp yes no)
(If (SETG  cmp) yes no) -> (GT  cmp yes no)
(If (SETGE cmp) yes no) -> (GE  cmp yes no)
(If (SETEQ cmp) yes no) -> (EQ  cmp yes no)
(If (SETNE cmp) yes no) -> (NE  cmp yes no)
(If (SETB  cmp) yes no) -> (ULT cmp yes no)
(If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA  cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF  cmp) yes no) -> (UGT  cmp yes no)
(If (SETGEF cmp) yes no) -> (UGE  cmp yes no)
(If (SETEQF cmp) yes no) -> (EQF  cmp yes no)
(If (SETNEF cmp) yes no) -> (NEF  cmp yes no)

(If cond yes no) -> (NE (TESTB cond cond) yes no)

// Atomic loads.  Other than preserving their ordering with respect to other loads, nothing special here.
(AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
(AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem)

// Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))

// Atomic exchanges.
(AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
(AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem))
(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (XADDQlock val ptr mem))
(Select0 <t> (AddTupleFirst32 val tuple)) -> (ADDL val (Select0 <t> tuple))
(Select1     (AddTupleFirst32   _ tuple)) -> (Select1 tuple)
(Select0 <t> (AddTupleFirst64 val tuple)) -> (ADDQ val (Select0 <t> tuple))
(Select1     (AddTupleFirst64   _ tuple)) -> (Select1 tuple)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (CMPXCHGQlock ptr old new_ mem)

// Atomic memory updates.
(AtomicAnd8 ptr val mem) -> (ANDBlock ptr val mem)
(AtomicOr8 ptr val mem) -> (ORBlock ptr val mem)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold boolean tests into blocks
(NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT  cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE  cmp yes no)
(NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT  cmp yes no)
(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE  cmp yes no)
(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ  cmp yes no)
(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE  cmp yes no)
(NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) -> (ULT cmp yes no)
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)

// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
// Note that ULT and SETB check the carry flag; they are identical to CS and SETCS.
// Same, mutatis mutandis, for UGE and SETAE, and CC and SETCC.
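// A small Go sketch of the source shape these rules recognize:
//
//	func isSet(a uint64, b uint) bool {
//		return a&(1<<b) != 0 // becomes BTQ plus a carry-flag test via the rules below
//	}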
(NE (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (ULT (BTL x y))
(EQ (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (UGE (BTL x y))
(NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (ULT (BTQ x y))
(EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (UGE (BTQ x y))
(NE (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (ULT (BTLconst [log2(c)] x))
(EQ (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (UGE (BTLconst [log2(c)] x))
(NE (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (ULT (BTQconst [log2(c)] x))
(EQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (UGE (BTQconst [log2(c)] x))
(NE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (ULT (BTQconst [log2(c)] x))
(EQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (UGE (BTQconst [log2(c)] x))
(SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SETB  (BTL x y))
(SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SETAE (BTL x y))
(SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SETB  (BTQ x y))
(SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SETAE (BTQ x y))
(SETNE (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETB  (BTLconst [log2(c)] x))
(SETEQ (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAE (BTLconst [log2(c)] x))
(SETNE (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB  (BTQconst [log2(c)] x))
(SETEQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x))
(SETNE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB  (BTQconst [log2(c)] x))
(SETEQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x))

// Fold boolean negation into SETcc.
(XORLconst [1] (SETNE x)) -> (SETEQ x)
(XORLconst [1] (SETEQ x)) -> (SETNE x)
(XORLconst [1] (SETL  x)) -> (SETGE x)
(XORLconst [1] (SETGE x)) -> (SETL  x)
(XORLconst [1] (SETLE x)) -> (SETG  x)
(XORLconst [1] (SETG  x)) -> (SETLE x)
(XORLconst [1] (SETB  x)) -> (SETAE x)
(XORLconst [1] (SETAE x)) -> (SETB  x)
(XORLconst [1] (SETBE x)) -> (SETA  x)
(XORLconst [1] (SETA  x)) -> (SETBE x)

// Convert BTQconst to BTLconst if possible. It has a shorter encoding.
(BTQconst [c] x) && c < 32 -> (BTLconst [c] x)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) -> (UGT  cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE  cmp yes no)
(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF  cmp yes no)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF  cmp yes no)

// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) -> (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
// (SETEQF x) -> (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))

// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)

(SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))

(MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
(MULL x (MOVLconst [c])) -> (MULLconst [c] x)

(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)

(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)

(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
(XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x)

(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)

(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)

(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
(XORL x (MOVLconst [c])) -> (XORLconst [c] x)

(SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)

(SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)

(SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)

(SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)

(SHRW x (MOVQconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
(SHRW x (MOVLconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
(SHRW _ (MOVQconst [c])) && c&31 >= 16 -> (MOVLconst [0])
(SHRW _ (MOVLconst [c])) && c&31 >= 16 -> (MOVLconst [0])

(SHRB x (MOVQconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
(SHRB x (MOVLconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
(SHRB _ (MOVQconst [c])) && c&31 >= 8 -> (MOVLconst [0])
(SHRB _ (MOVLconst [c])) && c&31 >= 8 -> (MOVLconst [0])

(SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)

(SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)

(SARW x (MOVQconst [c])) -> (SARWconst [min(c&31,15)] x)
(SARW x (MOVLconst [c])) -> (SARWconst [min(c&31,15)] x)

(SARB x (MOVQconst [c])) -> (SARBconst [min(c&31,7)] x)
(SARB x (MOVLconst [c])) -> (SARBconst [min(c&31,7)] x)

// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
(SHLQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SHLQ x y)
(SHRQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SHRQ x y)
(SARQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SARQ x y)
(SHLQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SHLQ x (NEGQ <t> y))
(SHRQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SHRQ x (NEGQ <t> y))
(SARQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SARQ x (NEGQ <t> y))
(SHLQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SHLQ x y)
(SHRQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SHRQ x y)
(SARQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SARQ x y)
(SHLQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SHLQ x (NEGQ <t> y))
(SHRQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SHRQ x (NEGQ <t> y))
(SARQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SARQ x (NEGQ <t> y))

(SHLL x (ADDQconst [c] y)) && c & 31 == 0 -> (SHLL x y)
(SHRL x (ADDQconst [c] y)) && c & 31 == 0 -> (SHRL x y)
(SARL x (ADDQconst [c] y)) && c & 31 == 0 -> (SARL x y)
(SHLL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SHLL x (NEGQ <t> y))
(SHRL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SHRL x (NEGQ <t> y))
(SARL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SARL x (NEGQ <t> y))
(SHLL x (ANDQconst [c] y)) && c & 31 == 31 -> (SHLL x y)
(SHRL x (ANDQconst [c] y)) && c & 31 == 31 -> (SHRL x y)
(SARL x (ANDQconst [c] y)) && c & 31 == 31 -> (SARL x y)
(SHLL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SHLL x (NEGQ <t> y))
(SHRL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SHRL x (NEGQ <t> y))
(SARL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SARL x (NEGQ <t> y))

(SHLQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SHLQ x y)
(SHRQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SHRQ x y)
(SARQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SARQ x y)
(SHLQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SHLQ x (NEGL <t> y))
(SHRQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SHRQ x (NEGL <t> y))
(SARQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SARQ x (NEGL <t> y))
(SHLQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SHLQ x y)
(SHRQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SHRQ x y)
(SARQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SARQ x y)
(SHLQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SHLQ x (NEGL <t> y))
(SHRQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SHRQ x (NEGL <t> y))
(SARQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SARQ x (NEGL <t> y))

(SHLL x (ADDLconst [c] y)) && c & 31 == 0 -> (SHLL x y)
(SHRL x (ADDLconst [c] y)) && c & 31 == 0 -> (SHRL x y)
(SARL x (ADDLconst [c] y)) && c & 31 == 0 -> (SARL x y)
(SHLL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SHLL x (NEGL <t> y))
(SHRL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SHRL x (NEGL <t> y))
(SARL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SARL x (NEGL <t> y))
(SHLL x (ANDLconst [c] y)) && c & 31 == 31 -> (SHLL x y)
(SHRL x (ANDLconst [c] y)) && c & 31 == 31 -> (SHRL x y)
(SARL x (ANDLconst [c] y)) && c & 31 == 31 -> (SARL x y)
(SHLL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SHLL x (NEGL <t> y))
(SHRL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SHRL x (NEGL <t> y))
(SARL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SARL x (NEGL <t> y))

// Constant rotate instructions
(ADDQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])
( ORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])
(XORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])

(ADDL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
( ORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
(XORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])

(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])

(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8 && t.Size() == 1 -> (ROLBconst x [c])

(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)

// Non-constant rotates.
// We want to issue a rotate when the Go source contains code like
//     y &= 63
//     x << y | x >> (64-y)
// The shift rules above convert << to SHLx and >> to SHRx.
// SHRx converts its shift argument from 64-y to -y.
// A tricky situation occurs when y==0. Then the original code would be:
//     x << 0 | x >> 64
// But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
// to force the second term to 0. We don't need that mask, but we must match
// it in order to strip it out.
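// Tracing the y == 0 case through the first 64-bit rule below: ANDQconst y [63]
// is 0, ADDQconst [-64] gives -64, NEGQ gives 64, and CMPQconst [64] is not
// below, so the SBBQcarrymask is 0, the ANDed second term vanishes, and the
// result is x << 0 == x, as required.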
(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) -> (ROLQ x y)
(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) -> (ROLQ x y)
(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) -> (RORQ x y)
(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) -> (RORQ x y)

(ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) -> (ROLL x y)
(ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) -> (ROLL x y)
(ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) -> (RORL x y)
(ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) -> (RORL x y)

// Help with rotate detection
(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) -> (FlagLT_ULT)
(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst  [7] _))) [32]) -> (FlagLT_ULT)

(ORL (SHLL x (ANDQconst y [15]))
     (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))
           (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
  && v.Type.Size() == 2
  -> (ROLW x y)
(ORL (SHLL x (ANDLconst y [15]))
     (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))
           (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
  && v.Type.Size() == 2
  -> (ROLW x y)
(ORL (SHRW x (ANDQconst y [15]))
     (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
  && v.Type.Size() == 2
  -> (RORW x y)
(ORL (SHRW x (ANDLconst y [15]))
     (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
  && v.Type.Size() == 2
  -> (RORW x y)

(ORL (SHLL x (ANDQconst y [ 7]))
     (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))
           (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
  && v.Type.Size() == 1
  -> (ROLB x y)
(ORL (SHLL x (ANDLconst y [ 7]))
     (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))
           (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
  && v.Type.Size() == 1
  -> (ROLB x y)
(ORL (SHRB x (ANDQconst y [ 7]))
     (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
  && v.Type.Size() == 1
  -> (RORB x y)
(ORL (SHRB x (ANDLconst y [ 7]))
     (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
  && v.Type.Size() == 1
  -> (RORB x y)

// rotate left negative = rotate right
(ROLQ x (NEGQ y)) -> (RORQ x y)
(ROLQ x (NEGL y)) -> (RORQ x y)
(ROLL x (NEGQ y)) -> (RORL x y)
(ROLL x (NEGL y)) -> (RORL x y)
(ROLW x (NEGQ y)) -> (RORW x y)
(ROLW x (NEGL y)) -> (RORW x y)
(ROLB x (NEGQ y)) -> (RORB x y)
(ROLB x (NEGL y)) -> (RORB x y)

// rotate right negative = rotate left
(RORQ x (NEGQ y)) -> (ROLQ x y)
(RORQ x (NEGL y)) -> (ROLQ x y)
(RORL x (NEGQ y)) -> (ROLL x y)
(RORL x (NEGL y)) -> (ROLL x y)
(RORW x (NEGQ y)) -> (ROLW x y)
(RORW x (NEGL y)) -> (ROLW x y)
(RORB x (NEGQ y)) -> (ROLB x y)
(RORB x (NEGL y)) -> (ROLB x y)

// rotate by constants
(ROLQ x (MOVQconst [c])) -> (ROLQconst [c&63] x)
(ROLQ x (MOVLconst [c])) -> (ROLQconst [c&63] x)
(ROLL x (MOVQconst [c])) -> (ROLLconst [c&31] x)
(ROLL x (MOVLconst [c])) -> (ROLLconst [c&31] x)
(ROLW x (MOVQconst [c])) -> (ROLWconst [c&15] x)
(ROLW x (MOVLconst [c])) -> (ROLWconst [c&15] x)
(ROLB x (MOVQconst [c])) -> (ROLBconst [c&7 ] x)
(ROLB x (MOVLconst [c])) -> (ROLBconst [c&7 ] x)

(RORQ x (MOVQconst [c])) -> (ROLQconst [(-c)&63] x)
(RORQ x (MOVLconst [c])) -> (ROLQconst [(-c)&63] x)
(RORL x (MOVQconst [c])) -> (ROLLconst [(-c)&31] x)
(RORL x (MOVLconst [c])) -> (ROLLconst [(-c)&31] x)
(RORW x (MOVQconst [c])) -> (ROLWconst [(-c)&15] x)
(RORW x (MOVLconst [c])) -> (ROLWconst [(-c)&15] x)
(RORB x (MOVQconst [c])) -> (ROLBconst [(-c)&7 ] x)
(RORB x (MOVLconst [c])) -> (ROLBconst [(-c)&7 ] x)

// Constant shift simplifications
(SHLQconst x [0]) -> x
(SHRQconst x [0]) -> x
(SARQconst x [0]) -> x

(SHLLconst x [0]) -> x
(SHRLconst x [0]) -> x
(SARLconst x [0]) -> x

(SHRWconst x [0]) -> x
(SARWconst x [0]) -> x

(SHRBconst x [0]) -> x
(SARBconst x [0]) -> x

(ROLQconst x [0]) -> x
(ROLLconst x [0]) -> x
(ROLWconst x [0]) -> x
(ROLBconst x [0]) -> x

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHRW x (MOVLconst [24]))), but just in case.

(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))

// Using MOVZX instead of AND is cheaper.
(ANDLconst [0xFF] x) -> (MOVBQZX x)
(ANDLconst [0xFFFF] x) -> (MOVWQZX x)
(ANDQconst [0xFF] x) -> (MOVBQZX x)
(ANDQconst [0xFFFF] x) -> (MOVWQZX x)
(ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x)

// strength reduction
// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
//    1 - addq, shlq, leaq, negq
//    3 - imulq
// This limits the rewrites to two instructions.
// TODO: 27, 81
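// For example, the [11] rule below computes x + 2*(x + 4*x) == 11*x with two
// LEAQs, staying within the two-instruction budget.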
   896  (MULQconst [-1] x) -> (NEGQ x)
   897  (MULQconst [0] _) -> (MOVQconst [0])
   898  (MULQconst [1] x) -> x
   899  (MULQconst [3] x) -> (LEAQ2 x x)
   900  (MULQconst [5] x) -> (LEAQ4 x x)
   901  (MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x)
   902  (MULQconst [9] x) -> (LEAQ8 x x)
   903  (MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x))
   904  (MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x))
   905  (MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
   906  (MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x))
   907  (MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x))
   908  (MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x))
   909  (MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x))
   910  
   911  (MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x)
   912  (MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
   913  (MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
   914  (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
   915  (MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
   916  (MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
   917  (MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
   918  (MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
   919  (MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
   920  
   921  // combine add/shift into LEAQ
   922  (ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
   923  (ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
   924  (ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
   925  (ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
   926  (ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
   927  
   928  // combine ADDQ/ADDQconst into LEAQ1
   929  (ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y)
   930  (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
   931  
   932  // fold ADDQ into LEAQ
   933  (ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   934  (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   935  (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   936  (ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   937  
   938  // fold ADDQconst into LEAQx
   939  (ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
   940  (ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
   941  (ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
   942  (ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
   943  (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
   944  (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
   945  (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
   946  (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
   947  (LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
   948  (LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
   949  (LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
   950  
   951  // fold shifts into LEAQx
   952  (LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
   953  (LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
   954  (LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
   955  (LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
   956  (LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
   957  (LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)
   958  
   959  // reverse ordering of compare instruction
   960  (SETL (InvertFlags x)) -> (SETG x)
   961  (SETG (InvertFlags x)) -> (SETL x)
   962  (SETB (InvertFlags x)) -> (SETA x)
   963  (SETA (InvertFlags x)) -> (SETB x)
   964  (SETLE (InvertFlags x)) -> (SETGE x)
   965  (SETGE (InvertFlags x)) -> (SETLE x)
   966  (SETBE (InvertFlags x)) -> (SETAE x)
   967  (SETAE (InvertFlags x)) -> (SETBE x)
   968  (SETEQ (InvertFlags x)) -> (SETEQ x)
   969  (SETNE (InvertFlags x)) -> (SETNE x)
   970  
   971  // sign extended loads
   972  // Note: The combined instruction must end up in the same block
   973  // as the original load. If not, we end up making a value with
   974  // memory type live in two different blocks, which can lead to
   975  // multiple memory values alive simultaneously.
   976  // Make sure we don't combine these ops if the load has another use.
   977  // This prevents a single load from being split into multiple loads
   978  // which then might return different values.  See test/atomicload.go.
   979  (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   980  (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   981  (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   982  (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   983  (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   984  (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   985  (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   986  (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   987  (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   988  (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   989  (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   990  (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   991  (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   992  (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   993  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   994  (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   995  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   996  (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   997  
   998  (MOVLQZX x) && zeroUpper32Bits(x,3) -> x
   999  
  1000  (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
  1001  (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
  1002  (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
  1003  (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
  1004  (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
  1005  
  1006  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
  1007  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQZX x)
  1008  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQZX x)
  1009  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQZX x)
  1010  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
  1011  (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQSX x)
  1012  (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQSX x)
  1013  (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQSX x)
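         // For example (a sketch of source code that produces this shape):
         //
         //	b[i] = x   // MOVBstore
         //	y := b[i]  // MOVBload from the same address
         //
         // The load is rewritten to (MOVBQZX x), reusing the stored value
         // instead of going back through memory.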
  1014  
  1015  // Fold extensions and ANDs together.
  1016  (MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
  1017  (MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
  1018  (MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
  1019  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
  1020  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
  1021  (MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)
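         // E.g. (MOVBQZX (ANDLconst [0x1ff] x)) -> (ANDLconst [0xff] x): only
         // bits surviving both the mask and the byte-wide zero-extension remain,
         // i.e. uint64(uint8(x&0x1ff)) == uint64(x)&0xff in Go terms. The signed
         // variants require the sign bit of the masked result to be clear, so
         // sign- and zero-extension agree.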
  1022  
  1023  // Don't extend before storing
  1024  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
  1025  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
  1026  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
  1027  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
  1028  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
  1029  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
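         // A MOVBstore writes only the low 8 bits of its value operand (MOVWstore
         // and MOVLstore, the low 16 and 32), so extending the value first cannot
         // change what reaches memory. A rough Go analogue:
         //
         //	func store(p *int8, v int32) {
         //		*p = int8(v) // only the low byte of v is stored
         //	}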
  1030  
  1031  // fold constants into memory operations
   1032  // Note that this is not always a good idea: if not all the uses of
   1033  // the ADDQconst are eliminated, we still have to compute the ADDQconst, and we
   1034  // then have two live values (ptr and (ADDQconst [off] ptr)) instead of one.
  1035  // Nevertheless, let's do it!
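         // E.g. (MOVQload [8] {sym} (ADDQconst [16] ptr) mem) becomes
         // (MOVQload [24] {sym} ptr mem): the constant moves into the load's
         // displacement, and the ADDQconst may then become dead.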
  1036  (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload  [off1+off2] {sym} ptr mem)
  1037  (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
  1038  (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
  1039  (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
  1040  (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
  1041  (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
  1042  (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload  [off1+off2] {sym} ptr mem)
  1043  
  1044  (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore  [off1+off2] {sym} ptr val mem)
  1045  (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
  1046  (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
  1047  (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
  1048  (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
  1049  (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
  1050  (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore  [off1+off2] {sym} ptr val mem)
  1051  
  1052  // Fold constants into stores.
  1053  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
  1054  	(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
  1055  (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
  1056  	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
  1057  (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
  1058  	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
  1059  (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
  1060  	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
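         // A minimal sketch of the ValAndOff packing these rules assume (the
         // real definition is ValAndOff in the ssa package): both halves must
         // fit in 32 bits.
         //
         //	func makeValAndOff(val, off int64) int64 {
         //		return val<<32 + int64(uint32(off)) // val high, off low
         //	}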
  1061  
  1062  // Fold address offsets into constant stores.
  1063  (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1064  	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1065  (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1066  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1067  (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1068  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1069  (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  1070  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  1071  
  1072  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
  1073  // what variables are being read/written by the ops.
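         // Roughly: (MOVQload [a] {nil} (LEAQ [b] {g} base) mem) becomes
         // (MOVQload [a+b] {g} base mem), so the symbol g is visible on the
         // memory op itself where liveness analysis looks for it.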
  1074  (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1075  	(MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1076  (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1077  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1078  (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1079  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1080  (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1081  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1082  (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1083  	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1084  (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1085  	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1086  (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1087  	(MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1088  
  1089  (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1090  	(MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1091  (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1092  	(MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1093  (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1094  	(MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1095  
  1096  (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1097  	(MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1098  (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1099  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1100  (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1101  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1102  (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1103  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1104  (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1105  	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1106  (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1107  	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1108  (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1109  	(MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1110  
  1111  (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1112  	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1113  (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1114  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1115  (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1116  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1117  (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  1118  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  1119  
  1120  // generating indexed loads and stores
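         // LEAQ1/2/4/8 compute ptr + 1/2/4/8*idx + off, matching x86's scaled
         // index addressing. Folding one into a load lets, e.g., a Go []int32
         // access s[i] become a single MOVLloadidx4, roughly MOVL (ptr)(idx*4).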
  1121  (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1122  	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1123  (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1124  	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1125  (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1126  	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1127  (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1128  	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1129  (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1130  	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1131  (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1132  	(MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1133  (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1134  	(MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1135  (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1136  	(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1137  (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1138  	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1139  (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1140  	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1141  (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1142  	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1143  (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1144  	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1145  
  1146  (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1147  	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1148  (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1149  	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1150  (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1151  	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1152  (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1153  	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1154  (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1155  	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1156  (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1157  	(MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1158  (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1159  	(MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1160  (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1161  	(MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1162  (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1163  	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1164  (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1165  	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1166  (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1167  	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1168  (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1169  	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1170  
  1171  (MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
  1172  (MOVWload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
  1173  (MOVLload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
  1174  (MOVQload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVQloadidx1 [off] {sym} ptr idx mem)
  1175  (MOVSSload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
  1176  (MOVSDload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
  1177  (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
  1178  (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
  1179  (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
  1180  (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVQstoreidx1 [off] {sym} ptr idx val mem)
  1181  (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
  1182  (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
  1183  
  1184  (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1185  	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1186  (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1187  	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1188  (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1189  	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1190  (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1191  	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1192  (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1193  	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1194  (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1195  	(MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1196  (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1197  	(MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1198  
  1199  (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
  1200  (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
  1201  (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
  1202  (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
  1203  
  1204  // combine SHLQ into indexed loads and stores
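         // E.g. ptr + (idx<<3) addresses the same byte as an 8-scaled index, so
         // (MOVQloadidx1 [c] ptr (SHLQconst [3] idx)) is (MOVQloadidx8 [c] ptr idx)
         // and the shift can go away.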
  1205  (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
  1206  (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
  1207  (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVLloadidx8 [c] {sym} ptr idx mem)
  1208  (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem)
  1209  (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVSSloadidx4 [c] {sym} ptr idx mem)
  1210  (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVSDloadidx8 [c] {sym} ptr idx mem)
  1211  (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
  1212  (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
  1213  (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVLstoreidx8 [c] {sym} ptr idx val mem)
  1214  (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem)
  1215  (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
  1216  (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
  1217  (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
  1218  (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
  1219  (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
  1220  
  1221  // combine ADDQ into indexed loads and stores
  1222  (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
  1223  (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
  1224  (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
  1225  (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  1226  (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
  1227  (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx8 [c+d] {sym} ptr idx mem)
  1228  (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
  1229  (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
  1230  (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
  1231  (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
  1232  (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
  1233  (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
  1234  
  1235  (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
  1236  (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
  1237  (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
  1238  (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
  1239  (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
  1240  (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
  1241  (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
  1242  (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
  1243  (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
  1244  (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
  1245  (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
  1246  (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
  1247  
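         // When the constant is folded in from the index side, it is scaled
         // first: ptr + k*(idx+d) + c == ptr + k*idx + (c + k*d), hence the
         // c+2*d, c+4*d, and c+8*d offsets below.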
  1248  (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
  1249  (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
  1250  (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
  1251  (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  1252  (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
  1253  (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+8*d) -> (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
  1254  (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d)   -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
  1255  (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
  1256  (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d)   -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
  1257  (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
  1258  (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d)   -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
  1259  (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
  1260  
  1261  (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
  1262  (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
  1263  (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
  1264  (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
  1265  (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
  1266  (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+8*d) -> (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
  1267  (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d)   -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
  1268  (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
  1269  (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d)   -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
  1270  (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
  1271  (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d)   -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
  1272  (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
  1273  
  1274  (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1275  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1276  (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1277  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1278  (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1279  	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1280  (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1281  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1282  (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1283  	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1284  (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1285  	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1286  (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
  1287  	(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1288  
  1289  (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1290  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1291  (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1292  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1293  (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) ->
  1294  	(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
  1295  (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1296  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1297  (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) ->
  1298  	(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
  1299  (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
  1300  	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1301  (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) ->
  1302  	(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
  1303  
  1304  // fold LEAQs together
  1305  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1306        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  1307  
  1308  // LEAQ into LEAQ1
  1309  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1310         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1311  
  1312  // LEAQ1 into LEAQ
  1313  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1314         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1315  
  1316  // LEAQ into LEAQ[248]
  1317  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1318         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1319  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1320         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1321  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1322         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1323  
  1324  // LEAQ[248] into LEAQ
  1325  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1326        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1327  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1328        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1329  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1330        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1331  
  1332  // Absorb InvertFlags into branches.
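         // InvertFlags represents the flags of a comparison with its operands
         // swapped, so the signed and unsigned directions flip (LT<->GT,
         // ULT<->UGT) while EQ and NE are unchanged.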
  1333  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
  1334  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
  1335  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
  1336  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
  1337  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
  1338  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
  1339  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
  1340  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
  1341  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
  1342  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
  1343  
  1344  // Constant comparisons.
  1345  (CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
  1346  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
  1347  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
  1348  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
  1349  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
  1350  (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
  1351  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
  1352  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
  1353  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
  1354  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
  1355  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
  1356  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
  1357  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
  1358  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
  1359  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
  1360  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
  1361  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
  1362  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
  1363  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
  1364  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
  1365  
  1366  // Other known comparisons.
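         // E.g. a MOVBQZX result always lies in [0, 0xFF], so comparing it
         // against a constant c with 0xFF < c is known to be less-than, both
         // signed and unsigned; the shift and mask cases bound the value the
         // same way.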
  1367  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
  1368  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
  1369  (CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
  1370  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
  1371  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
  1372  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1373  (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1374  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
  1375  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
  1376  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
  1377  
  1378  // TODO: DIVxU also.
  1379  
  1380  // Absorb flag constants into SBB ops.
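         // SBBQcarrymask is SBBQ R, R: it computes 0 - 0 - CF, i.e. all ones
         // when the carry flag is set (unsigned less-than) and zero otherwise,
         // which is why exactly the _ULT flag states produce -1.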
  1381  (SBBQcarrymask (FlagEQ)) -> (MOVQconst [0])
  1382  (SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1])
  1383  (SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0])
  1384  (SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1])
  1385  (SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0])
  1386  (SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
  1387  (SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
  1388  (SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
  1389  (SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
  1390  (SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
  1391  
  1392  // Absorb flag constants into branches.
  1393  (EQ (FlagEQ) yes no) -> (First nil yes no)
  1394  (EQ (FlagLT_ULT) yes no) -> (First nil no yes)
  1395  (EQ (FlagLT_UGT) yes no) -> (First nil no yes)
  1396  (EQ (FlagGT_ULT) yes no) -> (First nil no yes)
  1397  (EQ (FlagGT_UGT) yes no) -> (First nil no yes)
  1398  
  1399  (NE (FlagEQ) yes no) -> (First nil no yes)
  1400  (NE (FlagLT_ULT) yes no) -> (First nil yes no)
  1401  (NE (FlagLT_UGT) yes no) -> (First nil yes no)
  1402  (NE (FlagGT_ULT) yes no) -> (First nil yes no)
  1403  (NE (FlagGT_UGT) yes no) -> (First nil yes no)
  1404  
  1405  (LT (FlagEQ) yes no) -> (First nil no yes)
  1406  (LT (FlagLT_ULT) yes no) -> (First nil yes no)
  1407  (LT (FlagLT_UGT) yes no) -> (First nil yes no)
  1408  (LT (FlagGT_ULT) yes no) -> (First nil no yes)
  1409  (LT (FlagGT_UGT) yes no) -> (First nil no yes)
  1410  
  1411  (LE (FlagEQ) yes no) -> (First nil yes no)
  1412  (LE (FlagLT_ULT) yes no) -> (First nil yes no)
  1413  (LE (FlagLT_UGT) yes no) -> (First nil yes no)
  1414  (LE (FlagGT_ULT) yes no) -> (First nil no yes)
  1415  (LE (FlagGT_UGT) yes no) -> (First nil no yes)
  1416  
  1417  (GT (FlagEQ) yes no) -> (First nil no yes)
  1418  (GT (FlagLT_ULT) yes no) -> (First nil no yes)
  1419  (GT (FlagLT_UGT) yes no) -> (First nil no yes)
  1420  (GT (FlagGT_ULT) yes no) -> (First nil yes no)
  1421  (GT (FlagGT_UGT) yes no) -> (First nil yes no)
  1422  
  1423  (GE (FlagEQ) yes no) -> (First nil yes no)
  1424  (GE (FlagLT_ULT) yes no) -> (First nil no yes)
  1425  (GE (FlagLT_UGT) yes no) -> (First nil no yes)
  1426  (GE (FlagGT_ULT) yes no) -> (First nil yes no)
  1427  (GE (FlagGT_UGT) yes no) -> (First nil yes no)
  1428  
  1429  (ULT (FlagEQ) yes no) -> (First nil no yes)
  1430  (ULT (FlagLT_ULT) yes no) -> (First nil yes no)
  1431  (ULT (FlagLT_UGT) yes no) -> (First nil no yes)
  1432  (ULT (FlagGT_ULT) yes no) -> (First nil yes no)
  1433  (ULT (FlagGT_UGT) yes no) -> (First nil no yes)
  1434  
  1435  (ULE (FlagEQ) yes no) -> (First nil yes no)
  1436  (ULE (FlagLT_ULT) yes no) -> (First nil yes no)
  1437  (ULE (FlagLT_UGT) yes no) -> (First nil no yes)
  1438  (ULE (FlagGT_ULT) yes no) -> (First nil yes no)
  1439  (ULE (FlagGT_UGT) yes no) -> (First nil no yes)
  1440  
  1441  (UGT (FlagEQ) yes no) -> (First nil no yes)
  1442  (UGT (FlagLT_ULT) yes no) -> (First nil no yes)
  1443  (UGT (FlagLT_UGT) yes no) -> (First nil yes no)
  1444  (UGT (FlagGT_ULT) yes no) -> (First nil no yes)
  1445  (UGT (FlagGT_UGT) yes no) -> (First nil yes no)
  1446  
  1447  (UGE (FlagEQ) yes no) -> (First nil yes no)
  1448  (UGE (FlagLT_ULT) yes no) -> (First nil no yes)
  1449  (UGE (FlagLT_UGT) yes no) -> (First nil yes no)
  1450  (UGE (FlagGT_ULT) yes no) -> (First nil no yes)
  1451  (UGE (FlagGT_UGT) yes no) -> (First nil yes no)
  1452  
  1453  // Absorb flag constants into SETxx ops.
  1454  (SETEQ (FlagEQ)) -> (MOVLconst [1])
  1455  (SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
  1456  (SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
  1457  (SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
  1458  (SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
  1459  
  1460  (SETNE (FlagEQ)) -> (MOVLconst [0])
  1461  (SETNE (FlagLT_ULT)) -> (MOVLconst [1])
  1462  (SETNE (FlagLT_UGT)) -> (MOVLconst [1])
  1463  (SETNE (FlagGT_ULT)) -> (MOVLconst [1])
  1464  (SETNE (FlagGT_UGT)) -> (MOVLconst [1])
  1465  
  1466  (SETL (FlagEQ)) -> (MOVLconst [0])
  1467  (SETL (FlagLT_ULT)) -> (MOVLconst [1])
  1468  (SETL (FlagLT_UGT)) -> (MOVLconst [1])
  1469  (SETL (FlagGT_ULT)) -> (MOVLconst [0])
  1470  (SETL (FlagGT_UGT)) -> (MOVLconst [0])
  1471  
  1472  (SETLE (FlagEQ)) -> (MOVLconst [1])
  1473  (SETLE (FlagLT_ULT)) -> (MOVLconst [1])
  1474  (SETLE (FlagLT_UGT)) -> (MOVLconst [1])
  1475  (SETLE (FlagGT_ULT)) -> (MOVLconst [0])
  1476  (SETLE (FlagGT_UGT)) -> (MOVLconst [0])
  1477  
  1478  (SETG (FlagEQ)) -> (MOVLconst [0])
  1479  (SETG (FlagLT_ULT)) -> (MOVLconst [0])
  1480  (SETG (FlagLT_UGT)) -> (MOVLconst [0])
  1481  (SETG (FlagGT_ULT)) -> (MOVLconst [1])
  1482  (SETG (FlagGT_UGT)) -> (MOVLconst [1])
  1483  
  1484  (SETGE (FlagEQ)) -> (MOVLconst [1])
  1485  (SETGE (FlagLT_ULT)) -> (MOVLconst [0])
  1486  (SETGE (FlagLT_UGT)) -> (MOVLconst [0])
  1487  (SETGE (FlagGT_ULT)) -> (MOVLconst [1])
  1488  (SETGE (FlagGT_UGT)) -> (MOVLconst [1])
  1489  
  1490  (SETB (FlagEQ)) -> (MOVLconst [0])
  1491  (SETB (FlagLT_ULT)) -> (MOVLconst [1])
  1492  (SETB (FlagLT_UGT)) -> (MOVLconst [0])
  1493  (SETB (FlagGT_ULT)) -> (MOVLconst [1])
  1494  (SETB (FlagGT_UGT)) -> (MOVLconst [0])
  1495  
  1496  (SETBE (FlagEQ)) -> (MOVLconst [1])
  1497  (SETBE (FlagLT_ULT)) -> (MOVLconst [1])
  1498  (SETBE (FlagLT_UGT)) -> (MOVLconst [0])
  1499  (SETBE (FlagGT_ULT)) -> (MOVLconst [1])
  1500  (SETBE (FlagGT_UGT)) -> (MOVLconst [0])
  1501  
  1502  (SETA (FlagEQ)) -> (MOVLconst [0])
  1503  (SETA (FlagLT_ULT)) -> (MOVLconst [0])
  1504  (SETA (FlagLT_UGT)) -> (MOVLconst [1])
  1505  (SETA (FlagGT_ULT)) -> (MOVLconst [0])
  1506  (SETA (FlagGT_UGT)) -> (MOVLconst [1])
  1507  
  1508  (SETAE (FlagEQ)) -> (MOVLconst [1])
  1509  (SETAE (FlagLT_ULT)) -> (MOVLconst [0])
  1510  (SETAE (FlagLT_UGT)) -> (MOVLconst [1])
  1511  (SETAE (FlagGT_ULT)) -> (MOVLconst [0])
  1512  (SETAE (FlagGT_UGT)) -> (MOVLconst [1])
  1513  
  1514  // Remove redundant *const ops
  1515  (ADDQconst [0] x) -> x
  1516  (ADDLconst [c] x) && int32(c)==0 -> x
  1517  (SUBQconst [0] x) -> x
   1518  (SUBLconst [c] x) && int32(c)==0 -> x
  1519  (ANDQconst [0] _)                 -> (MOVQconst [0])
  1520  (ANDLconst [c] _) && int32(c)==0  -> (MOVLconst [0])
  1521  (ANDQconst [-1] x)                -> x
  1522  (ANDLconst [c] x) && int32(c)==-1 -> x
  1523  (ORQconst [0] x)                  -> x
  1524  (ORLconst [c] x) && int32(c)==0   -> x
  1525  (ORQconst [-1] _)                 -> (MOVQconst [-1])
  1526  (ORLconst [c] _) && int32(c)==-1  -> (MOVLconst [-1])
   1527  (XORQconst [0] x)                 -> x
   1528  (XORLconst [c] x) && int32(c)==0  -> x
  1529  // TODO: since we got rid of the W/B versions, we might miss
  1530  // things like (ANDLconst [0x100] x) which were formerly
  1531  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1532  // If we cared, we might do:
   1533  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1534  
  1535  // Convert constant subtracts to constant adds
  1536  (SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
  1537  (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
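         // The c != -(1<<31) guard matters because negating -1<<31 overflows
         // int32, so -c would not fit the 32-bit immediate. SUBLconst needs no
         // guard: its result is explicitly truncated back to 32 bits.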
  1538  
  1539  // generic constant folding
  1540  // TODO: more of this
  1541  (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
  1542  (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
  1543  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
  1544  (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
  1545  (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
  1546  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
  1547  (SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1548  (SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1549  (SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1550  (SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1551  (NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
  1552  (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
  1553  (MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
  1554  (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
  1555  (ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
  1556  (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
  1557  (ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
  1558  (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
  1559  (XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
  1560  (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
  1561  (NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
  1562  (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
  1563  
  1564  // generic simplifications
  1565  // TODO: more of this
  1566  (ADDQ x (NEGQ y)) -> (SUBQ x y)
  1567  (ADDL x (NEGL y)) -> (SUBL x y)
  1568  (SUBQ x x) -> (MOVQconst [0])
  1569  (SUBL x x) -> (MOVLconst [0])
  1570  (ANDQ x x) -> x
  1571  (ANDL x x) -> x
  1572  (ORQ x x) -> x
  1573  (ORL x x) -> x
  1574  (XORQ x x) -> (MOVQconst [0])
  1575  (XORL x x) -> (MOVLconst [0])
  1576  (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)
  1577  
  1578  // checking AND against 0.
  1579  (CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
  1580  (CMPLconst (ANDL x y) [0]) -> (TESTL x y)
  1581  (CMPWconst (ANDL x y) [0]) -> (TESTW x y)
  1582  (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
  1583  (CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
  1584  (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
  1585  (CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
  1586  (CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
  1587  
  1588  // Convert TESTx to TESTxconst if possible.
  1589  (TESTQ (MOVQconst [c]) x) && is32Bit(c) -> (TESTQconst [c] x)
  1590  (TESTL (MOVLconst [c]) x) -> (TESTLconst [c] x)
  1591  (TESTW (MOVLconst [c]) x) -> (TESTWconst [c] x)
  1592  (TESTB (MOVLconst [c]) x) -> (TESTBconst [c] x)
  1593  
  1594  // TEST %reg,%reg is shorter than CMP
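         // TESTQ x x ANDs x with itself: ZF and SF come out exactly as for a
         // compare with zero, and the encoding needs no immediate byte.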
  1595  (CMPQconst x [0]) -> (TESTQ x x)
  1596  (CMPLconst x [0]) -> (TESTL x x)
  1597  (CMPWconst x [0]) -> (TESTW x x)
  1598  (CMPBconst x [0]) -> (TESTB x x)
  1599  
  1600  // Combining byte loads into larger (unaligned) loads.
  1601  // There are many ways these combinations could occur.  This is
  1602  // designed to match the way encoding/binary.LittleEndian does it.
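         // A sketch of the pattern these rules target (mirroring
         // encoding/binary.LittleEndian.Uint32):
         //
         //	func le32(b []byte) uint32 {
         //		return uint32(b[0]) | uint32(b[1])<<8 |
         //			uint32(b[2])<<16 | uint32(b[3])<<24
         //	}
         //
         // The byte loads, shifts, and ORs collapse stepwise into a single
         // unaligned MOVLload, which amd64 supports directly.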
  1603  
  1604  // Little-endian loads
  1605  
  1606  (ORL                  x0:(MOVBload [i0] {s} p mem)
  1607      sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
  1608    && i1 == i0+1
  1609    && x0.Uses == 1
  1610    && x1.Uses == 1
  1611    && sh.Uses == 1
  1612    && mergePoint(b,x0,x1) != nil
  1613    && clobber(x0)
  1614    && clobber(x1)
  1615    && clobber(sh)
  1616    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1617  
  1618  (ORQ                  x0:(MOVBload [i0] {s} p mem)
  1619      sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
  1620    && i1 == i0+1
  1621    && x0.Uses == 1
  1622    && x1.Uses == 1
  1623    && sh.Uses == 1
  1624    && mergePoint(b,x0,x1) != nil
  1625    && clobber(x0)
  1626    && clobber(x1)
  1627    && clobber(sh)
  1628    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1629  
  1630  (ORL                   x0:(MOVWload [i0] {s} p mem)
  1631      sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
  1632    && i1 == i0+2
  1633    && x0.Uses == 1
  1634    && x1.Uses == 1
  1635    && sh.Uses == 1
  1636    && mergePoint(b,x0,x1) != nil
  1637    && clobber(x0)
  1638    && clobber(x1)
  1639    && clobber(sh)
  1640    -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1641  
  1642  (ORQ                   x0:(MOVWload [i0] {s} p mem)
  1643      sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
  1644    && i1 == i0+2
  1645    && x0.Uses == 1
  1646    && x1.Uses == 1
  1647    && sh.Uses == 1
  1648    && mergePoint(b,x0,x1) != nil
  1649    && clobber(x0)
  1650    && clobber(x1)
  1651    && clobber(sh)
  1652    -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1653  
  1654  (ORQ                   x0:(MOVLload [i0] {s} p mem)
  1655      sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
  1656    && i1 == i0+4
  1657    && x0.Uses == 1
  1658    && x1.Uses == 1
  1659    && sh.Uses == 1
  1660    && mergePoint(b,x0,x1) != nil
  1661    && clobber(x0)
  1662    && clobber(x1)
  1663    && clobber(sh)
  1664    -> @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
  1665  
  1666  (ORL
  1667      s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))
  1668      or:(ORL
  1669          s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))
   1670          y))
  1671    && i1 == i0+1
  1672    && j1 == j0+8
  1673    && j0 % 16 == 0
  1674    && x0.Uses == 1
  1675    && x1.Uses == 1
  1676    && s0.Uses == 1
  1677    && s1.Uses == 1
  1678    && or.Uses == 1
  1679    && mergePoint(b,x0,x1) != nil
  1680    && clobber(x0)
  1681    && clobber(x1)
  1682    && clobber(s0)
  1683    && clobber(s1)
  1684    && clobber(or)
  1685    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1686  
  1687  (ORQ
  1688      s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))
  1689      or:(ORQ
  1690          s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
   1691          y))
  1692    && i1 == i0+1
  1693    && j1 == j0+8
  1694    && j0 % 16 == 0
  1695    && x0.Uses == 1
  1696    && x1.Uses == 1
  1697    && s0.Uses == 1
  1698    && s1.Uses == 1
  1699    && or.Uses == 1
  1700    && mergePoint(b,x0,x1) != nil
  1701    && clobber(x0)
  1702    && clobber(x1)
  1703    && clobber(s0)
  1704    && clobber(s1)
  1705    && clobber(or)
  1706    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1707  
  1708  (ORQ
  1709      s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
  1710      or:(ORQ
  1711          s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))
   1712          y))
  1713    && i1 == i0+2
  1714    && j1 == j0+16
  1715    && j0 % 32 == 0
  1716    && x0.Uses == 1
  1717    && x1.Uses == 1
  1718    && s0.Uses == 1
  1719    && s1.Uses == 1
  1720    && or.Uses == 1
  1721    && mergePoint(b,x0,x1) != nil
  1722    && clobber(x0)
  1723    && clobber(x1)
  1724    && clobber(s0)
  1725    && clobber(s1)
  1726    && clobber(or)
  1727    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
  1728  
  1729  // Little-endian indexed loads
  1730  
  1731  (ORL                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1732      sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1733    && i1 == i0+1
  1734    && x0.Uses == 1
  1735    && x1.Uses == 1
  1736    && sh.Uses == 1
  1737    && mergePoint(b,x0,x1) != nil
  1738    && clobber(x0)
  1739    && clobber(x1)
  1740    && clobber(sh)
  1741    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1742  
  1743  (ORQ                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1744      sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1745    && i1 == i0+1
  1746    && x0.Uses == 1
  1747    && x1.Uses == 1
  1748    && sh.Uses == 1
  1749    && mergePoint(b,x0,x1) != nil
  1750    && clobber(x0)
  1751    && clobber(x1)
  1752    && clobber(sh)
  1753    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1754  
  1755  (ORL                   x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1756      sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1757    && i1 == i0+2
  1758    && x0.Uses == 1
  1759    && x1.Uses == 1
  1760    && sh.Uses == 1
  1761    && mergePoint(b,x0,x1) != nil
  1762    && clobber(x0)
  1763    && clobber(x1)
  1764    && clobber(sh)
  1765    -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
  1766  
  1767  (ORQ                   x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1768      sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1769    && i1 == i0+2
  1770    && x0.Uses == 1
  1771    && x1.Uses == 1
  1772    && sh.Uses == 1
  1773    && mergePoint(b,x0,x1) != nil
  1774    && clobber(x0)
  1775    && clobber(x1)
  1776    && clobber(sh)
  1777    -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
  1778  
  1779  (ORQ                   x0:(MOVLloadidx1 [i0] {s} p idx mem)
  1780      sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
  1781    && i1 == i0+4
  1782    && x0.Uses == 1
  1783    && x1.Uses == 1
  1784    && sh.Uses == 1
  1785    && mergePoint(b,x0,x1) != nil
  1786    && clobber(x0)
  1787    && clobber(x1)
  1788    && clobber(sh)
  1789    -> @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
  1790  
  1791  (ORL
  1792      s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1793      or:(ORL
  1794          s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
   1795          y))
  1796    && i1 == i0+1
  1797    && j1 == j0+8
  1798    && j0 % 16 == 0
  1799    && x0.Uses == 1
  1800    && x1.Uses == 1
  1801    && s0.Uses == 1
  1802    && s1.Uses == 1
  1803    && or.Uses == 1
  1804    && mergePoint(b,x0,x1) != nil
  1805    && clobber(x0)
  1806    && clobber(x1)
  1807    && clobber(s0)
  1808    && clobber(s1)
  1809    && clobber(or)
  1810    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
  1811  
  1812  (ORQ
  1813      s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1814      or:(ORQ
  1815          s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
   1816          y))
  1817    && i1 == i0+1
  1818    && j1 == j0+8
  1819    && j0 % 16 == 0
  1820    && x0.Uses == 1
  1821    && x1.Uses == 1
  1822    && s0.Uses == 1
  1823    && s1.Uses == 1
  1824    && or.Uses == 1
  1825    && mergePoint(b,x0,x1) != nil
  1826    && clobber(x0)
  1827    && clobber(x1)
  1828    && clobber(s0)
  1829    && clobber(s1)
  1830    && clobber(or)
  1831    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
  1832  
  1833  (ORQ
  1834      s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  1835      or:(ORQ
  1836          s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))
   1837          y))
  1838    && i1 == i0+2
  1839    && j1 == j0+16
  1840    && j0 % 32 == 0
  1841    && x0.Uses == 1
  1842    && x1.Uses == 1
  1843    && s0.Uses == 1
  1844    && s1.Uses == 1
  1845    && or.Uses == 1
  1846    && mergePoint(b,x0,x1) != nil
  1847    && clobber(x0)
  1848    && clobber(x1)
  1849    && clobber(s0)
  1850    && clobber(s1)
  1851    && clobber(or)
  1852    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
  1853  
  1854  // Big-endian loads
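         // These match byte-reversed chains such as encoding/binary.BigEndian,
         // e.g.
         //
         //	func be16(b []byte) uint16 {
         //		return uint16(b[1]) | uint16(b[0])<<8
         //	}
         //
         // which becomes a MOVWload plus ROLWconst [8] (a 16-bit byte swap);
         // the wider variants use BSWAPL/BSWAPQ.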
  1855  
  1856  (ORL
  1857                         x1:(MOVBload [i1] {s} p mem)
  1858      sh:(SHLLconst [8]  x0:(MOVBload [i0] {s} p mem)))
  1859    && i1 == i0+1
  1860    && x0.Uses == 1
  1861    && x1.Uses == 1
  1862    && sh.Uses == 1
  1863    && mergePoint(b,x0,x1) != nil
  1864    && clobber(x0)
  1865    && clobber(x1)
  1866    && clobber(sh)
  1867    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1868  
  1869  (ORQ
  1870                         x1:(MOVBload [i1] {s} p mem)
  1871      sh:(SHLQconst [8]  x0:(MOVBload [i0] {s} p mem)))
  1872    && i1 == i0+1
  1873    && x0.Uses == 1
  1874    && x1.Uses == 1
  1875    && sh.Uses == 1
  1876    && mergePoint(b,x0,x1) != nil
  1877    && clobber(x0)
  1878    && clobber(x1)
  1879    && clobber(sh)
  1880    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1881  
  1882  (ORL
  1883                          r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1884      sh:(SHLLconst [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1885    && i1 == i0+2
  1886    && x0.Uses == 1
  1887    && x1.Uses == 1
  1888    && r0.Uses == 1
  1889    && r1.Uses == 1
  1890    && sh.Uses == 1
  1891    && mergePoint(b,x0,x1) != nil
  1892    && clobber(x0)
  1893    && clobber(x1)
  1894    && clobber(r0)
  1895    && clobber(r1)
  1896    && clobber(sh)
  1897    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1898  
  1899  (ORQ
  1900                          r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1901      sh:(SHLQconst [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1902    && i1 == i0+2
  1903    && x0.Uses == 1
  1904    && x1.Uses == 1
  1905    && r0.Uses == 1
  1906    && r1.Uses == 1
  1907    && sh.Uses == 1
  1908    && mergePoint(b,x0,x1) != nil
  1909    && clobber(x0)
  1910    && clobber(x1)
  1911    && clobber(r0)
  1912    && clobber(r1)
  1913    && clobber(sh)
  1914    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1915  
  1916  (ORQ
  1917                          r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
  1918      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
  1919    && i1 == i0+4
  1920    && x0.Uses == 1
  1921    && x1.Uses == 1
  1922    && r0.Uses == 1
  1923    && r1.Uses == 1
  1924    && sh.Uses == 1
  1925    && mergePoint(b,x0,x1) != nil
  1926    && clobber(x0)
  1927    && clobber(x1)
  1928    && clobber(r0)
  1929    && clobber(r1)
  1930    && clobber(sh)
  1931    -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
  1932  
  1933  (ORL
  1934      s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))
  1935      or:(ORL
  1936          s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))
   1937          y))
  1938    && i1 == i0+1
  1939    && j1 == j0-8
  1940    && j1 % 16 == 0
  1941    && x0.Uses == 1
  1942    && x1.Uses == 1
  1943    && s0.Uses == 1
  1944    && s1.Uses == 1
  1945    && or.Uses == 1
  1946    && mergePoint(b,x0,x1) != nil
  1947    && clobber(x0)
  1948    && clobber(x1)
  1949    && clobber(s0)
  1950    && clobber(s1)
  1951    && clobber(or)
  1952    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  1953  
  1954  (ORQ
  1955      s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
  1956      or:(ORQ
  1957          s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))
   1958          y))
  1959    && i1 == i0+1
  1960    && j1 == j0-8
  1961    && j1 % 16 == 0
  1962    && x0.Uses == 1
  1963    && x1.Uses == 1
  1964    && s0.Uses == 1
  1965    && s1.Uses == 1
  1966    && or.Uses == 1
  1967    && mergePoint(b,x0,x1) != nil
  1968    && clobber(x0)
  1969    && clobber(x1)
  1970    && clobber(s0)
  1971    && clobber(s1)
  1972    && clobber(or)
  1973    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  1974  
  1975  (ORQ
  1976      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
  1977      or:(ORQ
  1978          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
   1979          y))
  1980    && i1 == i0+2
  1981    && j1 == j0-16
  1982    && j1 % 32 == 0
  1983    && x0.Uses == 1
  1984    && x1.Uses == 1
  1985    && r0.Uses == 1
  1986    && r1.Uses == 1
  1987    && s0.Uses == 1
  1988    && s1.Uses == 1
  1989    && or.Uses == 1
  1990    && mergePoint(b,x0,x1) != nil
  1991    && clobber(x0)
  1992    && clobber(x1)
  1993    && clobber(r0)
  1994    && clobber(r1)
  1995    && clobber(s0)
  1996    && clobber(s1)
  1997    && clobber(or)
  1998    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
  1999  
  2000  // Big-endian indexed loads
  2001  
  2002  (ORL
  2003                         x1:(MOVBloadidx1 [i1] {s} p idx mem)
  2004      sh:(SHLLconst [8]  x0:(MOVBloadidx1 [i0] {s} p idx mem)))
  2005    && i1 == i0+1
  2006    && x0.Uses == 1
  2007    && x1.Uses == 1
  2008    && sh.Uses == 1
  2009    && mergePoint(b,x0,x1) != nil
  2010    && clobber(x0)
  2011    && clobber(x1)
  2012    && clobber(sh)
  2013    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
  2014  
  2015  (ORQ
  2016                         x1:(MOVBloadidx1 [i1] {s} p idx mem)
  2017      sh:(SHLQconst [8]  x0:(MOVBloadidx1 [i0] {s} p idx mem)))
  2018    && i1 == i0+1
  2019    && x0.Uses == 1
  2020    && x1.Uses == 1
  2021    && sh.Uses == 1
  2022    && mergePoint(b,x0,x1) != nil
  2023    && clobber(x0)
  2024    && clobber(x1)
  2025    && clobber(sh)
  2026    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
  2027  
  2028  (ORL
  2029                          r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  2030      sh:(SHLLconst [16]  r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
  2031    && i1 == i0+2
  2032    && x0.Uses == 1
  2033    && x1.Uses == 1
  2034    && r0.Uses == 1
  2035    && r1.Uses == 1
  2036    && sh.Uses == 1
  2037    && mergePoint(b,x0,x1) != nil
  2038    && clobber(x0)
  2039    && clobber(x1)
  2040    && clobber(r0)
  2041    && clobber(r1)
  2042    && clobber(sh)
  2043    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
  2044  
  2045  (ORQ
  2046                          r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  2047      sh:(SHLQconst [16]  r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
  2048    && i1 == i0+2
  2049    && x0.Uses == 1
  2050    && x1.Uses == 1
  2051    && r0.Uses == 1
  2052    && r1.Uses == 1
  2053    && sh.Uses == 1
  2054    && mergePoint(b,x0,x1) != nil
  2055    && clobber(x0)
  2056    && clobber(x1)
  2057    && clobber(r0)
  2058    && clobber(r1)
  2059    && clobber(sh)
  2060    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
  2061  
  2062  (ORQ
  2063                          r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))
  2064      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
  2065    && i1 == i0+4
  2066    && x0.Uses == 1
  2067    && x1.Uses == 1
  2068    && r0.Uses == 1
  2069    && r1.Uses == 1
  2070    && sh.Uses == 1
  2071    && mergePoint(b,x0,x1) != nil
  2072    && clobber(x0)
  2073    && clobber(x1)
  2074    && clobber(r0)
  2075    && clobber(r1)
  2076    && clobber(sh)
  2077    -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
  2078  
  2079  (ORL
  2080      s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  2081      or:(ORL
  2082          s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  2083          y))
  2084    && i1 == i0+1
  2085    && j1 == j0-8
  2086    && j1 % 16 == 0
  2087    && x0.Uses == 1
  2088    && x1.Uses == 1
  2089    && s0.Uses == 1
  2090    && s1.Uses == 1
  2091    && or.Uses == 1
  2092    && mergePoint(b,x0,x1) != nil
  2093    && clobber(x0)
  2094    && clobber(x1)
  2095    && clobber(s0)
  2096    && clobber(s1)
  2097    && clobber(or)
  2098    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
  2099  
  2100  (ORQ
  2101      s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  2102      or:(ORQ
  2103          s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  2104          y))
  2105    && i1 == i0+1
  2106    && j1 == j0-8
  2107    && j1 % 16 == 0
  2108    && x0.Uses == 1
  2109    && x1.Uses == 1
  2110    && s0.Uses == 1
  2111    && s1.Uses == 1
  2112    && or.Uses == 1
  2113    && mergePoint(b,x0,x1) != nil
  2114    && clobber(x0)
  2115    && clobber(x1)
  2116    && clobber(s0)
  2117    && clobber(s1)
  2118    && clobber(or)
  2119    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
  2120  
  2121  (ORQ
  2122      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))
  2123      or:(ORQ
  2124          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  2125          y))
  2126    && i1 == i0+2
  2127    && j1 == j0-16
  2128    && j1 % 32 == 0
  2129    && x0.Uses == 1
  2130    && x1.Uses == 1
  2131    && r0.Uses == 1
  2132    && r1.Uses == 1
  2133    && s0.Uses == 1
  2134    && s1.Uses == 1
  2135    && or.Uses == 1
  2136    && mergePoint(b,x0,x1) != nil
  2137    && clobber(x0)
  2138    && clobber(x1)
  2139    && clobber(r0)
  2140    && clobber(r1)
  2141    && clobber(s0)
  2142    && clobber(s1)
  2143    && clobber(or)
  2144    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
  2145  
  2146  // Combine two byte stores + shift into rolw 8 + word store
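        // A sketch of Go source that matches this rule (the rule, not the
        // snippet, is authoritative):
        //
        //     import "encoding/binary"
        //
        //     func put16(b []byte, v uint16) {
        //         binary.BigEndian.PutUint16(b, v) // two byte stores -> ROLW $8 + word store
        //     }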
  2147  (MOVBstore [i] {s} p w
  2148    x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
  2149    && x0.Uses == 1
  2150    && clobber(x0)
  2151    -> (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
  2152  
  2153  (MOVBstoreidx1 [i] {s} p idx w
  2154    x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
  2155    && x0.Uses == 1
  2156    && clobber(x0)
  2157    -> (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
  2158  
  2159  // Combine stores + shifts into bswap and larger (unaligned) stores
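        // For example, the wider big-endian encoders reduce to a single
        // byte-swapped store (a sketch, assuming encoding/binary):
        //
        //     import "encoding/binary"
        //
        //     func put32(b []byte, v uint32) {
        //         binary.BigEndian.PutUint32(b, v) // four byte stores -> BSWAPL + MOVL store
        //     }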
  2160  (MOVBstore [i] {s} p w
  2161    x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)
  2162    x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)
  2163    x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
  2164    && x0.Uses == 1
  2165    && x1.Uses == 1
  2166    && x2.Uses == 1
  2167    && clobber(x0)
  2168    && clobber(x1)
  2169    && clobber(x2)
  2170    -> (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
  2171  
  2172  (MOVBstoreidx1 [i] {s} p idx w
  2173    x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w)
  2174    x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w)
  2175    x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
  2176    && x0.Uses == 1
  2177    && x1.Uses == 1
  2178    && x2.Uses == 1
  2179    && clobber(x0)
  2180    && clobber(x1)
  2181    && clobber(x2)
  2182    -> (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
  2183  
  2184  (MOVBstore [i] {s} p w
  2185    x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
  2186    x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)
  2187    x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)
  2188    x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)
  2189    x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)
  2190    x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)
  2191    x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
  2192    && x0.Uses == 1
  2193    && x1.Uses == 1
  2194    && x2.Uses == 1
  2195    && x3.Uses == 1
  2196    && x4.Uses == 1
  2197    && x5.Uses == 1
  2198    && x6.Uses == 1
  2199    && clobber(x0)
  2200    && clobber(x1)
  2201    && clobber(x2)
  2202    && clobber(x3)
  2203    && clobber(x4)
  2204    && clobber(x5)
  2205    && clobber(x6)
  2206    -> (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
  2207  
  2208  (MOVBstoreidx1 [i] {s} p idx w
  2209    x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w)
  2210    x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w)
  2211    x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w)
  2212    x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w)
  2213    x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w)
  2214    x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w)
  2215    x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
  2216    && x0.Uses == 1
  2217    && x1.Uses == 1
  2218    && x2.Uses == 1
  2219    && x3.Uses == 1
  2220    && x4.Uses == 1
  2221    && x5.Uses == 1
  2222    && x6.Uses == 1
  2223    && clobber(x0)
  2224    && clobber(x1)
  2225    && clobber(x2)
  2226    && clobber(x3)
  2227    && clobber(x4)
  2228    && clobber(x5)
  2229    && clobber(x6)
  2230    -> (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
  2231  
  2232  // Combine constant stores into larger (unaligned) stores.
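        // E.g. initializing adjacent byte fields with constants can become one
        // wider constant store (a sketch; T is a hypothetical contiguous struct):
        //
        //     type T struct{ a, b byte }
        //
        //     func init2(t *T) {
        //         t.a = 1 // MOVBstoreconst val=1 off=0
        //         t.b = 2 // MOVBstoreconst val=2 off=1 -> MOVWstoreconst val=0x0201 off=0
        //     }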
  2233  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  2234    && x.Uses == 1
  2235    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  2236    && clobber(x)
  2237    -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  2238  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  2239    && x.Uses == 1
  2240    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2241    && clobber(x)
  2242    -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  2243  (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  2244    && x.Uses == 1
  2245    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2246    && clobber(x)
  2247    -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2248  (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
  2249    && config.useSSE
  2250    && x.Uses == 1
  2251    && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off()
  2252    && ValAndOff(c).Val() == 0
  2253    && ValAndOff(c2).Val() == 0
  2254    && clobber(x)
  2255    -> (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
  2256  
  2257  (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  2258    && x.Uses == 1
  2259    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  2260    && clobber(x)
  2261    -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  2262  (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  2263    && x.Uses == 1
  2264    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2265    && clobber(x)
  2266    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
  2267  (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
  2268    && x.Uses == 1
  2269    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2270    && clobber(x)
  2271    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2272  
  2273  (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  2274    && x.Uses == 1
  2275    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2276    && clobber(x)
  2277    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
  2278  (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
  2279    && x.Uses == 1
  2280    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2281    && clobber(x)
  2282    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2283  
  2284  // Combine stores into larger (unaligned) stores.
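        // E.g. little-endian encoding stores byte-sized slices of the same value
        // at adjacent offsets, which these rules fuse (a sketch):
        //
        //     import "encoding/binary"
        //
        //     func put64(b []byte, v uint64) {
        //         binary.LittleEndian.PutUint64(b, v) // eight byte stores -> one MOVQ store
        //     }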
  2285  (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  2286    && x.Uses == 1
  2287    && clobber(x)
  2288    -> (MOVWstore [i-1] {s} p w mem)
  2289  (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
  2290    && x.Uses == 1
  2291    && clobber(x)
  2292    -> (MOVWstore [i-1] {s} p w0 mem)
  2293  (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  2294    && x.Uses == 1
  2295    && clobber(x)
  2296    -> (MOVLstore [i-2] {s} p w mem)
  2297  (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
  2298    && x.Uses == 1
  2299    && clobber(x)
  2300    -> (MOVLstore [i-2] {s} p w0 mem)
  2301  (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  2302    && x.Uses == 1
  2303    && clobber(x)
  2304    -> (MOVQstore [i-4] {s} p w mem)
  2305  (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  2306    && x.Uses == 1
  2307    && clobber(x)
  2308    -> (MOVQstore [i-4] {s} p w0 mem)
  2309  
  2310  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  2311    && x.Uses == 1
  2312    && clobber(x)
  2313    -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
  2314  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
  2315    && x.Uses == 1
  2316    && clobber(x)
  2317    -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  2318  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  2319    && x.Uses == 1
  2320    && clobber(x)
  2321    -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
  2322  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  2323    && x.Uses == 1
  2324    && clobber(x)
  2325    -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
  2326  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
  2327    && x.Uses == 1
  2328    && clobber(x)
  2329    -> (MOVQstoreidx1 [i-4] {s} p idx w mem)
  2330  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  2331    && x.Uses == 1
  2332    && clobber(x)
  2333    -> (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
  2334  
  2335  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  2336    && x.Uses == 1
  2337    && clobber(x)
  2338    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
  2339  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  2340    && x.Uses == 1
  2341    && clobber(x)
  2342    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
  2343  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
  2344    && x.Uses == 1
  2345    && clobber(x)
  2346    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
  2347  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  2348    && x.Uses == 1
  2349    && clobber(x)
  2350    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
  2351  
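        // The rules below combine adjacent load/store pairs into a single wider
        // copy, e.g. from element-wise copies of small arrays (a sketch):
        //
        //     func cp2(dst, src *[2]byte) {
        //         dst[0] = src[0] // MOVBload + MOVBstore
        //         dst[1] = src[1] // pair merged into MOVWload + MOVWstore
        //     }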
  2352  (MOVBstore [i] {s} p
  2353    x1:(MOVBload [j] {s2} p2 mem)
  2354      mem2:(MOVBstore [i-1] {s} p
  2355        x2:(MOVBload [j-1] {s2} p2 mem) mem))
  2356    && x1.Uses == 1
  2357    && x2.Uses == 1
  2358    && mem2.Uses == 1
  2359    && clobber(x1)
  2360    && clobber(x2)
  2361    && clobber(mem2)
  2362    -> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
  2363  
  2364  (MOVWstore [i] {s} p
  2365    x1:(MOVWload [j] {s2} p2 mem)
  2366      mem2:(MOVWstore [i-2] {s} p
  2367        x2:(MOVWload [j-2] {s2} p2 mem) mem))
  2368    && x1.Uses == 1
  2369    && x2.Uses == 1
  2370    && mem2.Uses == 1
  2371    && clobber(x1)
  2372    && clobber(x2)
  2373    && clobber(mem2)
  2374    -> (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
  2375  
  2376  (MOVLstore [i] {s} p
  2377    x1:(MOVLload [j] {s2} p2 mem)
  2378      mem2:(MOVLstore [i-4] {s} p
  2379        x2:(MOVLload [j-4] {s2} p2 mem) mem))
  2380    && x1.Uses == 1
  2381    && x2.Uses == 1
  2382    && mem2.Uses == 1
  2383    && clobber(x1)
  2384    && clobber(x2)
  2385    && clobber(mem2)
  2386    -> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
  2387  
  2388  // This is somewhat tricky. There may be pointers in SSE registers due to the rule below.
  2389  // However, those registers shouldn't live across a GC safepoint.
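        // E.g. copying two adjacent 8-byte words element-wise can become one
        // 16-byte SSE copy (a sketch):
        //
        //     func cp16(dst, src *[2]uint64) {
        //         dst[0] = src[0]
        //         dst[1] = src[1] // pair merged into MOVOload + MOVOstore when config.useSSE
        //     }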
  2390  (MOVQstore [i] {s} p
  2391    x1:(MOVQload [j] {s2} p2 mem)
  2392      mem2:(MOVQstore [i-8] {s} p
  2393        x2:(MOVQload [j-8] {s2} p2 mem) mem))
  2394    && x1.Uses == 1
  2395    && x2.Uses == 1
  2396    && mem2.Uses == 1
  2397    && config.useSSE
  2398    && clobber(x1)
  2399    && clobber(x2)
  2400    && clobber(mem2)
  2401    -> (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem)
  2402  
  2403  
  2404  // amd64p32 rules
  2405  // Same as the rules above, but with 32-bit instead of 64-bit pointer arithmetic.
  2406  // LEAQ,ADDQ -> LEAL,ADDL
  2407  (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
  2408  (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
  2409  
  2410  (MOVQload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2411  	(MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2412  (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2413  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2414  (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2415  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2416  (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2417  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2418  
  2419  (MOVQstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2420  	(MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2421  (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2422  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2423  (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2424  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2425  (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2426  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2427  
  2428  (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2429  	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2430  (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2431  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2432  (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2433  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2434  (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2435  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2436  
  2437  (MOVQload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload  [off1+off2] {sym} ptr mem)
  2438  (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
  2439  (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
  2440  (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
  2441  (MOVQstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore  [off1+off2] {sym} ptr val mem)
  2442  (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
  2443  (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
  2444  (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
  2445  (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2446  	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2447  (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2448  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2449  (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2450  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2451  (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2452  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2453  
  2454  // Merge load and op
  2455  // TODO: add indexed variants?
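        // E.g. an arithmetic op whose operand is a just-loaded value can take
        // the memory operand directly (a sketch):
        //
        //     func addmem(x int64, p *int64) int64 {
        //         return x + *p // MOVQload + ADDQ -> ADDQmem (ADDQ with a memory operand)
        //     }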
  2456  (ADDQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDQmem x [off] {sym} ptr mem)
  2457  (ADDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDLmem x [off] {sym} ptr mem)
  2458  (SUBQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBQmem x [off] {sym} ptr mem)
  2459  (SUBL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBLmem x [off] {sym} ptr mem)
  2460  (ANDQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ANDQmem x [off] {sym} ptr mem)
  2461  (ANDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ANDLmem x [off] {sym} ptr mem)
  2462  (ORQ  x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORQmem x [off] {sym} ptr mem)
  2463  (ORL  x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORLmem x [off] {sym} ptr mem)
  2464  (XORQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (XORQmem x [off] {sym} ptr mem)
  2465  (XORL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (XORLmem x [off] {sym} ptr mem)
  2466  (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSDmem x [off] {sym} ptr mem)
  2467  (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSSmem x [off] {sym} ptr mem)
  2468  (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBSDmem x [off] {sym} ptr mem)
  2469  (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBSSmem x [off] {sym} ptr mem)
  2470  (MULSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (MULSDmem x [off] {sym} ptr mem)
  2471  (MULSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (MULSSmem x [off] {sym} ptr mem)
  2472  
  2473  // Merge ADDQconst and LEAQ into atomic loads.
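        // E.g. an atomic load of a struct field folds the field offset into the
        // load (a sketch; S and its layout are hypothetical):
        //
        //     import "sync/atomic"
        //
        //     type S struct{ pad, n uint64 }
        //
        //     func load(s *S) uint64 {
        //         return atomic.LoadUint64(&s.n) // LEAQ [8] folded into MOVQatomicload [8]
        //     }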
  2474  (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2475  	(MOVQatomicload [off1+off2] {sym} ptr mem)
  2476  (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2477  	(MOVLatomicload [off1+off2] {sym} ptr mem)
  2478  (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  2479  	(MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
  2480  (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  2481  	(MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
  2482  
  2483  // Merge ADDQconst and LEAQ into atomic stores.
  2484  (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2485  	(XCHGQ [off1+off2] {sym} val ptr mem)
  2486  (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
  2487  	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2488  (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2489  	(XCHGL [off1+off2] {sym} val ptr mem)
  2490  (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
  2491  	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2492  
  2493  // Merge ADDQconst into atomic adds.
  2494  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
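        // E.g. (a sketch; C and its layout are hypothetical):
        //
        //     import "sync/atomic"
        //
        //     type C struct{ pad, n uint64 }
        //
        //     func inc(c *C) {
        //         atomic.AddUint64(&c.n, 1) // ADDQconst folded into XADDQlock's offset
        //     }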
  2495  (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2496  	(XADDQlock [off1+off2] {sym} val ptr mem)
  2497  (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2498  	(XADDLlock [off1+off2] {sym} val ptr mem)
  2499  
  2500  // Merge ADDQconst into atomic compare and swaps.
  2501  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  2502  (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
  2503  	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  2504  (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
  2505  	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
  2506  
  2507  // We don't need the conditional move if we know the arg of BSF is not zero.
  2508  (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 -> x
  2509  // Extension is unnecessary for trailing zeros.
  2510  (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) -> (BSFQ (ORQconst <t> [1<<8] x))
  2511  (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst <t> [1<<16] x))
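        // For example, a trailing-zeros helper that ORs in a sentinel bit above
        // the value's width exercises both rules above (a sketch using math/bits):
        //
        //     import "math/bits"
        //
        //     func ctz8(x uint8) int {
        //         return bits.TrailingZeros64(uint64(x) | 1<<8) // BSFQ input provably nonzero
        //     }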
  2512  
  2513  // Redundant sign/zero extensions
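        // E.g. only the innermost widening conversion in a chain is needed
        // (a sketch):
        //
        //     func widen(x int8) int64 {
        //         return int64(int16(x)) // MOVWQSX (MOVBQSX x) -> MOVBQSX x
        //     }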
  2514  (MOVLQSX x:(MOVLQSX _)) -> x
  2515  (MOVLQSX x:(MOVWQSX _)) -> x
  2516  (MOVLQSX x:(MOVBQSX _)) -> x
  2517  (MOVWQSX x:(MOVWQSX _)) -> x
  2518  (MOVWQSX x:(MOVBQSX _)) -> x
  2519  (MOVBQSX x:(MOVBQSX _)) -> x
  2520  (MOVLQZX x:(MOVLQZX _)) -> x
  2521  (MOVLQZX x:(MOVWQZX _)) -> x
  2522  (MOVLQZX x:(MOVBQZX _)) -> x
  2523  (MOVWQZX x:(MOVWQZX _)) -> x
  2524  (MOVWQZX x:(MOVBQZX _)) -> x
  2525  (MOVBQZX x:(MOVBQZX _)) -> x
  2526  
  2527  (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
  2528  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
  2529  	(ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  2530  (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  2531  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
  2532  	(ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  2533  
  2534  // float <-> int register moves, with no conversion.
  2535  // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
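        // E.g. (a sketch; the pattern comes from the intrinsics named above):
        //
        //     import "math"
        //
        //     func toBits(f float64) uint64 {
        //         return math.Float64bits(f) // MOVSDstore + MOVQload -> MOVQf2i
        //     }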
  2536  (MOVQload  [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) -> (MOVQf2i val)
  2537  (MOVLload  [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) -> (MOVLf2i val)
  2538  (MOVSDload [off] {sym} ptr (MOVQstore  [off] {sym} ptr val _)) -> (MOVQi2f val)
  2539  (MOVSSload [off] {sym} ptr (MOVLstore  [off] {sym} ptr val _)) -> (MOVLi2f val)
  2540  
  2541  // Other load-like ops.
  2542  (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y))
  2543  (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y))
  2544  (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y))
  2545  (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y))
  2546  (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y))
  2547  (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y))
  2548  ( ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y))
  2549  ( ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y))
  2550  (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y))
  2551  (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y))
  2552  
  2553  (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
  2554    (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
  2555  (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
  2556    (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
  2557  
  2558  (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y))
  2559  (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y))
  2560  (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y))
  2561  (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y))
  2562  (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y))
  2563  (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y))
  2564  
  2565  // Redirect stores to use the other register set.
  2566  (MOVQstore  [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem)
  2567  (MOVLstore  [off] {sym} ptr (MOVLf2i val) mem) -> (MOVSSstore [off] {sym} ptr val mem)
  2568  (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) -> (MOVQstore  [off] {sym} ptr val mem)
  2569  (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) -> (MOVLstore  [off] {sym} ptr val mem)
  2570  
  2571  // Load args directly into the register class where they will be used.
  2572  // We do this by just modifying the type of the Arg.
  2573  (MOVQf2i <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2574  (MOVLf2i <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2575  (MOVQi2f <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2576  (MOVLi2f <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})