github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/cmd/compile/internal/ssa/gen/AMD64.rules

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add64  x y) -> (ADDQ  x y)
     7  (AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
     8  (AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
     9  (Add32  x y) -> (ADDL  x y)
    10  (Add16  x y) -> (ADDL  x y)
    11  (Add8   x y) -> (ADDL  x y)
    12  (Add32F x y) -> (ADDSS x y)
    13  (Add64F x y) -> (ADDSD x y)
    14  
    15  (Sub64  x y) -> (SUBQ  x y)
    16  (SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
    17  (SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
    18  (Sub32  x y) -> (SUBL  x y)
    19  (Sub16  x y) -> (SUBL  x y)
    20  (Sub8   x y) -> (SUBL  x y)
    21  (Sub32F x y) -> (SUBSS x y)
    22  (Sub64F x y) -> (SUBSD x y)
    23  
    24  (Mul64  x y) -> (MULQ  x y)
    25  (Mul32  x y) -> (MULL  x y)
    26  (Mul16  x y) -> (MULL  x y)
    27  (Mul8   x y) -> (MULL  x y)
    28  (Mul32F x y) -> (MULSS x y)
    29  (Mul64F x y) -> (MULSD x y)
    30  
    31  (Div32F x y) -> (DIVSS x y)
    32  (Div64F x y) -> (DIVSD x y)
    33  
    34  (Div64  x y) -> (Select0 (DIVQ  x y))
    35  (Div64u x y) -> (Select0 (DIVQU x y))
    36  (Div32  x y) -> (Select0 (DIVL  x y))
    37  (Div32u x y) -> (Select0 (DIVLU x y))
    38  (Div16  x y) -> (Select0 (DIVW  x y))
    39  (Div16u x y) -> (Select0 (DIVWU x y))
    40  (Div8   x y) -> (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
    41  (Div8u  x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
    42  
    43  (Hmul64  x y) -> (HMULQ  x y)
    44  (Hmul64u x y) -> (HMULQU x y)
    45  (Hmul32  x y) -> (HMULL  x y)
    46  (Hmul32u x y) -> (HMULLU x y)
    47  
    48  (Mul64uhilo x y) -> (MULQU2 x y)
    49  (Div128u xhi xlo y) -> (DIVQU2 xhi xlo y)
    50  
    51  (Avg64u x y) -> (AVGQU x y)
    52  
    53  (Mod64  x y) -> (Select1 (DIVQ  x y))
    54  (Mod64u x y) -> (Select1 (DIVQU x y))
    55  (Mod32  x y) -> (Select1 (DIVL  x y))
    56  (Mod32u x y) -> (Select1 (DIVLU x y))
    57  (Mod16  x y) -> (Select1 (DIVW  x y))
    58  (Mod16u x y) -> (Select1 (DIVWU x y))
    59  (Mod8   x y) -> (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
    60  (Mod8u  x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
    61  
    62  (And64 x y) -> (ANDQ x y)
    63  (And32 x y) -> (ANDL x y)
    64  (And16 x y) -> (ANDL x y)
    65  (And8  x y) -> (ANDL x y)
    66  
    67  (Or64 x y) -> (ORQ x y)
    68  (Or32 x y) -> (ORL x y)
    69  (Or16 x y) -> (ORL x y)
    70  (Or8  x y) -> (ORL x y)
    71  
    72  (Xor64 x y) -> (XORQ x y)
    73  (Xor32 x y) -> (XORL x y)
    74  (Xor16 x y) -> (XORL x y)
    75  (Xor8  x y) -> (XORL x y)
    76  
    77  (Neg64  x) -> (NEGQ x)
    78  (Neg32  x) -> (NEGL x)
    79  (Neg16  x) -> (NEGL x)
    80  (Neg8   x) -> (NEGL x)
    81  (Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
    82  (Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
    83  
    84  (Com64 x) -> (NOTQ x)
    85  (Com32 x) -> (NOTL x)
    86  (Com16 x) -> (NOTL x)
    87  (Com8  x) -> (NOTL x)
    88  
    89  // Lowering boolean ops
    90  (AndB x y) -> (ANDL x y)
    91  (OrB x y) -> (ORL x y)
    92  (Not x) -> (XORLconst [1] x)
    93  
    94  // Lowering pointer arithmetic
    95  (OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
    96  (OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
    97  (OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
    98  
    99  // Lowering other arithmetic
   100  (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
   101  (Ctz32 x) -> (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
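         // (A note on the ORQ above, as an informal aside rather than a new rule: ORing in
         // 1<<32 guarantees BSFQ sees a nonzero input, and if x == 0 the lowest set bit is
         // bit 32, so Ctz32(0) correctly yields 32 without the CMOVQEQ the Ctz64 rule needs.)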
   102  
   103  (BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
   104  (BitLen32 x) -> (BitLen64 (MOVLQZX <typ.UInt64> x))
   105  
   106  (Bswap64 x) -> (BSWAPQ x)
   107  (Bswap32 x) -> (BSWAPL x)
   108  
   109  (PopCount64 x) -> (POPCNTQ x)
   110  (PopCount32 x) -> (POPCNTL x)
   111  (PopCount16 x) -> (POPCNTL (MOVWQZX <typ.UInt32> x))
   112  (PopCount8 x) -> (POPCNTL (MOVBQZX <typ.UInt32> x))
   113  
   114  (Sqrt x) -> (SQRTSD x)
   115  
   116  (RoundToEven x) -> (ROUNDSD [0] x)
   117  (Floor x)	-> (ROUNDSD [1] x)
   118  (Ceil x)  	-> (ROUNDSD [2] x)
   119  (Trunc x) 	-> (ROUNDSD [3] x)
   120  
   121  // Lowering extension
   122  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
   123  (SignExt8to16  x) -> (MOVBQSX x)
   124  (SignExt8to32  x) -> (MOVBQSX x)
   125  (SignExt8to64  x) -> (MOVBQSX x)
   126  (SignExt16to32 x) -> (MOVWQSX x)
   127  (SignExt16to64 x) -> (MOVWQSX x)
   128  (SignExt32to64 x) -> (MOVLQSX x)
   129  
   130  (ZeroExt8to16  x) -> (MOVBQZX x)
   131  (ZeroExt8to32  x) -> (MOVBQZX x)
   132  (ZeroExt8to64  x) -> (MOVBQZX x)
   133  (ZeroExt16to32 x) -> (MOVWQZX x)
   134  (ZeroExt16to64 x) -> (MOVWQZX x)
   135  (ZeroExt32to64 x) -> (MOVLQZX x)
   136  
   137  (Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63])
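         // (Slicemask must be all ones for x > 0 and zero for x == 0; negating x and then
         // arithmetic-shifting right by 63 broadcasts the sign bit of -x, which gives exactly that.)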
   138  
   139  // Lowering truncation
   140  // Because we ignore high parts of registers, truncates are just copies.
   141  (Trunc16to8  x) -> x
   142  (Trunc32to8  x) -> x
   143  (Trunc32to16 x) -> x
   144  (Trunc64to8  x) -> x
   145  (Trunc64to16 x) -> x
   146  (Trunc64to32 x) -> x
   147  
   148  // Lowering float <-> int
   149  (Cvt32to32F x) -> (CVTSL2SS x)
   150  (Cvt32to64F x) -> (CVTSL2SD x)
   151  (Cvt64to32F x) -> (CVTSQ2SS x)
   152  (Cvt64to64F x) -> (CVTSQ2SD x)
   153  
   154  (Cvt32Fto32 x) -> (CVTTSS2SL x)
   155  (Cvt32Fto64 x) -> (CVTTSS2SQ x)
   156  (Cvt64Fto32 x) -> (CVTTSD2SL x)
   157  (Cvt64Fto64 x) -> (CVTTSD2SQ x)
   158  
   159  (Cvt32Fto64F x) -> (CVTSS2SD x)
   160  (Cvt64Fto32F x) -> (CVTSD2SS x)
   161  
   162  (Round32F x) -> x
   163  (Round64F x) -> x
   164  
   165  // Lowering shifts
   166  // Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
   167  //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
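         // A rough trace of how that mask is materialized, taking the 64-bit case below as an
         // example: CMPQconst y [64] computes y-64 and sets the carry flag iff y < 64 (unsigned),
         // and SBBQcarrymask turns that flag into -1 (all ones) when carry is set and 0 otherwise.
         // So for y = 3 the mask is all ones and the SHLQ result passes through the AND, while
         // for y = 70 the mask is 0 and the whole expression collapses to 0.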
   168  (Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   169  (Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   170  (Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   171  (Lsh64x8  <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   172  
   173  (Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   174  (Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   175  (Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   176  (Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   177  
   178  (Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   179  (Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   180  (Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   181  (Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   182  
   183  (Lsh8x64 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   184  (Lsh8x32 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   185  (Lsh8x16 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   186  (Lsh8x8  <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   187  
   188  (Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   189  (Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   190  (Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   191  (Rsh64Ux8  <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   192  
   193  (Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   194  (Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   195  (Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   196  (Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   197  
   198  (Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
   199  (Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
   200  (Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
   201  (Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
   202  
   203  (Rsh8Ux64 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
   204  (Rsh8Ux32 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
   205  (Rsh8Ux16 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
   206  (Rsh8Ux8  <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
   207  
   208  // Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
   209  // We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
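         // Informal sketch of the OR/NOT trick for the 64-bit case below: CMPQconst y [64] sets
         // carry iff y < 64, so SBBQcarrymask is -1 for in-range shift amounts and 0 otherwise.
         // NOTing and ORing that into y leaves y unchanged when y < 64, and forces the shift
         // amount to -1 (low six bits = 63) when y >= 64, so SARQ fills with the sign bit.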
   210  (Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
   211  (Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
   212  (Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
   213  (Rsh64x8  <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
   214  
   215  (Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
   216  (Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
   217  (Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
   218  (Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
   219  
   220  (Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
   221  (Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
   222  (Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
   223  (Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
   224  
   225  (Rsh8x64 <t> x y)  -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
   226  (Rsh8x32 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
   227  (Rsh8x16 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
   228  (Rsh8x8  <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
   229  
   230  // Lowering comparisons
   231  (Less64  x y) -> (SETL (CMPQ x y))
   232  (Less32  x y) -> (SETL (CMPL x y))
   233  (Less16  x y) -> (SETL (CMPW x y))
   234  (Less8   x y) -> (SETL (CMPB x y))
   235  (Less64U x y) -> (SETB (CMPQ x y))
   236  (Less32U x y) -> (SETB (CMPL x y))
   237  (Less16U x y) -> (SETB (CMPW x y))
   238  (Less8U  x y) -> (SETB (CMPB x y))
   239  // Use SETGF with reversed operands to dodge NaN case
   240  (Less64F x y) -> (SETGF (UCOMISD y x))
   241  (Less32F x y) -> (SETGF (UCOMISS y x))
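         // (Sketch of why the swap works, assuming SETGF assembles to a "set if above" test:
         // a NaN operand makes UCOMISx report unordered by setting ZF, PF and CF, so the
         // above-test reads false and Less*F correctly yields false whenever an input is NaN.)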
   242  
   243  (Leq64  x y) -> (SETLE (CMPQ x y))
   244  (Leq32  x y) -> (SETLE (CMPL x y))
   245  (Leq16  x y) -> (SETLE (CMPW x y))
   246  (Leq8   x y) -> (SETLE (CMPB x y))
   247  (Leq64U x y) -> (SETBE (CMPQ x y))
   248  (Leq32U x y) -> (SETBE (CMPL x y))
   249  (Leq16U x y) -> (SETBE (CMPW x y))
   250  (Leq8U  x y) -> (SETBE (CMPB x y))
   251  // Use SETGEF with reversed operands to dodge NaN case
   252  (Leq64F x y) -> (SETGEF (UCOMISD y x))
   253  (Leq32F x y) -> (SETGEF (UCOMISS y x))
   254  
   255  (Greater64  x y) -> (SETG (CMPQ x y))
   256  (Greater32  x y) -> (SETG (CMPL x y))
   257  (Greater16  x y) -> (SETG (CMPW x y))
   258  (Greater8   x y) -> (SETG (CMPB x y))
   259  (Greater64U x y) -> (SETA (CMPQ x y))
   260  (Greater32U x y) -> (SETA (CMPL x y))
   261  (Greater16U x y) -> (SETA (CMPW x y))
   262  (Greater8U  x y) -> (SETA (CMPB x y))
    263  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
    264  // the bug is accommodated when the assembly is generated.
   265  (Greater64F x y) -> (SETGF (UCOMISD x y))
   266  (Greater32F x y) -> (SETGF (UCOMISS x y))
   267  
   268  (Geq64  x y) -> (SETGE (CMPQ x y))
   269  (Geq32  x y) -> (SETGE (CMPL x y))
   270  (Geq16  x y) -> (SETGE (CMPW x y))
   271  (Geq8   x y) -> (SETGE (CMPB x y))
   272  (Geq64U x y) -> (SETAE (CMPQ x y))
   273  (Geq32U x y) -> (SETAE (CMPL x y))
   274  (Geq16U x y) -> (SETAE (CMPW x y))
   275  (Geq8U  x y) -> (SETAE (CMPB x y))
    276  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
    277  // the bug is accommodated when the assembly is generated.
   278  (Geq64F x y) -> (SETGEF (UCOMISD x y))
   279  (Geq32F x y) -> (SETGEF (UCOMISS x y))
   280  
   281  (Eq64  x y) -> (SETEQ (CMPQ x y))
   282  (Eq32  x y) -> (SETEQ (CMPL x y))
   283  (Eq16  x y) -> (SETEQ (CMPW x y))
   284  (Eq8   x y) -> (SETEQ (CMPB x y))
   285  (EqB   x y) -> (SETEQ (CMPB x y))
   286  (EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
   287  (EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
   288  (Eq64F x y) -> (SETEQF (UCOMISD x y))
   289  (Eq32F x y) -> (SETEQF (UCOMISS x y))
   290  
   291  (Neq64  x y) -> (SETNE (CMPQ x y))
   292  (Neq32  x y) -> (SETNE (CMPL x y))
   293  (Neq16  x y) -> (SETNE (CMPW x y))
   294  (Neq8   x y) -> (SETNE (CMPB x y))
   295  (NeqB   x y) -> (SETNE (CMPB x y))
   296  (NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
   297  (NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
   298  (Neq64F x y) -> (SETNEF (UCOMISD x y))
   299  (Neq32F x y) -> (SETNEF (UCOMISS x y))
   300  
   301  (Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32
   302  
   303  // Lowering loads
   304  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
   305  (Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
   306  (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
   307  (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
   308  (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
   309  (Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
   310  
   311  // Lowering stores
    312  // These more-specific FP versions of the Store pattern should come first.
   313  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
   314  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
   315  
   316  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVQstore ptr val mem)
   317  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
   318  (Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
   319  (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
   320  
   321  // Lowering moves
   322  (Move [0] _ _ mem) -> mem
   323  (Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
   324  (Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
   325  (Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
   326  (Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
   327  (Move [16] dst src mem) && config.useSSE -> (MOVOstore dst (MOVOload src mem) mem)
   328  (Move [16] dst src mem) && !config.useSSE ->
   329  	(MOVQstore [8] dst (MOVQload [8] src mem)
   330  		(MOVQstore dst (MOVQload src mem) mem))
   331  (Move [3] dst src mem) ->
   332  	(MOVBstore [2] dst (MOVBload [2] src mem)
   333  		(MOVWstore dst (MOVWload src mem) mem))
   334  (Move [5] dst src mem) ->
   335  	(MOVBstore [4] dst (MOVBload [4] src mem)
   336  		(MOVLstore dst (MOVLload src mem) mem))
   337  (Move [6] dst src mem) ->
   338  	(MOVWstore [4] dst (MOVWload [4] src mem)
   339  		(MOVLstore dst (MOVLload src mem) mem))
   340  (Move [7] dst src mem) ->
   341  	(MOVLstore [3] dst (MOVLload [3] src mem)
   342  		(MOVLstore dst (MOVLload src mem) mem))
   343  (Move [s] dst src mem) && s > 8 && s < 16 ->
   344  	(MOVQstore [s-8] dst (MOVQload [s-8] src mem)
   345  		(MOVQstore dst (MOVQload src mem) mem))
   346  
   347  // Adjust moves to be a multiple of 16 bytes.
   348  (Move [s] dst src mem)
   349  	&& s > 16 && s%16 != 0 && s%16 <= 8 ->
   350  	(Move [s-s%16]
   351  		(OffPtr <dst.Type> dst [s%16])
   352  		(OffPtr <src.Type> src [s%16])
   353  		(MOVQstore dst (MOVQload src mem) mem))
   354  (Move [s] dst src mem)
   355  	&& s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE ->
   356  	(Move [s-s%16]
   357  		(OffPtr <dst.Type> dst [s%16])
   358  		(OffPtr <src.Type> src [s%16])
   359  		(MOVOstore dst (MOVOload src mem) mem))
   360  (Move [s] dst src mem)
   361  	&& s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE ->
   362  	(Move [s-s%16]
   363  		(OffPtr <dst.Type> dst [s%16])
   364  		(OffPtr <src.Type> src [s%16])
   365  		(MOVQstore [8] dst (MOVQload [8] src mem)
   366  			(MOVQstore dst (MOVQload src mem) mem)))
   367  
   368  // Medium copying uses a duff device.
   369  (Move [s] dst src mem)
   370  	&& s >= 32 && s <= 16*64 && s%16 == 0
   371  	&& !config.noDuffDevice ->
   372  	(DUFFCOPY [14*(64-s/16)] dst src mem)
   373  // 14 and 64 are magic constants.  14 is the number of bytes to encode:
   374  //	MOVUPS	(SI), X0
   375  //	ADDQ	$16, SI
   376  //	MOVUPS	X0, (DI)
   377  //	ADDQ	$16, DI
   378  // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
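         // For instance, with s = 32 the rule emits DUFFCOPY [14*(64-32/16)] = DUFFCOPY [868],
         // an entry point far enough into duffcopy that only the last two 16-byte blocks run.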
   379  
   380  // Large copying uses REP MOVSQ.
   381  (Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 ->
   382  	(REPMOVSQ dst src (MOVQconst [s/8]) mem)
   383  
   384  // Lowering Zero instructions
   385  (Zero [0] _ mem) -> mem
   386  (Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
   387  (Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
   388  (Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
   389  (Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
   390  
   391  (Zero [3] destptr mem) ->
   392  	(MOVBstoreconst [makeValAndOff(0,2)] destptr
   393  		(MOVWstoreconst [0] destptr mem))
   394  (Zero [5] destptr mem) ->
   395  	(MOVBstoreconst [makeValAndOff(0,4)] destptr
   396  		(MOVLstoreconst [0] destptr mem))
   397  (Zero [6] destptr mem) ->
   398  	(MOVWstoreconst [makeValAndOff(0,4)] destptr
   399  		(MOVLstoreconst [0] destptr mem))
   400  (Zero [7] destptr mem) ->
   401  	(MOVLstoreconst [makeValAndOff(0,3)] destptr
   402  		(MOVLstoreconst [0] destptr mem))
   403  
   404  // Strip off any fractional word zeroing.
   405  (Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE ->
   406  	(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
   407  		(MOVQstoreconst [0] destptr mem))
   408  
   409  // Zero small numbers of words directly.
   410  (Zero [16] destptr mem) && !config.useSSE ->
   411  	(MOVQstoreconst [makeValAndOff(0,8)] destptr
   412  		(MOVQstoreconst [0] destptr mem))
   413  (Zero [24] destptr mem) && !config.useSSE ->
   414  	(MOVQstoreconst [makeValAndOff(0,16)] destptr
   415  		(MOVQstoreconst [makeValAndOff(0,8)] destptr
   416  			(MOVQstoreconst [0] destptr mem)))
   417  (Zero [32] destptr mem) && !config.useSSE ->
   418  	(MOVQstoreconst [makeValAndOff(0,24)] destptr
   419  		(MOVQstoreconst [makeValAndOff(0,16)] destptr
   420  			(MOVQstoreconst [makeValAndOff(0,8)] destptr
   421  				(MOVQstoreconst [0] destptr mem))))
   422  
   423  (Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE ->
   424  	(MOVQstoreconst [makeValAndOff(0,s-8)] destptr
   425  		(MOVQstoreconst [0] destptr mem))
   426  
   427  // Adjust zeros to be a multiple of 16 bytes.
   428  (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE ->
   429  	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
   430  		(MOVOstore destptr (MOVOconst [0]) mem))
   431  
   432  (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE ->
   433  	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
   434  		(MOVQstoreconst [0] destptr mem))
   435  
   436  (Zero [16] destptr mem) && config.useSSE ->
   437  	(MOVOstore destptr (MOVOconst [0]) mem)
   438  (Zero [32] destptr mem) && config.useSSE ->
   439  	(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
   440  		(MOVOstore destptr (MOVOconst [0]) mem))
   441  (Zero [48] destptr mem) && config.useSSE ->
   442  	(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
   443  		(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
   444  			(MOVOstore destptr (MOVOconst [0]) mem)))
   445  (Zero [64] destptr mem) && config.useSSE ->
   446  	(MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0])
   447  		(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
   448  			(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
   449  				(MOVOstore destptr (MOVOconst [0]) mem))))
   450  
   451  // Medium zeroing uses a duff device.
   452  (Zero [s] destptr mem)
   453  	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice ->
   454  	(DUFFZERO [s] destptr (MOVOconst [0]) mem)
   455  
   456  // Large zeroing uses REP STOSQ.
   457  (Zero [s] destptr mem)
   458  	&& (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
   459  	&& s%8 == 0 ->
   460  	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
   461  
   462  // Lowering constants
   463  (Const8   [val]) -> (MOVLconst [val])
   464  (Const16  [val]) -> (MOVLconst [val])
   465  (Const32  [val]) -> (MOVLconst [val])
   466  (Const64  [val]) -> (MOVQconst [val])
   467  (Const32F [val]) -> (MOVSSconst [val])
   468  (Const64F [val]) -> (MOVSDconst [val])
   469  (ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
   470  (ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
   471  (ConstBool [b]) -> (MOVLconst [b])
   472  
   473  // Lowering calls
   474  (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
   475  (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
   476  (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
   477  
   478  // Miscellaneous
   479  (Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
   480  (Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
   481  (IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
   482  (IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
   483  (IsInBounds idx len) && config.PtrSize == 8 -> (SETB (CMPQ idx len))
   484  (IsInBounds idx len) && config.PtrSize == 4 -> (SETB (CMPL idx len))
   485  (IsSliceInBounds idx len) && config.PtrSize == 8 -> (SETBE (CMPQ idx len))
   486  (IsSliceInBounds idx len) && config.PtrSize == 4 -> (SETBE (CMPL idx len))
   487  (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
   488  (GetG mem) -> (LoweredGetG mem)
   489  (GetClosurePtr) -> (LoweredGetClosurePtr)
   490  (GetCallerPC) -> (LoweredGetCallerPC)
   491  (GetCallerSP) -> (LoweredGetCallerSP)
   492  (Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
   493  (Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
   494  
   495  (MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLmem [off] {sym} ptr x mem)
   496  (MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEmem [off] {sym} ptr x mem)
   497  (MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGmem [off] {sym} ptr x mem)
   498  (MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEmem [off] {sym} ptr x mem)
   499  (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQmem [off] {sym} ptr x mem)
   500  (MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEmem [off] {sym} ptr x mem)
   501  (MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBmem [off] {sym} ptr x mem)
   502  (MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEmem [off] {sym} ptr x mem)
   503  (MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAmem [off] {sym} ptr x mem)
   504  (MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEmem [off] {sym} ptr x mem)
   505  
   506  // block rewrites
   507  (If (SETL  cmp) yes no) -> (LT  cmp yes no)
   508  (If (SETLE cmp) yes no) -> (LE  cmp yes no)
   509  (If (SETG  cmp) yes no) -> (GT  cmp yes no)
   510  (If (SETGE cmp) yes no) -> (GE  cmp yes no)
   511  (If (SETEQ cmp) yes no) -> (EQ  cmp yes no)
   512  (If (SETNE cmp) yes no) -> (NE  cmp yes no)
   513  (If (SETB  cmp) yes no) -> (ULT cmp yes no)
   514  (If (SETBE cmp) yes no) -> (ULE cmp yes no)
   515  (If (SETA  cmp) yes no) -> (UGT cmp yes no)
   516  (If (SETAE cmp) yes no) -> (UGE cmp yes no)
   517  
   518  // Special case for floating point - LF/LEF not generated
   519  (If (SETGF  cmp) yes no) -> (UGT  cmp yes no)
   520  (If (SETGEF cmp) yes no) -> (UGE  cmp yes no)
   521  (If (SETEQF cmp) yes no) -> (EQF  cmp yes no)
   522  (If (SETNEF cmp) yes no) -> (NEF  cmp yes no)
   523  
   524  (If cond yes no) -> (NE (TESTB cond cond) yes no)
   525  
   526  // Atomic loads.  Other than preserving their ordering with respect to other loads, nothing special here.
   527  (AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
   528  (AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
   529  (AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
   530  (AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem)
   531  
   532  // Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
   533  // TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
   534  (AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
   535  (AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
   536  (AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
   537  (AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
   538  
   539  // Atomic exchanges.
   540  (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
   541  (AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem)
   542  
   543  // Atomic adds.
   544  (AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem))
   545  (AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (XADDQlock val ptr mem))
   546  (Select0 <t> (AddTupleFirst32 val tuple)) -> (ADDL val (Select0 <t> tuple))
   547  (Select1     (AddTupleFirst32   _ tuple)) -> (Select1 tuple)
   548  (Select0 <t> (AddTupleFirst64 val tuple)) -> (ADDQ val (Select0 <t> tuple))
   549  (Select1     (AddTupleFirst64   _ tuple)) -> (Select1 tuple)
   550  
   551  // Atomic compare and swap.
   552  (AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem)
   553  (AtomicCompareAndSwap64 ptr old new_ mem) -> (CMPXCHGQlock ptr old new_ mem)
   554  
   555  // Atomic memory updates.
   556  (AtomicAnd8 ptr val mem) -> (ANDBlock ptr val mem)
   557  (AtomicOr8 ptr val mem) -> (ORBlock ptr val mem)
   558  
   559  // Write barrier.
   560  (WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
   561  
   562  // ***************************
   563  // Above: lowering rules
   564  // Below: optimizations
   565  // ***************************
   566  // TODO: Should the optimizations be a separate pass?
   567  
   568  // Fold boolean tests into blocks
   569  (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT  cmp yes no)
   570  (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE  cmp yes no)
   571  (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT  cmp yes no)
   572  (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE  cmp yes no)
   573  (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ  cmp yes no)
   574  (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE  cmp yes no)
   575  (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) -> (ULT cmp yes no)
   576  (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
   577  (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) -> (UGT cmp yes no)
   578  (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
   579  
   580  // Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
   581  // Note that ULT and SETB check the carry flag; they are identical to CS and SETCS.
   582  // Same, mutatis mutandis, for UGE and SETAE, and CC and SETCC.
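         // For example, Go source along the lines of
         //     if x&(1<<uint(b)) != 0 { ... }
         // lowers to (NE (TESTQ (SHLQ (MOVQconst [1]) b) x)), which the rules below rewrite to
         // (ULT (BTQ b x)) so a single bit-test instruction performs the check.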
   583  ((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTL x y))
   584  ((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTQ x y))
   585  ((NE|EQ) (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
   586      -> ((ULT|UGE) (BTLconst [log2(c)] x))
   587  ((NE|EQ) (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
   588      -> ((ULT|UGE) (BTQconst [log2(c)] x))
   589  ((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
   590      -> ((ULT|UGE) (BTQconst [log2(c)] x))
   591  (SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SET(B|AE)  (BTL x y))
   592  (SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SET(B|AE)  (BTQ x y))
   593  (SET(NE|EQ) (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
   594      -> (SET(B|AE)  (BTLconst [log2(c)] x))
   595  (SET(NE|EQ) (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
   596      -> (SET(B|AE)  (BTQconst [log2(c)] x))
   597  (SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
   598      -> (SET(B|AE)  (BTQconst [log2(c)] x))
   599  // SET..mem variant
   600  (SET(NE|EQ)mem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
   601      -> (SET(B|AE)mem  [off] {sym} ptr (BTL x y) mem)
   602  (SET(NE|EQ)mem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
   603      -> (SET(B|AE)mem  [off] {sym} ptr (BTQ x y) mem)
   604  (SET(NE|EQ)mem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
   605      -> (SET(B|AE)mem  [off] {sym} ptr (BTLconst [log2(c)] x) mem)
   606  (SET(NE|EQ)mem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
   607      -> (SET(B|AE)mem  [off] {sym} ptr (BTQconst [log2(c)] x) mem)
   608  (SET(NE|EQ)mem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
   609      -> (SET(B|AE)mem  [off] {sym} ptr (BTQconst [log2(c)] x) mem)
   610  
   611  // Fold boolean negation into SETcc.
   612  (XORLconst [1] (SETNE x)) -> (SETEQ x)
   613  (XORLconst [1] (SETEQ x)) -> (SETNE x)
   614  (XORLconst [1] (SETL  x)) -> (SETGE x)
   615  (XORLconst [1] (SETGE x)) -> (SETL  x)
   616  (XORLconst [1] (SETLE x)) -> (SETG  x)
   617  (XORLconst [1] (SETG  x)) -> (SETLE x)
   618  (XORLconst [1] (SETB  x)) -> (SETAE x)
   619  (XORLconst [1] (SETAE x)) -> (SETB  x)
   620  (XORLconst [1] (SETBE x)) -> (SETA  x)
   621  (XORLconst [1] (SETA  x)) -> (SETBE x)
   622  
   623  // Convert BTQconst to BTLconst if possible. It has a shorter encoding.
   624  (BTQconst [c] x) && c < 32 -> (BTLconst [c] x)
   625  
   626  // Special case for floating point - LF/LEF not generated
   627  (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) -> (UGT  cmp yes no)
   628  (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE  cmp yes no)
   629  (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF  cmp yes no)
   630  (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF  cmp yes no)
   631  
   632  // Disabled because it interferes with the pattern match above and makes worse code.
   633  // (SETNEF x) -> (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
   634  // (SETEQF x) -> (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
   635  
   636  // fold constants into instructions
   637  (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
   638  (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
   639  
   640  (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
   641  (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
   642  (SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
   643  (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
   644  
   645  (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
   646  (MULL x (MOVLconst [c])) -> (MULLconst [c] x)
   647  
   648  (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
   649  (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
   650  
   651  (AND(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [c & d] x)
   652  (XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ d] x)
   653  
   654  (MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
   655  (MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
   656  
   657  (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
   658  (ORL x (MOVLconst [c])) -> (ORLconst [c] x)
   659  
   660  (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
   661  (XORL x (MOVLconst [c])) -> (XORLconst [c] x)
   662  
   663  (SHLQ x (MOV(Q|L)const [c])) -> (SHLQconst [c&63] x)
   664  (SHLL x (MOV(Q|L)const [c])) -> (SHLLconst [c&31] x)
   665  
   666  (SHRQ x (MOV(Q|L)const [c])) -> (SHRQconst [c&63] x)
   667  (SHRL x (MOV(Q|L)const [c])) -> (SHRLconst [c&31] x)
   668  (SHRW x (MOV(Q|L)const [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
   669  (SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 -> (MOVLconst [0])
   670  (SHRB x (MOV(Q|L)const [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
   671  (SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 -> (MOVLconst [0])
   672  
   673  (SARQ x (MOV(Q|L)const [c])) -> (SARQconst [c&63] x)
   674  (SARL x (MOV(Q|L)const [c])) -> (SARLconst [c&31] x)
   675  (SARW x (MOV(Q|L)const [c])) -> (SARWconst [min(c&31,15)] x)
   676  (SARB x (MOV(Q|L)const [c])) -> (SARBconst [min(c&31,7)] x)
   677  
   678  // Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
   679  ((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0  -> ((SHLQ|SHRQ|SARQ) x y)
   680  ((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0  -> ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
   681  ((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x y)
   682  ((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
   683  
   684  ((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0  -> ((SHLL|SHRL|SARL) x y)
   685  ((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0  -> ((SHLL|SHRL|SARL) x (NEGQ <t> y))
   686  ((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x y)
   687  ((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x (NEGQ <t> y))
   688  
   689  ((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0  -> ((SHLQ|SHRQ|SARQ) x y)
   690  ((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0  -> ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
   691  ((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x y)
   692  ((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
   693  
   694  ((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0  -> ((SHLL|SHRL|SARL) x y)
   695  ((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0  -> ((SHLL|SHRL|SARL) x (NEGL <t> y))
   696  ((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x y)
   697  ((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> ((SHLL|SHRL|SARL) x (NEGL <t> y))
   698  
   699  // Constant rotate instructions
   700  ((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c])
   701  ((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
   702  
   703  ((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
   704  ((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8  && t.Size() == 1 -> (ROLBconst x [c])
   705  
   706  (ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
   707  (ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
   708  (ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
   709  (ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
   710  
   711  // Non-constant rotates.
   712  // We want to issue a rotate when the Go source contains code like
   713  //     y &= 63
   714  //     x << y | x >> (64-y)
   715  // The shift rules above convert << to SHLx and >> to SHRx.
   716  // SHRx converts its shift argument from 64-y to -y.
   717  // A tricky situation occurs when y==0. Then the original code would be:
   718  //     x << 0 | x >> 64
   719  // But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
   720  // to force the second term to 0. We don't need that mask, but we must match
   721  // it in order to strip it out.
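         // Walking the y == 0 case through the first ROLQ pattern below (a sketch, not a new rule):
         // ANDQconst y [63] is 0, ADDQconst [-64] gives -64, NEGQ gives 64, and CMPQconst .. [64]
         // then clears carry, so SBBQcarrymask is 0 and the ANDQ term vanishes; what remains is
         // (SHLQ x 0) = x, exactly what the source expression requires for a zero rotate count.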
   722  (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) -> (ROLQ x y)
   723  (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) -> (RORQ x y)
   724  
   725  (ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) -> (ROLL x y)
   726  (ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) -> (RORL x y)
   727  
   728  // Help with rotate detection
   729  (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) -> (FlagLT_ULT)
   730  (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst  [7] _))) [32]) -> (FlagLT_ULT)
   731  
   732  (ORL (SHLL x (AND(Q|L)const y [15]))
   733       (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
   734             (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
   735    && v.Type.Size() == 2
   736    -> (ROLW x y)
   737  (ORL (SHRW x (AND(Q|L)const y [15]))
   738       (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
   739    && v.Type.Size() == 2
   740    -> (RORW x y)
   741  
   742  (ORL (SHLL x (AND(Q|L)const y [ 7]))
   743       (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
   744             (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
   745    && v.Type.Size() == 1
   746    -> (ROLB x y)
   747  (ORL (SHRB x (AND(Q|L)const y [ 7]))
   748       (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
   749    && v.Type.Size() == 1
   750    -> (RORB x y)
   751  
   752  // rotate left negative = rotate right
   753  (ROLQ x (NEG(Q|L) y)) -> (RORQ x y)
   754  (ROLL x (NEG(Q|L) y)) -> (RORL x y)
   755  (ROLW x (NEG(Q|L) y)) -> (RORW x y)
   756  (ROLB x (NEG(Q|L) y)) -> (RORB x y)
   757  
   758  // rotate right negative = rotate left
   759  (RORQ x (NEG(Q|L) y)) -> (ROLQ x y)
   760  (RORL x (NEG(Q|L) y)) -> (ROLL x y)
   761  (RORW x (NEG(Q|L) y)) -> (ROLW x y)
   762  (RORB x (NEG(Q|L) y)) -> (ROLB x y)
   763  
   764  // rotate by constants
   765  (ROLQ x (MOV(Q|L)const [c])) -> (ROLQconst [c&63] x)
   766  (ROLL x (MOV(Q|L)const [c])) -> (ROLLconst [c&31] x)
   767  (ROLW x (MOV(Q|L)const [c])) -> (ROLWconst [c&15] x)
   768  (ROLB x (MOV(Q|L)const [c])) -> (ROLBconst [c&7 ] x)
   769  
   770  (RORQ x (MOV(Q|L)const [c])) -> (ROLQconst [(-c)&63] x)
   771  (RORL x (MOV(Q|L)const [c])) -> (ROLLconst [(-c)&31] x)
   772  (RORW x (MOV(Q|L)const [c])) -> (ROLWconst [(-c)&15] x)
   773  (RORB x (MOV(Q|L)const [c])) -> (ROLBconst [(-c)&7 ] x)
   774  
   775  // Constant shift simplifications
   776  ((SHLQ|SHRQ|SARQ)const      x [0]) -> x
   777  ((SHLL|SHRL|SARL)const      x [0]) -> x
   778  ((SHRW|SARW)const           x [0]) -> x
   779  ((SHRB|SARB)const           x [0]) -> x
   780  ((ROLQ|ROLL|ROLW|ROLB)const x [0]) -> x
   781  
   782  // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
   783  // because the x86 instructions are defined to use all 5 bits of the shift even
   784  // for the small shifts. I don't think we'll ever generate a weird shift (e.g.
    785  // (SHRW x (MOVLconst [24]))), but just in case.
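         // With c = 24, for instance, c&31 = 24 >= 16, so the SHRW rule above produces
         // (MOVLconst [0]), which matches shifting any 16-bit value right by 24.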
   786  
   787  (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
   788  (CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
   789  (CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
   790  (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
   791  (CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
   792  (CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
   793  (CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
   794  (CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
   795  
   796  // Using MOVZX instead of AND is cheaper.
   797  (ANDLconst [0xFF] x) -> (MOVBQZX x)
   798  (ANDLconst [0xFFFF] x) -> (MOVWQZX x)
   799  (ANDQconst [0xFF] x) -> (MOVBQZX x)
   800  (ANDQconst [0xFFFF] x) -> (MOVWQZX x)
   801  (ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x)
   802  
   803  // strength reduction
    804  // Assumes the following instruction costs (from https://gmplib.org/~tege/x86-timing.pdf):
   805  //    1 - addq, shlq, leaq, negq
   806  //    3 - imulq
   807  // This limits the rewrites to two instructions.
   808  // TODO: 27, 81
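         // As a concrete instance of the two-instruction budget, the [11] rule below expands to
         // x + 2*(x + 4*x): one LEAQ4 plus one LEAQ2, total cost 2, versus cost 3 for one imulq.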
   809  (MULQconst [-1] x) -> (NEGQ x)
   810  (MULQconst [0] _) -> (MOVQconst [0])
   811  (MULQconst [1] x) -> x
   812  (MULQconst [3] x) -> (LEAQ2 x x)
   813  (MULQconst [5] x) -> (LEAQ4 x x)
   814  (MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x)
   815  (MULQconst [9] x) -> (LEAQ8 x x)
   816  (MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x))
   817  (MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x))
   818  (MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
   819  (MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x))
   820  (MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x))
   821  (MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x))
   822  (MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x))
   823  
   824  (MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
   825  (MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
   826  (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
   827  (MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
   828  (MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
   829  (MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
   830  (MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
   831  (MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
   832  
   833  // combine add/shift into LEAQ
   834  (ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
   835  (ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
   836  (ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
   837  (ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
   838  (ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
   839  
   840  // combine ADDQ/ADDQconst into LEAQ1
   841  (ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y)
   842  (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
   843  
   844  // fold ADDQ into LEAQ
   845  (ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   846  (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   847  (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   848  (ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   849  
   850  // fold ADDQconst into LEAQx
   851  (ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
   852  (ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
   853  (ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
   854  (ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
   855  (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
   856  (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
   857  (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
   858  (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
   859  (LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
   860  (LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
   861  (LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
   862  
   863  // fold shifts into LEAQx
   864  (LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
   865  (LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
   866  (LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
   867  (LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
   868  (LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
   869  (LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)
   870  
   871  // reverse ordering of compare instruction
   872  (SETL (InvertFlags x)) -> (SETG x)
   873  (SETG (InvertFlags x)) -> (SETL x)
   874  (SETB (InvertFlags x)) -> (SETA x)
   875  (SETA (InvertFlags x)) -> (SETB x)
   876  (SETLE (InvertFlags x)) -> (SETGE x)
   877  (SETGE (InvertFlags x)) -> (SETLE x)
   878  (SETBE (InvertFlags x)) -> (SETAE x)
   879  (SETAE (InvertFlags x)) -> (SETBE x)
   880  (SETEQ (InvertFlags x)) -> (SETEQ x)
   881  (SETNE (InvertFlags x)) -> (SETNE x)
   882  
   883  (SETLmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGmem [off] {sym} ptr x mem)
   884  (SETGmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLmem [off] {sym} ptr x mem)
   885  (SETBmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAmem [off] {sym} ptr x mem)
   886  (SETAmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBmem [off] {sym} ptr x mem)
   887  (SETLEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGEmem [off] {sym} ptr x mem)
   888  (SETGEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLEmem [off] {sym} ptr x mem)
   889  (SETBEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAEmem [off] {sym} ptr x mem)
   890  (SETAEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBEmem [off] {sym} ptr x mem)
   891  (SETEQmem [off] {sym} ptr (InvertFlags x) mem) -> (SETEQmem [off] {sym} ptr x mem)
   892  (SETNEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETNEmem [off] {sym} ptr x mem)
   893  
   894  // sign extended loads
   895  // Note: The combined instruction must end up in the same block
   896  // as the original load. If not, we end up making a value with
   897  // memory type live in two different blocks, which can lead to
   898  // multiple memory values alive simultaneously.
   899  // Make sure we don't combine these ops if the load has another use.
   900  // This prevents a single load from being split into multiple loads
   901  // which then might return different values.  See test/atomicload.go.
   902  (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   903  (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   904  (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   905  (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   906  (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   907  (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   908  (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   909  (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   910  (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   911  (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   912  (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   913  (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   914  (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   915  (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   916  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   917  (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   918  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   919  (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   920  
   921  (MOVLQZX x) && zeroUpper32Bits(x,3) -> x
   922  
   923  (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
   924  (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
   925  (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
   926  (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
   927  (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
   928  
   929  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
   930  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQZX x)
   931  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQZX x)
   932  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQZX x)
   933  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   934  (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQSX x)
   935  (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQSX x)
   936  (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQSX x)
   937  
   938  // Fold extensions and ANDs together.
   939  (MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
   940  (MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
   941  (MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
   942  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
   943  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
   944  (MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)
   945  
   946  // Don't extend before storing
   947  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
   948  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   949  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   950  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
   951  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   952  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   953  
   954  // fold constants into memory operations
   955  // Note that this is not always a good idea because if not all the uses of
   956  // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
   957  // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
   958  // Nevertheless, let's do it!
   959  (MOV(Q|L|W|B|SS|SD|O)load  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
   960      (MOV(Q|L|W|B|SS|SD|O)load  [off1+off2] {sym} ptr mem)
   961  (MOV(Q|L|W|B|SS|SD|O)store  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
   962  	(MOV(Q|L|W|B|SS|SD|O)store  [off1+off2] {sym} ptr val mem)
   963  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
   964  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {sym} base val mem)
   965  
   966  // Fold constants into stores.
   967  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
   968  	(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
   969  (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   970  	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
   971  (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   972  	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
   973  (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   974  	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
   975  
   976  // Fold address offsets into constant stores.
   977  (MOV(Q|L|W|B)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   978  	(MOV(Q|L|W|B)storeconst [ValAndOff(sc).add(off)] {s} ptr mem)
   979  
   980  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
   981  // what variables are being read/written by the ops.
   982  (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   983  	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   984  	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
   985  (MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   986  	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   987  	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   988  (MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
   989  	(MOV(Q|L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   990  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   991  	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   992  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   993  
   994  // generating indexed loads and stores
   995  (MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   996  	(MOV(B|W|L|Q|SS|SD)loadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   997  (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   998  	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   999  (MOV(L|SS)load [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1000  	(MOV(L|SS)loadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1001  (MOV(L|Q|SD)load [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1002  	(MOV(L|Q|SD)loadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  1003  
  1004  (MOV(B|W|L|Q|SS|SD)store [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1005  	(MOV(B|W|L|Q|SS|SD)storeidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1006  (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1007  	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1008  (MOV(L|SS)store [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1009  	(MOV(L|SS)storeidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1010  (MOV(L|Q|SD)store [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1011  	(MOV(L|Q|SD)storeidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  1012  
  1013  (MOV(B|W|L|Q|SS|SD)load [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB ->
  1014  	(MOV(B|W|L|Q|SS|SD)loadidx1 [off] {sym} ptr idx mem)
  1015  (MOV(B|W|L|Q|SS|SD)store [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB ->
  1016  	(MOV(B|W|L|Q|SS|SD)storeidx1 [off] {sym} ptr idx val mem)
  1017  
  1018  (MOV(B|W|L|Q)storeconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1019  	(MOV(B|W|L|Q)storeconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1020  (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1021  	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1022  (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1023  	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1024  (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
  1025  	(MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  1026  
  1027  (MOV(B|W|L|Q)storeconst [x] {sym} (ADDQ ptr idx) mem) -> (MOV(B|W|L|Q)storeconstidx1 [x] {sym} ptr idx mem)
  1028  
  1029  // combine SHLQ into indexed loads and stores
  1030  (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
  1031  (MOV(L|SS)loadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOV(L|SS)loadidx4 [c] {sym} ptr idx mem)
  1032  (MOV(L|Q|SD)loadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOV(L|Q|SD)loadidx8 [c] {sym} ptr idx mem)
  1033  
  1034  (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
  1035  (MOV(L|SS)storeidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOV(L|SS)storeidx4 [c] {sym} ptr idx val mem)
  1036  (MOV(L|Q|SD)storeidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOV(L|Q|SD)storeidx8 [c] {sym} ptr idx val mem)
  1037  (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
  1038  (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
  1039  (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
  1040  
  1041  // combine ADDQ into pointer of indexed loads and stores
  1042  (MOV(B|W|L|Q|SS|SD)loadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOV(B|W|L|Q|SS|SD)loadidx1 [c+d] {sym} ptr idx mem)
  1043  (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
  1044  (MOV(L|SS)loadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOV(L|SS)loadidx4 [c+d] {sym} ptr idx mem)
  1045  (MOV(L|Q|SD)loadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOV(L|Q|SD)loadidx8 [c+d] {sym} ptr idx mem)
  1046  
  1047  (MOV(B|W|L|Q|SS|SD)storeidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOV(B|W|L|Q|SS|SD)storeidx1 [c+d] {sym} ptr idx val mem)
  1048  (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
  1049  (MOV(L|SS)storeidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOV(L|SS)storeidx4 [c+d] {sym} ptr idx val mem)
  1050  (MOV(L|Q|SD)storeidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOV(L|Q|SD)storeidx8 [c+d] {sym} ptr idx val mem)
  1051  
  1052  
  1053  // combine ADDQ into index of indexed loads and stores
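        // (Unlike the pointer case above, a constant added to the index is
        // scaled by the element size, hence the c+2*d, c+4*d, and c+8*d offsets below.)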
  1054  (MOV(B|W|L|Q|SS|SD)loadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+d) -> (MOV(B|W|L|Q|SS|SD)loadidx1 [c+d] {sym} ptr idx mem)
  1055  (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
  1056  (MOV(L|SS)loadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+4*d) -> (MOV(L|SS)loadidx4 [c+4*d] {sym} ptr idx mem)
  1057  (MOV(L|Q|SD)loadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)  && is32Bit(c+8*d) -> (MOV(L|Q|SD)loadidx8 [c+8*d] {sym} ptr idx mem)
  1058  
  1059  (MOV(B|W|L|Q|SS|SD)storeidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+d) -> (MOV(B|W|L|Q|SS|SD)storeidx1 [c+d] {sym} ptr idx val mem)
  1060  (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
  1061  (MOV(L|SS)storeidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+4*d) -> (MOV(L|SS)storeidx4 [c+4*d] {sym} ptr idx val mem)
  1062  (MOV(L|Q|SD)storeidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)  && is32Bit(c+8*d) -> (MOV(L|Q|SD)storeidx8 [c+8*d] {sym} ptr idx val mem)
  1063  
  1064  (MOV(B|W|L|Q)storeconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOV(B|W|L|Q)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1065  (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1066  (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1067  (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1068  
  1069  (MOV(B|W|L|Q)storeconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOV(B|W|L|Q)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  1070  (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) -> (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
  1071  (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) -> (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
  1072  (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) -> (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
  1073  
  1074  // fold LEAQs together
  1075  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1076        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  1077  
  1078  // LEAQ into LEAQ1
  1079  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1080         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1081  
  1082  // LEAQ1 into LEAQ
  1083  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1084         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1085  
  1086  // LEAQ into LEAQ[248]
  1087  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1088         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1089  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1090         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1091  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
  1092         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1093  
  1094  // LEAQ[248] into LEAQ
  1095  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1096        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1097  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1098        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1099  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  1100        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1101  
  1102  // Absorb InvertFlags into branches.
  1103  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
  1104  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
  1105  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
  1106  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
  1107  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
  1108  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
  1109  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
  1110  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
  1111  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
  1112  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
  1113  
  1114  // Constant comparisons.
  1115  (CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
  1116  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
  1117  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
  1118  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
  1119  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
  1120  (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
  1121  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
  1122  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
  1123  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
  1124  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
  1125  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
  1126  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
  1127  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
  1128  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
  1129  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
  1130  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
  1131  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
  1132  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
  1133  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
  1134  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
  1135  
  1136  // Other known comparisons.
  1137  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
  1138  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
  1139  (CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
  1140  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
  1141  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
  1142  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1143  (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1144  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
  1145  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
  1146  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
  1147  
  1148  // TODO: DIVxU also.
  1149  
  1150  // Absorb flag constants into SBB ops.
  1151  (SBBQcarrymask (FlagEQ)) -> (MOVQconst [0])
  1152  (SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1])
  1153  (SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0])
  1154  (SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1])
  1155  (SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0])
  1156  (SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
  1157  (SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
  1158  (SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
  1159  (SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
  1160  (SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
  1161  
  1162  // Absorb flag constants into branches.
  1163  ((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) -> (First nil yes no)
  1164  ((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) -> (First nil no yes)
  1165  ((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) -> (First nil yes no)
  1166  ((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) -> (First nil no yes)
  1167  ((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) -> (First nil yes no)
  1168  ((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) -> (First nil no yes)
  1169  ((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) -> (First nil yes no)
  1170  ((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) -> (First nil no yes)
  1171  ((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) -> (First nil yes no)
  1172  ((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) -> (First nil no yes)
  1173  
  1174  // Absorb flag constants into SETxx ops.
  1175  ((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ))     -> (MOVLconst [1])
  1176  ((SETNE|SETL|SETG|SETB|SETA)     (FlagEQ))     -> (MOVLconst [0])
  1177  ((SETNE|SETL|SETLE|SETB|SETBE)   (FlagLT_ULT)) -> (MOVLconst [1])
  1178  ((SETEQ|SETG|SETGE|SETA|SETAE)   (FlagLT_ULT)) -> (MOVLconst [0])
  1179  ((SETNE|SETL|SETLE|SETA|SETAE)   (FlagLT_UGT)) -> (MOVLconst [1])
  1180  ((SETEQ|SETG|SETGE|SETB|SETBE)   (FlagLT_UGT)) -> (MOVLconst [0])
  1181  ((SETNE|SETG|SETGE|SETB|SETBE)   (FlagGT_ULT)) -> (MOVLconst [1])
  1182  ((SETEQ|SETL|SETLE|SETA|SETAE)   (FlagGT_ULT)) -> (MOVLconst [0])
  1183  ((SETNE|SETG|SETGE|SETA|SETAE)   (FlagGT_UGT)) -> (MOVLconst [1])
  1184  ((SETEQ|SETL|SETLE|SETB|SETBE)   (FlagGT_UGT)) -> (MOVLconst [0])
  1185  
  1186  (SETEQmem [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1187  (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1188  (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1189  (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1190  (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1191  
  1192  (SETNEmem [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1193  (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1194  (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1195  (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1196  (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1197  
  1198  (SETLmem  [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1199  (SETLmem  [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1200  (SETLmem  [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1201  (SETLmem  [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1202  (SETLmem  [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1203  
  1204  (SETLEmem [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1205  (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1206  (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1207  (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1208  (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1209  
  1210  (SETGmem  [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1211  (SETGmem  [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1212  (SETGmem  [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1213  (SETGmem  [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1214  (SETGmem  [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1215  
  1216  (SETGEmem [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1217  (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1218  (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1219  (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1220  (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1221  
  1222  (SETBmem  [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1223  (SETBmem  [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1224  (SETBmem  [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1225  (SETBmem  [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1226  (SETBmem  [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1227  
  1228  (SETBEmem [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1229  (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1230  (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1231  (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1232  (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1233  
  1234  (SETAmem  [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1235  (SETAmem  [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1236  (SETAmem  [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1237  (SETAmem  [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1238  (SETAmem  [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1239  
  1240  (SETAEmem [off] {sym} ptr x:(FlagEQ)     mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1241  (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1242  (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1243  (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
  1244  (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
  1245  
  1246  // Remove redundant *const ops
  1247  (ADDQconst [0] x) -> x
  1248  (ADDLconst [c] x) && int32(c)==0 -> x
  1249  (SUBQconst [0] x) -> x
  1250  (SUBLconst [c] x) && int32(c) == 0 -> x
  1251  (ANDQconst [0] _)                 -> (MOVQconst [0])
  1252  (ANDLconst [c] _) && int32(c)==0  -> (MOVLconst [0])
  1253  (ANDQconst [-1] x)                -> x
  1254  (ANDLconst [c] x) && int32(c)==-1 -> x
  1255  (ORQconst [0] x)                  -> x
  1256  (ORLconst [c] x) && int32(c)==0   -> x
  1257  (ORQconst [-1] _)                 -> (MOVQconst [-1])
  1258  (ORLconst [c] _) && int32(c)==-1  -> (MOVLconst [-1])
  1259  (XORQconst [0] x)                  -> x
  1260  (XORLconst [c] x) && int32(c)==0   -> x
  1261  // TODO: since we got rid of the W/B versions, we might miss
  1262  // things like (ANDLconst [0x100] x) which were formerly
  1263  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1264  // If we cared, we might do:
  1265  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1266  
  1267  // Convert constant subtracts to constant adds
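        // (The c != -(1<<31) guard skips the one 32-bit constant whose negation
        // does not fit back into a 32-bit immediate.)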
  1268  (SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
  1269  (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
  1270  
  1271  // generic constant folding
  1272  // TODO: more of this
  1273  (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
  1274  (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
  1275  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
  1276  (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
  1277  (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
  1278  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
  1279  (SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1280  (SARLconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int32(d))>>uint64(c)])
  1281  (SARWconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int16(d))>>uint64(c)])
  1282  (SARBconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int8(d))>>uint64(c)])
  1283  (NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
  1284  (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
  1285  (MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
  1286  (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
  1287  (ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
  1288  (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
  1289  (ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
  1290  (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
  1291  (XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
  1292  (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
  1293  (NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
  1294  (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
  1295  
  1296  // generic simplifications
  1297  // TODO: more of this
  1298  (ADDQ x (NEGQ y)) -> (SUBQ x y)
  1299  (ADDL x (NEGL y)) -> (SUBL x y)
  1300  (SUBQ x x) -> (MOVQconst [0])
  1301  (SUBL x x) -> (MOVLconst [0])
  1302  (ANDQ x x) -> x
  1303  (ANDL x x) -> x
  1304  (ORQ x x) -> x
  1305  (ORL x x) -> x
  1306  (XORQ x x) -> (MOVQconst [0])
  1307  (XORL x x) -> (MOVLconst [0])
  1308  (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)
  1309  
  1310  // checking AND against 0.
  1311  (CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
  1312  (CMPLconst (ANDL x y) [0]) -> (TESTL x y)
  1313  (CMPWconst (ANDL x y) [0]) -> (TESTW x y)
  1314  (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
  1315  (CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
  1316  (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
  1317  (CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
  1318  (CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
  1319  
  1320  // Convert TESTx to TESTxconst if possible.
  1321  (TESTQ (MOVQconst [c]) x) && is32Bit(c) -> (TESTQconst [c] x)
  1322  (TESTL (MOVLconst [c]) x) -> (TESTLconst [c] x)
  1323  (TESTW (MOVLconst [c]) x) -> (TESTWconst [c] x)
  1324  (TESTB (MOVLconst [c]) x) -> (TESTBconst [c] x)
  1325  
  1326  // TEST %reg,%reg is shorter than CMP
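        // (e.g. TESTQ AX, AX encodes in 3 bytes, while CMPQ $0, AX takes 4)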
  1327  (CMPQconst x [0]) -> (TESTQ x x)
  1328  (CMPLconst x [0]) -> (TESTL x x)
  1329  (CMPWconst x [0]) -> (TESTW x x)
  1330  (CMPBconst x [0]) -> (TESTB x x)
  1331  
  1332  // Combining byte loads into larger (unaligned) loads.
  1333  // There are many ways these combinations could occur.  This is
  1334  // designed to match the way encoding/binary.LittleEndian does it.
  1335  
  1336  // Little-endian loads
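        // As a rough illustration, a Go expression in the style of
        // encoding/binary.LittleEndian.Uint32,
        //
        //   x := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
        //
        // reaches this point as ORs of shifted MOVBloads at consecutive offsets;
        // the rules below collapse such trees into a single wider load.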
  1337  
  1338  (ORL                  x0:(MOVBload [i0] {s} p mem)
  1339      sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
  1340    && i1 == i0+1
  1341    && x0.Uses == 1
  1342    && x1.Uses == 1
  1343    && sh.Uses == 1
  1344    && mergePoint(b,x0,x1) != nil
  1345    && clobber(x0)
  1346    && clobber(x1)
  1347    && clobber(sh)
  1348    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1349  
  1350  (ORQ                  x0:(MOVBload [i0] {s} p mem)
  1351      sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
  1352    && i1 == i0+1
  1353    && x0.Uses == 1
  1354    && x1.Uses == 1
  1355    && sh.Uses == 1
  1356    && mergePoint(b,x0,x1) != nil
  1357    && clobber(x0)
  1358    && clobber(x1)
  1359    && clobber(sh)
  1360    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1361  
  1362  (ORL                   x0:(MOVWload [i0] {s} p mem)
  1363      sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
  1364    && i1 == i0+2
  1365    && x0.Uses == 1
  1366    && x1.Uses == 1
  1367    && sh.Uses == 1
  1368    && mergePoint(b,x0,x1) != nil
  1369    && clobber(x0)
  1370    && clobber(x1)
  1371    && clobber(sh)
  1372    -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1373  
  1374  (ORQ                   x0:(MOVWload [i0] {s} p mem)
  1375      sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
  1376    && i1 == i0+2
  1377    && x0.Uses == 1
  1378    && x1.Uses == 1
  1379    && sh.Uses == 1
  1380    && mergePoint(b,x0,x1) != nil
  1381    && clobber(x0)
  1382    && clobber(x1)
  1383    && clobber(sh)
  1384    -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1385  
  1386  (ORQ                   x0:(MOVLload [i0] {s} p mem)
  1387      sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
  1388    && i1 == i0+4
  1389    && x0.Uses == 1
  1390    && x1.Uses == 1
  1391    && sh.Uses == 1
  1392    && mergePoint(b,x0,x1) != nil
  1393    && clobber(x0)
  1394    && clobber(x1)
  1395    && clobber(sh)
  1396    -> @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
  1397  
  1398  (ORL
  1399      s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))
  1400      or:(ORL
  1401          s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))
  1402  	y))
  1403    && i1 == i0+1
  1404    && j1 == j0+8
  1405    && j0 % 16 == 0
  1406    && x0.Uses == 1
  1407    && x1.Uses == 1
  1408    && s0.Uses == 1
  1409    && s1.Uses == 1
  1410    && or.Uses == 1
  1411    && mergePoint(b,x0,x1) != nil
  1412    && clobber(x0)
  1413    && clobber(x1)
  1414    && clobber(s0)
  1415    && clobber(s1)
  1416    && clobber(or)
  1417    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1418  
  1419  (ORQ
  1420      s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))
  1421      or:(ORQ
  1422          s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
  1423  	y))
  1424    && i1 == i0+1
  1425    && j1 == j0+8
  1426    && j0 % 16 == 0
  1427    && x0.Uses == 1
  1428    && x1.Uses == 1
  1429    && s0.Uses == 1
  1430    && s1.Uses == 1
  1431    && or.Uses == 1
  1432    && mergePoint(b,x0,x1) != nil
  1433    && clobber(x0)
  1434    && clobber(x1)
  1435    && clobber(s0)
  1436    && clobber(s1)
  1437    && clobber(or)
  1438    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1439  
  1440  (ORQ
  1441      s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
  1442      or:(ORQ
  1443          s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))
  1444  	y))
  1445    && i1 == i0+2
  1446    && j1 == j0+16
  1447    && j0 % 32 == 0
  1448    && x0.Uses == 1
  1449    && x1.Uses == 1
  1450    && s0.Uses == 1
  1451    && s1.Uses == 1
  1452    && or.Uses == 1
  1453    && mergePoint(b,x0,x1) != nil
  1454    && clobber(x0)
  1455    && clobber(x1)
  1456    && clobber(s0)
  1457    && clobber(s1)
  1458    && clobber(or)
  1459    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
  1460  
  1461  // Little-endian indexed loads
  1462  
  1463  (ORL                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1464      sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1465    && i1 == i0+1
  1466    && x0.Uses == 1
  1467    && x1.Uses == 1
  1468    && sh.Uses == 1
  1469    && mergePoint(b,x0,x1) != nil
  1470    && clobber(x0)
  1471    && clobber(x1)
  1472    && clobber(sh)
  1473    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1474  
  1475  (ORQ                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1476      sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1477    && i1 == i0+1
  1478    && x0.Uses == 1
  1479    && x1.Uses == 1
  1480    && sh.Uses == 1
  1481    && mergePoint(b,x0,x1) != nil
  1482    && clobber(x0)
  1483    && clobber(x1)
  1484    && clobber(sh)
  1485    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1486  
  1487  (ORL                   x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1488      sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1489    && i1 == i0+2
  1490    && x0.Uses == 1
  1491    && x1.Uses == 1
  1492    && sh.Uses == 1
  1493    && mergePoint(b,x0,x1) != nil
  1494    && clobber(x0)
  1495    && clobber(x1)
  1496    && clobber(sh)
  1497    -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
  1498  
  1499  (ORQ                   x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1500      sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1501    && i1 == i0+2
  1502    && x0.Uses == 1
  1503    && x1.Uses == 1
  1504    && sh.Uses == 1
  1505    && mergePoint(b,x0,x1) != nil
  1506    && clobber(x0)
  1507    && clobber(x1)
  1508    && clobber(sh)
  1509    -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
  1510  
  1511  (ORQ                   x0:(MOVLloadidx1 [i0] {s} p idx mem)
  1512      sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
  1513    && i1 == i0+4
  1514    && x0.Uses == 1
  1515    && x1.Uses == 1
  1516    && sh.Uses == 1
  1517    && mergePoint(b,x0,x1) != nil
  1518    && clobber(x0)
  1519    && clobber(x1)
  1520    && clobber(sh)
  1521    -> @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
  1522  
  1523  (ORL
  1524      s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1525      or:(ORL
  1526          s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  1527  	y))
  1528    && i1 == i0+1
  1529    && j1 == j0+8
  1530    && j0 % 16 == 0
  1531    && x0.Uses == 1
  1532    && x1.Uses == 1
  1533    && s0.Uses == 1
  1534    && s1.Uses == 1
  1535    && or.Uses == 1
  1536    && mergePoint(b,x0,x1) != nil
  1537    && clobber(x0)
  1538    && clobber(x1)
  1539    && clobber(s0)
  1540    && clobber(s1)
  1541    && clobber(or)
  1542    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
  1543  
  1544  (ORQ
  1545      s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1546      or:(ORQ
  1547          s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  1548  	y))
  1549    && i1 == i0+1
  1550    && j1 == j0+8
  1551    && j0 % 16 == 0
  1552    && x0.Uses == 1
  1553    && x1.Uses == 1
  1554    && s0.Uses == 1
  1555    && s1.Uses == 1
  1556    && or.Uses == 1
  1557    && mergePoint(b,x0,x1) != nil
  1558    && clobber(x0)
  1559    && clobber(x1)
  1560    && clobber(s0)
  1561    && clobber(s1)
  1562    && clobber(or)
  1563    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
  1564  
  1565  (ORQ
  1566      s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  1567      or:(ORQ
  1568          s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))
  1569  	y))
  1570    && i1 == i0+2
  1571    && j1 == j0+16
  1572    && j0 % 32 == 0
  1573    && x0.Uses == 1
  1574    && x1.Uses == 1
  1575    && s0.Uses == 1
  1576    && s1.Uses == 1
  1577    && or.Uses == 1
  1578    && mergePoint(b,x0,x1) != nil
  1579    && clobber(x0)
  1580    && clobber(x1)
  1581    && clobber(s0)
  1582    && clobber(s1)
  1583    && clobber(or)
  1584    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
  1585  
  1586  // Big-endian loads
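        // Sketch of the big-endian counterpart: code along the lines of
        //
        //   x := uint16(b[1]) | uint16(b[0])<<8
        //
        // (binary.BigEndian.Uint16-style) becomes a MOVWload followed by a
        // ROLWconst [8] byte swap; the 32- and 64-bit patterns below use
        // BSWAPL/BSWAPQ instead.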
  1587  
  1588  (ORL
  1589                         x1:(MOVBload [i1] {s} p mem)
  1590      sh:(SHLLconst [8]  x0:(MOVBload [i0] {s} p mem)))
  1591    && i1 == i0+1
  1592    && x0.Uses == 1
  1593    && x1.Uses == 1
  1594    && sh.Uses == 1
  1595    && mergePoint(b,x0,x1) != nil
  1596    && clobber(x0)
  1597    && clobber(x1)
  1598    && clobber(sh)
  1599    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1600  
  1601  (ORQ
  1602                         x1:(MOVBload [i1] {s} p mem)
  1603      sh:(SHLQconst [8]  x0:(MOVBload [i0] {s} p mem)))
  1604    && i1 == i0+1
  1605    && x0.Uses == 1
  1606    && x1.Uses == 1
  1607    && sh.Uses == 1
  1608    && mergePoint(b,x0,x1) != nil
  1609    && clobber(x0)
  1610    && clobber(x1)
  1611    && clobber(sh)
  1612    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1613  
  1614  (ORL
  1615                          r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1616      sh:(SHLLconst [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1617    && i1 == i0+2
  1618    && x0.Uses == 1
  1619    && x1.Uses == 1
  1620    && r0.Uses == 1
  1621    && r1.Uses == 1
  1622    && sh.Uses == 1
  1623    && mergePoint(b,x0,x1) != nil
  1624    && clobber(x0)
  1625    && clobber(x1)
  1626    && clobber(r0)
  1627    && clobber(r1)
  1628    && clobber(sh)
  1629    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1630  
  1631  (ORQ
  1632                          r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1633      sh:(SHLQconst [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1634    && i1 == i0+2
  1635    && x0.Uses == 1
  1636    && x1.Uses == 1
  1637    && r0.Uses == 1
  1638    && r1.Uses == 1
  1639    && sh.Uses == 1
  1640    && mergePoint(b,x0,x1) != nil
  1641    && clobber(x0)
  1642    && clobber(x1)
  1643    && clobber(r0)
  1644    && clobber(r1)
  1645    && clobber(sh)
  1646    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1647  
  1648  (ORQ
  1649                          r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
  1650      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
  1651    && i1 == i0+4
  1652    && x0.Uses == 1
  1653    && x1.Uses == 1
  1654    && r0.Uses == 1
  1655    && r1.Uses == 1
  1656    && sh.Uses == 1
  1657    && mergePoint(b,x0,x1) != nil
  1658    && clobber(x0)
  1659    && clobber(x1)
  1660    && clobber(r0)
  1661    && clobber(r1)
  1662    && clobber(sh)
  1663    -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
  1664  
  1665  (ORL
  1666      s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))
  1667      or:(ORL
  1668          s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))
  1669  	y))
  1670    && i1 == i0+1
  1671    && j1 == j0-8
  1672    && j1 % 16 == 0
  1673    && x0.Uses == 1
  1674    && x1.Uses == 1
  1675    && s0.Uses == 1
  1676    && s1.Uses == 1
  1677    && or.Uses == 1
  1678    && mergePoint(b,x0,x1) != nil
  1679    && clobber(x0)
  1680    && clobber(x1)
  1681    && clobber(s0)
  1682    && clobber(s1)
  1683    && clobber(or)
  1684    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  1685  
  1686  (ORQ
  1687      s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
  1688      or:(ORQ
  1689          s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))
  1690  	y))
  1691    && i1 == i0+1
  1692    && j1 == j0-8
  1693    && j1 % 16 == 0
  1694    && x0.Uses == 1
  1695    && x1.Uses == 1
  1696    && s0.Uses == 1
  1697    && s1.Uses == 1
  1698    && or.Uses == 1
  1699    && mergePoint(b,x0,x1) != nil
  1700    && clobber(x0)
  1701    && clobber(x1)
  1702    && clobber(s0)
  1703    && clobber(s1)
  1704    && clobber(or)
  1705    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  1706  
  1707  (ORQ
  1708      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
  1709      or:(ORQ
  1710          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
  1711  	y))
  1712    && i1 == i0+2
  1713    && j1 == j0-16
  1714    && j1 % 32 == 0
  1715    && x0.Uses == 1
  1716    && x1.Uses == 1
  1717    && r0.Uses == 1
  1718    && r1.Uses == 1
  1719    && s0.Uses == 1
  1720    && s1.Uses == 1
  1721    && or.Uses == 1
  1722    && mergePoint(b,x0,x1) != nil
  1723    && clobber(x0)
  1724    && clobber(x1)
  1725    && clobber(r0)
  1726    && clobber(r1)
  1727    && clobber(s0)
  1728    && clobber(s1)
  1729    && clobber(or)
  1730    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
  1731  
  1732  // Big-endian indexed loads
  1733  
  1734  (ORL
  1735                         x1:(MOVBloadidx1 [i1] {s} p idx mem)
  1736      sh:(SHLLconst [8]  x0:(MOVBloadidx1 [i0] {s} p idx mem)))
  1737    && i1 == i0+1
  1738    && x0.Uses == 1
  1739    && x1.Uses == 1
  1740    && sh.Uses == 1
  1741    && mergePoint(b,x0,x1) != nil
  1742    && clobber(x0)
  1743    && clobber(x1)
  1744    && clobber(sh)
  1745    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
  1746  
  1747  (ORQ
  1748                         x1:(MOVBloadidx1 [i1] {s} p idx mem)
  1749      sh:(SHLQconst [8]  x0:(MOVBloadidx1 [i0] {s} p idx mem)))
  1750    && i1 == i0+1
  1751    && x0.Uses == 1
  1752    && x1.Uses == 1
  1753    && sh.Uses == 1
  1754    && mergePoint(b,x0,x1) != nil
  1755    && clobber(x0)
  1756    && clobber(x1)
  1757    && clobber(sh)
  1758    -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
  1759  
  1760  (ORL
  1761                          r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  1762      sh:(SHLLconst [16]  r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
  1763    && i1 == i0+2
  1764    && x0.Uses == 1
  1765    && x1.Uses == 1
  1766    && r0.Uses == 1
  1767    && r1.Uses == 1
  1768    && sh.Uses == 1
  1769    && mergePoint(b,x0,x1) != nil
  1770    && clobber(x0)
  1771    && clobber(x1)
  1772    && clobber(r0)
  1773    && clobber(r1)
  1774    && clobber(sh)
  1775    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
  1776  
  1777  (ORQ
  1778                          r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))
  1779      sh:(SHLQconst [16]  r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
  1780    && i1 == i0+2
  1781    && x0.Uses == 1
  1782    && x1.Uses == 1
  1783    && r0.Uses == 1
  1784    && r1.Uses == 1
  1785    && sh.Uses == 1
  1786    && mergePoint(b,x0,x1) != nil
  1787    && clobber(x0)
  1788    && clobber(x1)
  1789    && clobber(r0)
  1790    && clobber(r1)
  1791    && clobber(sh)
  1792    -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
  1793  
  1794  (ORQ
  1795                          r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))
  1796      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
  1797    && i1 == i0+4
  1798    && x0.Uses == 1
  1799    && x1.Uses == 1
  1800    && r0.Uses == 1
  1801    && r1.Uses == 1
  1802    && sh.Uses == 1
  1803    && mergePoint(b,x0,x1) != nil
  1804    && clobber(x0)
  1805    && clobber(x1)
  1806    && clobber(r0)
  1807    && clobber(r1)
  1808    && clobber(sh)
  1809    -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
  1810  
  1811  (ORL
  1812      s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  1813      or:(ORL
  1814          s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1815  	y))
  1816    && i1 == i0+1
  1817    && j1 == j0-8
  1818    && j1 % 16 == 0
  1819    && x0.Uses == 1
  1820    && x1.Uses == 1
  1821    && s0.Uses == 1
  1822    && s1.Uses == 1
  1823    && or.Uses == 1
  1824    && mergePoint(b,x0,x1) != nil
  1825    && clobber(x0)
  1826    && clobber(x1)
  1827    && clobber(s0)
  1828    && clobber(s1)
  1829    && clobber(or)
  1830    -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
  1831  
  1832  (ORQ
  1833      s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
  1834      or:(ORQ
  1835          s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
  1836  	y))
  1837    && i1 == i0+1
  1838    && j1 == j0-8
  1839    && j1 % 16 == 0
  1840    && x0.Uses == 1
  1841    && x1.Uses == 1
  1842    && s0.Uses == 1
  1843    && s1.Uses == 1
  1844    && or.Uses == 1
  1845    && mergePoint(b,x0,x1) != nil
  1846    && clobber(x0)
  1847    && clobber(x1)
  1848    && clobber(s0)
  1849    && clobber(s1)
  1850    && clobber(or)
  1851    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
  1852  
  1853  (ORQ
  1854      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))
  1855      or:(ORQ
  1856          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
  1857  	y))
  1858    && i1 == i0+2
  1859    && j1 == j0-16
  1860    && j1 % 32 == 0
  1861    && x0.Uses == 1
  1862    && x1.Uses == 1
  1863    && r0.Uses == 1
  1864    && r1.Uses == 1
  1865    && s0.Uses == 1
  1866    && s1.Uses == 1
  1867    && or.Uses == 1
  1868    && mergePoint(b,x0,x1) != nil
  1869    && clobber(x0)
  1870    && clobber(x1)
  1871    && clobber(r0)
  1872    && clobber(r1)
  1873    && clobber(s0)
  1874    && clobber(s1)
  1875    && clobber(or)
  1876    -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
  1877  
  1878  // Combine two byte stores + shift into rolw 8 + word store
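        // Roughly the store-side analogue of the big-endian loads above: code like
        //   b[0] = byte(v >> 8)
        //   b[1] = byte(v)
        // (binary.BigEndian.PutUint16-style) becomes one ROLWconst [8] plus one MOVWstore.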
  1879  (MOVBstore [i] {s} p w
  1880    x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
  1881    && x0.Uses == 1
  1882    && clobber(x0)
  1883    -> (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
  1884  
  1885  (MOVBstoreidx1 [i] {s} p idx w
  1886    x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
  1887    && x0.Uses == 1
  1888    && clobber(x0)
  1889    -> (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
  1890  
  1891  // Combine stores + shifts into bswap and larger (unaligned) stores
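        // E.g. (a sketch) a binary.BigEndian.PutUint32-style sequence,
        //   b[0] = byte(v >> 24); b[1] = byte(v >> 16); b[2] = byte(v >> 8); b[3] = byte(v)
        // collapses into a single BSWAPL followed by a MOVLstore; the 8-byte
        // pattern further below does the same with BSWAPQ and MOVQstore.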
  1892  (MOVBstore [i] {s} p w
  1893    x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)
  1894    x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)
  1895    x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
  1896    && x0.Uses == 1
  1897    && x1.Uses == 1
  1898    && x2.Uses == 1
  1899    && clobber(x0)
  1900    && clobber(x1)
  1901    && clobber(x2)
  1902    -> (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
  1903  
  1904  (MOVBstoreidx1 [i] {s} p idx w
  1905    x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w)
  1906    x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w)
  1907    x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
  1908    && x0.Uses == 1
  1909    && x1.Uses == 1
  1910    && x2.Uses == 1
  1911    && clobber(x0)
  1912    && clobber(x1)
  1913    && clobber(x2)
  1914    -> (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
  1915  
  1916  (MOVBstore [i] {s} p w
  1917    x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
  1918    x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)
  1919    x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)
  1920    x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)
  1921    x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)
  1922    x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)
  1923    x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
  1924    && x0.Uses == 1
  1925    && x1.Uses == 1
  1926    && x2.Uses == 1
  1927    && x3.Uses == 1
  1928    && x4.Uses == 1
  1929    && x5.Uses == 1
  1930    && x6.Uses == 1
  1931    && clobber(x0)
  1932    && clobber(x1)
  1933    && clobber(x2)
  1934    && clobber(x3)
  1935    && clobber(x4)
  1936    && clobber(x5)
  1937    && clobber(x6)
  1938    -> (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
  1939  
  1940  (MOVBstoreidx1 [i] {s} p idx w
  1941    x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w)
  1942    x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w)
  1943    x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w)
  1944    x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w)
  1945    x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w)
  1946    x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w)
  1947    x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
  1948    && x0.Uses == 1
  1949    && x1.Uses == 1
  1950    && x2.Uses == 1
  1951    && x3.Uses == 1
  1952    && x4.Uses == 1
  1953    && x5.Uses == 1
  1954    && x6.Uses == 1
  1955    && clobber(x0)
  1956    && clobber(x1)
  1957    && clobber(x2)
  1958    && clobber(x3)
  1959    && clobber(x4)
  1960    && clobber(x5)
  1961    && clobber(x6)
  1962    -> (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
  1963  
  1964  // Combine constant stores into larger (unaligned) stores.
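        // A small worked example of the first rule: byte stores of constant 0x12
        // at offset 0 and 0x34 at offset 1 merge into one MOVWstoreconst of 0x3412
        // at offset 0 (the lower-offset byte lands in the low half, little-endian).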
  1965  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  1966    && x.Uses == 1
  1967    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1968    && clobber(x)
  1969    -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  1970  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  1971    && x.Uses == 1
  1972    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1973    && clobber(x)
  1974    -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  1975  (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  1976    && x.Uses == 1
  1977    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  1978    && clobber(x)
  1979    -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  1980  (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
  1981    && config.useSSE
  1982    && x.Uses == 1
  1983    && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off()
  1984    && ValAndOff(c).Val() == 0
  1985    && ValAndOff(c2).Val() == 0
  1986    && clobber(x)
  1987    -> (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
  1988  
  1989  (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  1990    && x.Uses == 1
  1991    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1992    && clobber(x)
  1993    -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  1994  (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  1995    && x.Uses == 1
  1996    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1997    && clobber(x)
  1998    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
  1999  (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
  2000    && x.Uses == 1
  2001    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2002    && clobber(x)
  2003    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2004  
  2005  (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  2006    && x.Uses == 1
  2007    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  2008    && clobber(x)
  2009    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
  2010  (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
  2011    && x.Uses == 1
  2012    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  2013    && clobber(x)
  2014    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  2015  
  2016  // Combine stores into larger (unaligned) stores.
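        // E.g. (a sketch of the first rule): storing the second byte of w at i and
        // the low byte at i-1,
        //   (MOVBstore [i] {s} p (SHRQconst [8] w) (MOVBstore [i-1] {s} p w mem))
        // becomes a single (MOVWstore [i-1] {s} p w mem); the wider variants chain
        // the same way up to MOVQstore.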
  2017  (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  2018    && x.Uses == 1
  2019    && clobber(x)
  2020    -> (MOVWstore [i-1] {s} p w mem)
  2021  (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
  2022    && x.Uses == 1
  2023    && clobber(x)
  2024    -> (MOVWstore [i-1] {s} p w0 mem)
  2025  (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  2026    && x.Uses == 1
  2027    && clobber(x)
  2028    -> (MOVLstore [i-2] {s} p w mem)
  2029  (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
  2030    && x.Uses == 1
  2031    && clobber(x)
  2032    -> (MOVLstore [i-2] {s} p w0 mem)
  2033  (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  2034    && x.Uses == 1
  2035    && clobber(x)
  2036    -> (MOVQstore [i-4] {s} p w mem)
  2037  (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  2038    && x.Uses == 1
  2039    && clobber(x)
  2040    -> (MOVQstore [i-4] {s} p w0 mem)
  2041  
  2042  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  2043    && x.Uses == 1
  2044    && clobber(x)
  2045    -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
  2046  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
  2047    && x.Uses == 1
  2048    && clobber(x)
  2049    -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  2050  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  2051    && x.Uses == 1
  2052    && clobber(x)
  2053    -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
  2054  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  2055    && x.Uses == 1
  2056    && clobber(x)
  2057    -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
  2058  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
  2059    && x.Uses == 1
  2060    && clobber(x)
  2061    -> (MOVQstoreidx1 [i-4] {s} p idx w mem)
  2062  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  2063    && x.Uses == 1
  2064    && clobber(x)
  2065    -> (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
  2066  
  2067  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  2068    && x.Uses == 1
  2069    && clobber(x)
  2070    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
  2071  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  2072    && x.Uses == 1
  2073    && clobber(x)
  2074    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
  2075  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
  2076    && x.Uses == 1
  2077    && clobber(x)
  2078    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
  2079  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  2080    && x.Uses == 1
  2081    && clobber(x)
  2082    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
  2083  
  2084  (MOVBstore [i] {s} p
  2085    x1:(MOVBload [j] {s2} p2 mem)
  2086      mem2:(MOVBstore [i-1] {s} p
  2087        x2:(MOVBload [j-1] {s2} p2 mem) mem))
  2088    && x1.Uses == 1
  2089    && x2.Uses == 1
  2090    && mem2.Uses == 1
  2091    && clobber(x1)
  2092    && clobber(x2)
  2093    && clobber(mem2)
  2094    -> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
  2095  
  2096  (MOVWstore [i] {s} p
  2097    x1:(MOVWload [j] {s2} p2 mem)
  2098      mem2:(MOVWstore [i-2] {s} p
  2099        x2:(MOVWload [j-2] {s2} p2 mem) mem))
  2100    && x1.Uses == 1
  2101    && x2.Uses == 1
  2102    && mem2.Uses == 1
  2103    && clobber(x1)
  2104    && clobber(x2)
  2105    && clobber(mem2)
  2106    -> (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
  2107  
  2108  (MOVLstore [i] {s} p
  2109    x1:(MOVLload [j] {s2} p2 mem)
  2110      mem2:(MOVLstore [i-4] {s} p
  2111        x2:(MOVLload [j-4] {s2} p2 mem) mem))
  2112    && x1.Uses == 1
  2113    && x2.Uses == 1
  2114    && mem2.Uses == 1
  2115    && clobber(x1)
  2116    && clobber(x2)
  2117    && clobber(mem2)
  2118    -> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
  2119  
  2120  // amd64p32 rules
  2121  // same as the rules above, but with 32-bit instead of 64-bit pointer arithmetic.
  2122  // LEAQ,ADDQ -> LEAL,ADDL
  2123  (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
  2124  (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
  2125  
  2126  (MOVQload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2127  	(MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2128  (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2129  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2130  (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2131  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2132  (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2133  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  2134  
  2135  (MOVQstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2136  	(MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2137  (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2138  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2139  (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2140  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2141  (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
  2142  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2143  
  2144  (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2145  	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2146  (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2147  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2148  (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2149  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2150  (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
  2151  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  2152  
  2153  (MOVQload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload  [off1+off2] {sym} ptr mem)
  2154  (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
  2155  (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
  2156  (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
  2157  (MOVQstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore  [off1+off2] {sym} ptr val mem)
  2158  (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
  2159  (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
  2160  (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
  2161  (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2162  	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2163  (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2164  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2165  (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2166  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2167  (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
  2168  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  2169  
  2170  // Merge load and op
  2171  // TODO: add indexed variants?
  2172  ((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qmem x [off] {sym} ptr mem)
  2173  ((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lmem x [off] {sym} ptr mem)
  2174  ((ADD|SUB|MUL)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL)SDmem x [off] {sym} ptr mem)
  2175  ((ADD|SUB|MUL)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL)SSmem x [off] {sym} ptr mem)
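// Hedged sketch of the kind of Go code these merges target (function name is
// illustrative): when one operand of an integer or SSE op is a single-use load,
// the load becomes a memory operand of the op itself.
//
//	func addFromMem(x int64, p *int64) int64 {
//		return x + *p // ADDQ (AX), BX rather than MOVQ (AX), CX; ADDQ CX, BX
//	}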
  2176  
  2177  // Merge ADDQconst and LEAQ into atomic loads.
  2178  (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2179  	(MOVQatomicload [off1+off2] {sym} ptr mem)
  2180  (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2181  	(MOVLatomicload [off1+off2] {sym} ptr mem)
  2182  (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  2183  	(MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
  2184  (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
  2185  	(MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
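// Hedged example (type and field names are illustrative): an atomic load of a
// struct field addresses the field as base+constant offset, and that offset
// folds into the atomic load op; the XCHG-based atomic stores below fold their
// offsets the same way.
//
//	type counter struct {
//		pad [8]byte
//		n   int64
//	}
//
//	func load(c *counter) int64 {
//		return atomic.LoadInt64(&c.n) // sync/atomic; offset 8 folds into MOVQatomicload
//	}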
  2186  
  2187  // Merge ADDQconst and LEAQ into atomic stores.
  2188  (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2189  	(XCHGQ [off1+off2] {sym} val ptr mem)
  2190  (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
  2191  	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2192  (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2193  	(XCHGL [off1+off2] {sym} val ptr mem)
  2194  (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
  2195  	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2196  
  2197  // Merge ADDQconst into atomic adds.
  2198  // TODO: merging LEAQ doesn't work; the assembler doesn't like the resulting instructions.
  2199  (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2200  	(XADDQlock [off1+off2] {sym} val ptr mem)
  2201  (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
  2202  	(XADDLlock [off1+off2] {sym} val ptr mem)
  2203  
  2204  // Merge ADDQconst into atomic compare and swaps.
  2205  // TODO: merging LEAQ doesn't work; the assembler doesn't like the resulting instructions.
  2206  (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
  2207  	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  2208  (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
  2209  	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
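// Hedged example (reusing the illustrative counter type above): atomic add and
// compare-and-swap on a struct field reach the field the same way, so the
// constant offset folds into the LOCK XADD / LOCK CMPXCHG address.
//
//	func bump(c *counter) int64 {
//		return atomic.AddInt64(&c.n, 1) // XADDQlock with the folded offset
//	}
//
//	func cas(c *counter, old, new int64) bool {
//		return atomic.CompareAndSwapInt64(&c.n, old, new) // CMPXCHGQlock
//	}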
  2210  
  2211  // We don't need the conditional move if we know the arg of BSF is not zero.
  2212  (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 -> x
  2213  // Extension is unnecessary for trailing zeros: the ORed-in constant sets a bit just above the narrow value, so BSF never scans past it and the bits the extension would clear cannot affect the result.
  2214  (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) -> (BSFQ (ORQconst <t> [1<<8] x))
  2215  (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst <t> [1<<16] x))
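// Hedged example: math/bits.TrailingZeros16 is presumably lowered (by the Ctz
// rules earlier in this file) to a BSFQ of the input ORed with 1<<16, so the
// result is well defined even for a zero input; the rules above then drop the
// zero-extension that the uint16 argument would otherwise carry.
//
//	func tz(x uint16) int {
//		return bits.TrailingZeros16(x) // math/bits; BSFQ(x | 1<<16), no MOVWQZX
//	}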
  2216  
  2217  // Redundant sign/zero extensions
  2218  // Note: see issue 21963. We have to make sure we use the right type on
  2219  // the resulting extension (the outer type, not the inner type).
  2220  (MOVLQSX (MOVLQSX x)) -> (MOVLQSX x)
  2221  (MOVLQSX (MOVWQSX x)) -> (MOVWQSX x)
  2222  (MOVLQSX (MOVBQSX x)) -> (MOVBQSX x)
  2223  (MOVWQSX (MOVWQSX x)) -> (MOVWQSX x)
  2224  (MOVWQSX (MOVBQSX x)) -> (MOVBQSX x)
  2225  (MOVBQSX (MOVBQSX x)) -> (MOVBQSX x)
  2226  (MOVLQZX (MOVLQZX x)) -> (MOVLQZX x)
  2227  (MOVLQZX (MOVWQZX x)) -> (MOVWQZX x)
  2228  (MOVLQZX (MOVBQZX x)) -> (MOVBQZX x)
  2229  (MOVWQZX (MOVWQZX x)) -> (MOVWQZX x)
  2230  (MOVWQZX (MOVBQZX x)) -> (MOVBQZX x)
  2231  (MOVBQZX (MOVBQZX x)) -> (MOVBQZX x)
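// Hedged example (assuming the nested extension survives to this point rather
// than being folded earlier): converting a narrow value up in two steps yields
// a sign extension of a sign extension, which collapses to the innermost,
// narrowest one, typed as the outer result (see issue 21963).
//
//	func widen(b int8) int64 {
//		return int64(int32(b)) // MOVLQSX (MOVBQSX b) -> a single MOVBQSX
//	}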
  2232  
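// Fold `*p += const` into a single read-modify-write: a constant add of a
// just-loaded value, stored back to the same address, becomes one
// ADD{Q,L}constmem op. Hedged sketch of the triggering Go code:
//
//	func inc(p *int64) {
//		*p += 4 // ADDQ $4, (AX) rather than a separate load, add, store
//	}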
  2233  (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
  2234  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
  2235  	(ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  2236  (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  2237  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
  2238  	(ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  2239  
  2240  // float <-> int register moves, with no conversion.
  2241  // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
  2242  (MOVQload  [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) -> (MOVQf2i val)
  2243  (MOVLload  [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) -> (MOVLf2i val)
  2244  (MOVSDload [off] {sym} ptr (MOVQstore  [off] {sym} ptr val _)) -> (MOVQi2f val)
  2245  (MOVSSload [off] {sym} ptr (MOVLstore  [off] {sym} ptr val _)) -> (MOVLi2f val)
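// Hedged example: math.Float64bits stores its float argument and reloads the
// same bytes as an integer; the rules above replace that round trip through
// memory with a direct cross-register-file move.
//
//	func toBits(f float64) uint64 {
//		return math.Float64bits(f) // math; MOVQ X0, AX instead of store+load
//	}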
  2246  
  2247  // Other load-like ops.
  2248  (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y))
  2249  (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y))
  2250  (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y))
  2251  (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y))
  2252  (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y))
  2253  (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y))
  2254  ( ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y))
  2255  ( ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y))
  2256  (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y))
  2257  (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y))
  2258  
  2259  (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
  2260    (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
  2261  (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
  2262    (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
  2263  
  2264  (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y))
  2265  (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y))
  2266  (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y))
  2267  (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y))
  2268  (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y))
  2269  (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y))
  2270  
  2271  // Redirect stores to use the other register set.
  2272  (MOVQstore  [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem)
  2273  (MOVLstore  [off] {sym} ptr (MOVLf2i val) mem) -> (MOVSSstore [off] {sym} ptr val mem)
  2274  (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) -> (MOVQstore  [off] {sym} ptr val mem)
  2275  (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) -> (MOVLstore  [off] {sym} ptr val mem)
  2276  
  2277  // Load args directly into the register class where they will be used.
  2278  // We do this by just modifying the type of the Arg.
  2279  (MOVQf2i <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2280  (MOVLf2i <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2281  (MOVQi2f <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
  2282  (MOVLi2f <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
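// Hedged example: when a float64 argument is only ever reinterpreted as bits,
// retyping its Arg lets it be loaded from its stack slot directly into an
// integer register, avoiding a float load plus a cross-register-file move.
//
//	func argBits(f float64) uint64 {
//		return math.Float64bits(f) // Arg retyped; loaded with a plain MOVQ
//	}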
  2283  
  2284  // LEAQ is rematerializeable (the register allocator can recompute it rather than
  2285  // spill and reload it), so this helps to avoid a register spill. See issue 22947 for details.
  2286  (ADDQconst [off] x:(SP)) -> (LEAQ [off] x)