github.com/gocuntian/go@v0.0.0-20160610041250-fee02d270bf8/src/cmd/compile/internal/ssa/gen/AMD64.rules

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add64  x y) -> (ADDQ  x y)
     7  (AddPtr x y) -> (ADDQ  x y)
     8  (Add32  x y) -> (ADDL  x y)
     9  (Add16  x y) -> (ADDL  x y)
    10  (Add8   x y) -> (ADDL  x y)
    11  (Add32F x y) -> (ADDSS x y)
    12  (Add64F x y) -> (ADDSD x y)
    13  
    14  (Sub64  x y) -> (SUBQ  x y)
    15  (SubPtr x y) -> (SUBQ  x y)
    16  (Sub32  x y) -> (SUBL  x y)
    17  (Sub16  x y) -> (SUBL  x y)
    18  (Sub8   x y) -> (SUBL  x y)
    19  (Sub32F x y) -> (SUBSS x y)
    20  (Sub64F x y) -> (SUBSD x y)
    21  
    22  (Mul64  x y) -> (MULQ  x y)
    23  (Mul32  x y) -> (MULL  x y)
    24  (Mul16  x y) -> (MULL  x y)
    25  (Mul8   x y) -> (MULL  x y)
    26  (Mul32F x y) -> (MULSS x y)
    27  (Mul64F x y) -> (MULSD x y)
    28  
    29  (Div32F x y) -> (DIVSS x y)
    30  (Div64F x y) -> (DIVSD x y)
    31  
    32  (Div64  x y) -> (DIVQ  x y)
    33  (Div64u x y) -> (DIVQU x y)
    34  (Div32  x y) -> (DIVL  x y)
    35  (Div32u x y) -> (DIVLU x y)
    36  (Div16  x y) -> (DIVW  x y)
    37  (Div16u x y) -> (DIVWU x y)
    38  (Div8   x y) -> (DIVW  (SignExt8to16 x) (SignExt8to16 y))
    39  (Div8u  x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
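// The 8-bit cases have no divide rule of their own: they are widened to 16 bits
// and reuse the word divide. A hedged Go sketch of the equivalent semantics
// (helper names are made up, not compiler code):
//	func div8(x, y int8) int8    { return int8(int16(x) / int16(y)) }    // Div8  -> DIVW of sign-extended operands
//	func div8u(x, y uint8) uint8 { return uint8(uint16(x) / uint16(y)) } // Div8u -> DIVWU of zero-extended operands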
    40  
    41  (Hmul64  x y) -> (HMULQ  x y)
    42  (Hmul64u x y) -> (HMULQU x y)
    43  (Hmul32  x y) -> (HMULL  x y)
    44  (Hmul32u x y) -> (HMULLU x y)
    45  (Hmul16  x y) -> (HMULW  x y)
    46  (Hmul16u x y) -> (HMULWU x y)
    47  (Hmul8   x y) -> (HMULB  x y)
    48  (Hmul8u  x y) -> (HMULBU x y)
    49  
    50  (Avg64u x y) -> (AVGQU x y)
    51  
    52  (Mod64  x y) -> (MODQ  x y)
    53  (Mod64u x y) -> (MODQU x y)
    54  (Mod32  x y) -> (MODL  x y)
    55  (Mod32u x y) -> (MODLU x y)
    56  (Mod16  x y) -> (MODW  x y)
    57  (Mod16u x y) -> (MODWU x y)
    58  (Mod8   x y) -> (MODW  (SignExt8to16 x) (SignExt8to16 y))
    59  (Mod8u  x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
    60  
    61  (And64 x y) -> (ANDQ x y)
    62  (And32 x y) -> (ANDL x y)
    63  (And16 x y) -> (ANDL x y)
    64  (And8  x y) -> (ANDL x y)
    65  
    66  (Or64 x y) -> (ORQ x y)
    67  (Or32 x y) -> (ORL x y)
    68  (Or16 x y) -> (ORL x y)
    69  (Or8  x y) -> (ORL x y)
    70  
    71  (Xor64 x y) -> (XORQ x y)
    72  (Xor32 x y) -> (XORL x y)
    73  (Xor16 x y) -> (XORL x y)
    74  (Xor8  x y) -> (XORL x y)
    75  
    76  (Neg64  x) -> (NEGQ x)
    77  (Neg32  x) -> (NEGL x)
    78  (Neg16  x) -> (NEGL x)
    79  (Neg8   x) -> (NEGL x)
    80  (Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
    81  (Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
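// Float negation just flips the sign bit: PXOR with the bit pattern of -0.0
// (f2i(math.Copysign(0, -1)) is exactly the sign bit). A rough Go equivalent
// for the 64-bit case (illustrative only, uses package math):
//	func neg64F(x float64) float64 {
//		return math.Float64frombits(math.Float64bits(x) ^ (1 << 63)) // flip only the sign bit
//	}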
    82  
    83  (Com64 x) -> (NOTQ x)
    84  (Com32 x) -> (NOTL x)
    85  (Com16 x) -> (NOTL x)
    86  (Com8  x) -> (NOTL x)
    87  
    88  // Lowering boolean ops
    89  (AndB x y) -> (ANDL x y)
    90  (OrB x y) -> (ORL x y)
    91  (Not x) -> (XORLconst [1] x)
    92  
    93  // Lowering pointer arithmetic
    94  (OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
    95  (OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
    96  
    97  // Lowering other arithmetic
    98  // TODO: the CMPQconst 0 below is redundant because BSF already sets the Z flag, but how do we remove it?
    99  (Ctz64 <t> x) -> (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
   100  (Ctz32 <t> x) -> (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
   101  (Ctz16 <t> x) -> (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
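// BSF returns the index of the lowest set bit but leaves its destination
// undefined when the input is zero, so the CMOV substitutes the operand width
// in that case. A Go sketch of the 64-bit rule's semantics (not compiler code):
//	func ctz64(x uint64) uint64 {
//		if x == 0 { // CMPQconst x [0] + CMOVQEQconst select the constant 64
//			return 64
//		}
//		n := uint64(0) // BSFQ: position of the lowest set bit
//		for x&1 == 0 {
//			x >>= 1
//			n++
//		}
//		return n
//	}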
   102  
   103  (Bswap64 x) -> (BSWAPQ x)
   104  (Bswap32 x) -> (BSWAPL x)
   105  
   106  (Sqrt x) -> (SQRTSD x)
   107  
   108  // Lowering extension
   109  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
   110  (SignExt8to16  x) -> (MOVBQSX x)
   111  (SignExt8to32  x) -> (MOVBQSX x)
   112  (SignExt8to64  x) -> (MOVBQSX x)
   113  (SignExt16to32 x) -> (MOVWQSX x)
   114  (SignExt16to64 x) -> (MOVWQSX x)
   115  (SignExt32to64 x) -> (MOVLQSX x)
   116  
   117  (ZeroExt8to16  x) -> (MOVBQZX x)
   118  (ZeroExt8to32  x) -> (MOVBQZX x)
   119  (ZeroExt8to64  x) -> (MOVBQZX x)
   120  (ZeroExt16to32 x) -> (MOVWQZX x)
   121  (ZeroExt16to64 x) -> (MOVWQZX x)
   122  (ZeroExt32to64 x) -> (MOVLQZX x)
   123  
   124  // Lowering truncation
   125  // Because we ignore high parts of registers, truncates are just copies.
   126  (Trunc16to8  x) -> x
   127  (Trunc32to8  x) -> x
   128  (Trunc32to16 x) -> x
   129  (Trunc64to8  x) -> x
   130  (Trunc64to16 x) -> x
   131  (Trunc64to32 x) -> x
   132  
   133  // Lowering float <-> int
   134  (Cvt32to32F x) -> (CVTSL2SS x)
   135  (Cvt32to64F x) -> (CVTSL2SD x)
   136  (Cvt64to32F x) -> (CVTSQ2SS x)
   137  (Cvt64to64F x) -> (CVTSQ2SD x)
   138  
   139  (Cvt32Fto32 x) -> (CVTTSS2SL x)
   140  (Cvt32Fto64 x) -> (CVTTSS2SQ x)
   141  (Cvt64Fto32 x) -> (CVTTSD2SL x)
   142  (Cvt64Fto64 x) -> (CVTTSD2SQ x)
   143  
   144  (Cvt32Fto64F x) -> (CVTSS2SD x)
   145  (Cvt64Fto32F x) -> (CVTSD2SS x)
   146  
   147  // Lowering shifts
   148  // Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
   149  //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
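// A Go sketch of the carry-mask trick for the 64-bit/64-bit case (assumes the
// SBBQcarrymask semantics "-1 if carry set, 0 otherwise"; not compiler code):
//	func lsh64(x, y uint64) uint64 {
//		shifted := x << (y & 63) // SHLQ: the hardware uses only the low 6 bits of y
//		var mask uint64          // SBBQcarrymask: all ones if y < 64, else 0
//		if y < 64 {              // CMPQconst y [64] sets the carry flag iff y < 64
//			mask = ^uint64(0)
//		}
//		return shifted & mask    // ANDQ zeroes the result for oversized shifts
//	}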
   150  (Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   151  (Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   152  (Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   153  (Lsh64x8  <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   154  
   155  (Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   156  (Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   157  (Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   158  (Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   159  
   160  (Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   161  (Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   162  (Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   163  (Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   164  
   165  (Lsh8x64 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   166  (Lsh8x32 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   167  (Lsh8x16 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   168  (Lsh8x8  <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   169  
   170  (Lrot64 <t> x [c]) -> (ROLQconst <t> [c&63] x)
   171  (Lrot32 <t> x [c]) -> (ROLLconst <t> [c&31] x)
   172  (Lrot16 <t> x [c]) -> (ROLWconst <t> [c&15] x)
   173  (Lrot8  <t> x [c]) -> (ROLBconst <t> [c&7] x)
   174  
   175  (Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   176  (Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   177  (Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   178  (Rsh64Ux8  <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   179  
   180  (Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   181  (Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   182  (Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   183  (Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   184  
   185  (Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
   186  (Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
   187  (Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
   188  (Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
   189  
   190  (Rsh8Ux64 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
   191  (Rsh8Ux32 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
   192  (Rsh8Ux16 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
   193  (Rsh8Ux8  <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
   194  
   195  // Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
   196  // We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
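// In Go terms, for the 64-bit/64-bit case (sketch only):
//	func rsh64(x int64, y uint64) int64 {
//		if y >= 64 { // ORing in NOT(SBBQcarrymask) turns y into all ones
//			y = 63   // SARQ then uses y&63 == 63, yielding 0 or -1 as required
//		}
//		return x >> y
//	}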
   197  (Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
   198  (Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
   199  (Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
   200  (Rsh64x8  <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
   201  
   202  (Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
   203  (Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
   204  (Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
   205  (Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
   206  
   207  (Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
   208  (Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
   209  (Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
   210  (Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
   211  
   212  (Rsh8x64 <t> x y)  -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
   213  (Rsh8x32 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
   214  (Rsh8x16 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
   215  (Rsh8x8  <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
   216  
   217  // Lowering comparisons
   218  (Less64  x y) -> (SETL (CMPQ x y))
   219  (Less32  x y) -> (SETL (CMPL x y))
   220  (Less16  x y) -> (SETL (CMPW x y))
   221  (Less8   x y) -> (SETL (CMPB x y))
   222  (Less64U x y) -> (SETB (CMPQ x y))
   223  (Less32U x y) -> (SETB (CMPL x y))
   224  (Less16U x y) -> (SETB (CMPW x y))
   225  (Less8U  x y) -> (SETB (CMPB x y))
   226  // Use SETGF with reversed operands to dodge NaN case
   227  (Less64F x y) -> (SETGF (UCOMISD y x))
   228  (Less32F x y) -> (SETGF (UCOMISS y x))
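// The reversed operands work because UCOMISx reports "unordered" when either
// operand is NaN, and SETGF/SETGEF then produce 0, matching the requirement
// that comparisons involving NaN are false. Sketch of the intended semantics:
//	func less64F(x, y float64) bool {
//		if x != x || y != y { // NaN: unordered result, SETGF yields 0
//			return false
//		}
//		return y > x // SETGF (UCOMISD y x): y strictly greater than x
//	}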
   229  
   230  (Leq64  x y) -> (SETLE (CMPQ x y))
   231  (Leq32  x y) -> (SETLE (CMPL x y))
   232  (Leq16  x y) -> (SETLE (CMPW x y))
   233  (Leq8   x y) -> (SETLE (CMPB x y))
   234  (Leq64U x y) -> (SETBE (CMPQ x y))
   235  (Leq32U x y) -> (SETBE (CMPL x y))
   236  (Leq16U x y) -> (SETBE (CMPW x y))
   237  (Leq8U  x y) -> (SETBE (CMPB x y))
   238  // Use SETGEF with reversed operands to dodge NaN case
   239  (Leq64F x y) -> (SETGEF (UCOMISD y x))
   240  (Leq32F x y) -> (SETGEF (UCOMISS y x))
   241  
   242  (Greater64  x y) -> (SETG (CMPQ x y))
   243  (Greater32  x y) -> (SETG (CMPL x y))
   244  (Greater16  x y) -> (SETG (CMPW x y))
   245  (Greater8   x y) -> (SETG (CMPB x y))
   246  (Greater64U x y) -> (SETA (CMPQ x y))
   247  (Greater32U x y) -> (SETA (CMPL x y))
   248  (Greater16U x y) -> (SETA (CMPW x y))
   249  (Greater8U  x y) -> (SETA (CMPB x y))
   250  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is correct here;
   251  // the bug is accommodated when the assembly is generated.
   252  (Greater64F x y) -> (SETGF (UCOMISD x y))
   253  (Greater32F x y) -> (SETGF (UCOMISS x y))
   254  
   255  (Geq64  x y) -> (SETGE (CMPQ x y))
   256  (Geq32  x y) -> (SETGE (CMPL x y))
   257  (Geq16  x y) -> (SETGE (CMPW x y))
   258  (Geq8   x y) -> (SETGE (CMPB x y))
   259  (Geq64U x y) -> (SETAE (CMPQ x y))
   260  (Geq32U x y) -> (SETAE (CMPL x y))
   261  (Geq16U x y) -> (SETAE (CMPW x y))
   262  (Geq8U  x y) -> (SETAE (CMPB x y))
   263  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is correct here;
   264  // the bug is accommodated when the assembly is generated.
   265  (Geq64F x y) -> (SETGEF (UCOMISD x y))
   266  (Geq32F x y) -> (SETGEF (UCOMISS x y))
   267  
   268  (Eq64  x y) -> (SETEQ (CMPQ x y))
   269  (Eq32  x y) -> (SETEQ (CMPL x y))
   270  (Eq16  x y) -> (SETEQ (CMPW x y))
   271  (Eq8   x y) -> (SETEQ (CMPB x y))
   272  (EqB   x y) -> (SETEQ (CMPB x y))
   273  (EqPtr x y) -> (SETEQ (CMPQ x y))
   274  (Eq64F x y) -> (SETEQF (UCOMISD x y))
   275  (Eq32F x y) -> (SETEQF (UCOMISS x y))
   276  
   277  (Neq64  x y) -> (SETNE (CMPQ x y))
   278  (Neq32  x y) -> (SETNE (CMPL x y))
   279  (Neq16  x y) -> (SETNE (CMPW x y))
   280  (Neq8   x y) -> (SETNE (CMPB x y))
   281  (NeqB   x y) -> (SETNE (CMPB x y))
   282  (NeqPtr x y) -> (SETNE (CMPQ x y))
   283  (Neq64F x y) -> (SETNEF (UCOMISD x y))
   284  (Neq32F x y) -> (SETNEF (UCOMISS x y))
   285  
   286  // Lowering loads
   287  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
   288  (Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
   289  (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
   290  (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
   291  (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
   292  (Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
   293  
   294  // Lowering stores
   295  // These more specific FP versions of the Store pattern must come first so they match before the generic integer stores below.
   296  (Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
   297  (Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
   298  
   299  (Store [8] ptr val mem) -> (MOVQstore ptr val mem)
   300  (Store [4] ptr val mem) -> (MOVLstore ptr val mem)
   301  (Store [2] ptr val mem) -> (MOVWstore ptr val mem)
   302  (Store [1] ptr val mem) -> (MOVBstore ptr val mem)
   303  
   304  // Lowering moves
   305  (Move [0] _ _ mem) -> mem
   306  (Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
   307  (Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
   308  (Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
   309  (Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
   310  (Move [16] dst src mem) -> (MOVOstore dst (MOVOload src mem) mem)
   311  (Move [3] dst src mem) ->
   312  	(MOVBstore [2] dst (MOVBload [2] src mem)
   313  		(MOVWstore dst (MOVWload src mem) mem))
   314  (Move [5] dst src mem) ->
   315  	(MOVBstore [4] dst (MOVBload [4] src mem)
   316  		(MOVLstore dst (MOVLload src mem) mem))
   317  (Move [6] dst src mem) ->
   318  	(MOVWstore [4] dst (MOVWload [4] src mem)
   319  		(MOVLstore dst (MOVLload src mem) mem))
   320  (Move [7] dst src mem) ->
   321  	(MOVLstore [3] dst (MOVLload [3] src mem)
   322  		(MOVLstore dst (MOVLload src mem) mem))
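// Sizes 9 through 15 are handled below with two possibly overlapping 8-byte
// moves: one at offset 0 and one at offset size-8.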
   323  (Move [size] dst src mem) && size > 8 && size < 16 ->
   324  	(MOVQstore [size-8] dst (MOVQload [size-8] src mem)
   325  		(MOVQstore dst (MOVQload src mem) mem))
   326  
   327  // Adjust moves to be a multiple of 16 bytes.
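// The first size%16 bytes are copied up front (8 bytes via MOVQ when the
// remainder is <= 8, 16 bytes via MOVO otherwise); the recursive Move then
// copies the remaining multiple of 16 bytes with both pointers advanced by
// size%16, so the two copies may overlap.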
   328  (Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 <= 8 ->
   329  	(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
   330  		(MOVQstore dst (MOVQload src mem) mem))
   331  (Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 > 8 ->
   332  	(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
   333  		(MOVOstore dst (MOVOload src mem) mem))
   334  
   335  // Medium copying uses Duff's device.
   336  (Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice ->
   337  	(DUFFCOPY [14*(64-size/16)] dst src mem)
   338  // 14 and 64 are magic constants.  14 is the number of bytes to encode:
   339  //	MOVUPS	(SI), X0
   340  //	ADDQ	$16, SI
   341  //	MOVUPS	X0, (DI)
   342  //	ADDQ	$16, DI
   343  // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
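// The auxint above can be read as the following Go expression (hypothetical
// helper name, shown only to spell out the formula):
//	func duffCopyOffset(size int64) int64 {
//		blocks := size / 16       // number of 16-byte blocks to copy
//		return 14 * (64 - blocks) // skip the unneeded leading blocks, 14 bytes each
//	}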
   344  
   345  // Large copying uses REP MOVSQ.
   346  (Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
   347  	(REPMOVSQ dst src (MOVQconst [size/8]) mem)
   348  
   349  // Lowering Zero instructions
   350  (Zero [0] _ mem) -> mem
   351  (Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
   352  (Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
   353  (Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
   354  (Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
   355  
   356  (Zero [3] destptr mem) ->
   357  	(MOVBstoreconst [makeValAndOff(0,2)] destptr
   358  		(MOVWstoreconst [0] destptr mem))
   359  (Zero [5] destptr mem) ->
   360  	(MOVBstoreconst [makeValAndOff(0,4)] destptr
   361  		(MOVLstoreconst [0] destptr mem))
   362  (Zero [6] destptr mem) ->
   363  	(MOVWstoreconst [makeValAndOff(0,4)] destptr
   364  		(MOVLstoreconst [0] destptr mem))
   365  (Zero [7] destptr mem) ->
   366  	(MOVLstoreconst [makeValAndOff(0,3)] destptr
   367  		(MOVLstoreconst [0] destptr mem))
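// makeValAndOff packs a constant value and a byte offset into a single auxint
// for the *storeconst ops (assumed layout: value in the high 32 bits, offset
// in the low 32 bits). A sketch, not the compiler's actual definition:
//	func makeValAndOffSketch(val, off int32) int64 {
//		return int64(val)<<32 | int64(uint32(off))
//	}
// So makeValAndOff(0,2) above means "store the constant 0 at offset 2".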
   368  
   369  // Strip off any fractional word zeroing.
   370  (Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
   371  	(Zero [size-size%8] (ADDQconst destptr [size%8])
   372  		(MOVQstoreconst [0] destptr mem))
   373  
   374  // Zero small numbers of words directly.
   375  (Zero [16] destptr mem) ->
   376  	(MOVQstoreconst [makeValAndOff(0,8)] destptr
   377  		(MOVQstoreconst [0] destptr mem))
   378  (Zero [24] destptr mem) ->
   379  	(MOVQstoreconst [makeValAndOff(0,16)] destptr
   380  		(MOVQstoreconst [makeValAndOff(0,8)] destptr
   381  			(MOVQstoreconst [0] destptr mem)))
   382  (Zero [32] destptr mem) ->
   383  	(MOVQstoreconst [makeValAndOff(0,24)] destptr
   384  		(MOVQstoreconst [makeValAndOff(0,16)] destptr
   385  			(MOVQstoreconst [makeValAndOff(0,8)] destptr
   386  				(MOVQstoreconst [0] destptr mem))))
   387  
   388  // Medium zeroing uses Duff's device.
   389  (Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
   390  	(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
   391  (Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
   392  	(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
   393  
   394  // Large zeroing uses REP STOSQ.
   395  (Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
   396  	(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
   397  
   398  // Lowering constants
   399  (Const8   [val]) -> (MOVLconst [val])
   400  (Const16  [val]) -> (MOVLconst [val])
   401  (Const32  [val]) -> (MOVLconst [val])
   402  (Const64  [val]) -> (MOVQconst [val])
   403  (Const32F [val]) -> (MOVSSconst [val])
   404  (Const64F [val]) -> (MOVSDconst [val])
   405  (ConstNil) -> (MOVQconst [0])
   406  (ConstBool [b]) -> (MOVLconst [b])
   407  
   408  // Lowering calls
   409  (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
   410  (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
   411  (DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
   412  (GoCall [argwid] mem) -> (CALLgo [argwid] mem)
   413  (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
   414  
   415  // Miscellaneous
   416  (Convert <t> x mem) -> (MOVQconvert <t> x mem)
   417  (IsNonNil p) -> (SETNE (TESTQ p p))
   418  (IsInBounds idx len) -> (SETB (CMPQ idx len))
   419  (IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
   420  (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
   421  (GetG mem) -> (LoweredGetG mem)
   422  (GetClosurePtr) -> (LoweredGetClosurePtr)
   423  (Addr {sym} base) -> (LEAQ {sym} base)
   424  (ITab (Load ptr mem)) -> (MOVQload ptr mem)
   425  
   426  // block rewrites
   427  (If (SETL  cmp) yes no) -> (LT  cmp yes no)
   428  (If (SETLE cmp) yes no) -> (LE  cmp yes no)
   429  (If (SETG  cmp) yes no) -> (GT  cmp yes no)
   430  (If (SETGE cmp) yes no) -> (GE  cmp yes no)
   431  (If (SETEQ cmp) yes no) -> (EQ  cmp yes no)
   432  (If (SETNE cmp) yes no) -> (NE  cmp yes no)
   433  (If (SETB  cmp) yes no) -> (ULT cmp yes no)
   434  (If (SETBE cmp) yes no) -> (ULE cmp yes no)
   435  (If (SETA  cmp) yes no) -> (UGT cmp yes no)
   436  (If (SETAE cmp) yes no) -> (UGE cmp yes no)
   437  
   438  // Special case for floating point - LF/LEF not generated
   439  (If (SETGF  cmp) yes no) -> (UGT  cmp yes no)
   440  (If (SETGEF cmp) yes no) -> (UGE  cmp yes no)
   441  (If (SETEQF cmp) yes no) -> (EQF  cmp yes no)
   442  (If (SETNEF cmp) yes no) -> (NEF  cmp yes no)
   443  
   444  (If cond yes no) -> (NE (TESTB cond cond) yes no)
   445  
   446  // ***************************
   447  // Above: lowering rules
   448  // Below: optimizations
   449  // ***************************
   450  // TODO: Should the optimizations be a separate pass?
   451  
   452  // Fold boolean tests into blocks
   453  (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT  cmp yes no)
   454  (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE  cmp yes no)
   455  (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT  cmp yes no)
   456  (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE  cmp yes no)
   457  (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ  cmp yes no)
   458  (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE  cmp yes no)
   459  (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) -> (ULT cmp yes no)
   460  (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
   461  (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) -> (UGT cmp yes no)
   462  (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
   463  
   464  // Special case for floating point - LF/LEF not generated
   465  (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) -> (UGT  cmp yes no)
   466  (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE  cmp yes no)
   467  (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF  cmp yes no)
   468  (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF  cmp yes no)
   469  
   470  // Disabled because it interferes with the pattern match above and makes worse code.
   471  // (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
   472  // (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))
   473  
   474  // fold constants into instructions
   475  (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
   476  (ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
   477  (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
   478  (ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
   479  
   480  (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
   481  (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
   482  (SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
   483  (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
   484  
   485  (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
   486  (MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x)
   487  (MULL x (MOVLconst [c])) -> (MULLconst [c] x)
   488  (MULL (MOVLconst [c]) x) -> (MULLconst [c] x)
   489  
   490  (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
   491  (ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x)
   492  (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
   493  (ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)
   494  
   495  (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
   496  (ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)
   497  
   498  (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
   499  (ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
   500  (ORL x (MOVLconst [c])) -> (ORLconst [c] x)
   501  (ORL (MOVLconst [c]) x) -> (ORLconst [c] x)
   502  
   503  (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
   504  (XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x)
   505  (XORL x (MOVLconst [c])) -> (XORLconst [c] x)
   506  (XORL (MOVLconst [c]) x) -> (XORLconst [c] x)
   507  
   508  (SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
   509  (SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)
   510  
   511  (SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
   512  (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
   513  
   514  (SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
   515  (SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)
   516  
   517  (SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
   518  (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
   519  
   520  (SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x)
   521  (SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x)
   522  
   523  (SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x)
   524  (SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x)
   525  
   526  (SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
   527  (SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)
   528  
   529  (SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
   530  (SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
   531  
   532  (SARW x (MOVQconst [c])) -> (SARWconst [c&31] x)
   533  (SARW x (MOVLconst [c])) -> (SARWconst [c&31] x)
   534  
   535  (SARB x (MOVQconst [c])) -> (SARBconst [c&31] x)
   536  (SARB x (MOVLconst [c])) -> (SARBconst [c&31] x)
   537  
   538  (SARL x (ANDLconst [31] y)) -> (SARL x y)
   539  (SARQ x (ANDQconst [63] y)) -> (SARQ x y)
   540  
   541  (SHLL x (ANDLconst [31] y)) -> (SHLL x y)
   542  (SHLQ x (ANDQconst [63] y)) -> (SHLQ x y)
   543  
   544  (SHRL x (ANDLconst [31] y)) -> (SHRL x y)
   545  (SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
   546  
   547  // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
   548  // because the x86 instructions are defined to use all 5 bits of the shift even
   549  // for the small shifts. I don't think we'll ever generate a weird shift such as
   550  // (SHRW x (MOVLconst [24])), but just in case.
   551  
   552  (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
   553  (CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
   554  (CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
   555  (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
   556  (CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
   557  (CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
   558  (CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
   559  (CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
   560  
   561  // Using MOVBQZX instead of ANDQ is cheaper.
   562  (ANDQconst [0xFF] x) -> (MOVBQZX x)
   563  (ANDQconst [0xFFFF] x) -> (MOVWQZX x)
   564  (ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x)
   565  
   566  // strength reduction
   567  // Assumes the following instruction costs, from https://gmplib.org/~tege/x86-timing.pdf:
   568  //    1 - addq, shlq, leaq, negq
   569  //    3 - imulq
   570  // This limits the rewrites to two instructions.
   571  // TODO: 27, 81
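// LEAQ1/LEAQ2/LEAQ4/LEAQ8 a b compute a+b, a+2*b, a+4*b, a+8*b in a single
// instruction, so e.g. the [11] rule below builds 11*x as x + 2*(x + 4*x).
// Sketch (not compiler code):
//	func leaq2(a, b int64) int64 { return a + 2*b }
//	func leaq4(a, b int64) int64 { return a + 4*b }
//	func mul11(x int64) int64    { return leaq2(x, leaq4(x, x)) } // == 11*x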
   572  (MULQconst [-1] x) -> (NEGQ x)
   573  (MULQconst [0] _) -> (MOVQconst [0])
   574  (MULQconst [1] x) -> x
   575  (MULQconst [3] x) -> (LEAQ2 x x)
   576  (MULQconst [5] x) -> (LEAQ4 x x)
   577  (MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x)
   578  (MULQconst [9] x) -> (LEAQ8 x x)
   579  (MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x))
   580  (MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x))
   581  (MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
   582  (MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x))
   583  (MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x))
   584  (MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x))
   585  (MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x))
   586  
   587  (MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x)
   588  (MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
   589  (MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
   590  (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
   591  (MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
   592  (MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
   593  (MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
   594  (MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
   595  (MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
   596  
   597  // combine add/shift into LEAQ
   598  (ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
   599  (ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y)
   600  (ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y)
   601  (ADDQ x (ADDQ y y)) -> (LEAQ2 x y)
   602  (ADDQ x (ADDQ x y)) -> (LEAQ2 y x)
   603  (ADDQ x (ADDQ y x)) -> (LEAQ2 y x)
   604  
   605  // combine ADDQ/ADDQconst into LEAQ1
   606  (ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y)
   607  (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
   608  (ADDQ x (ADDQconst [c] y)) -> (LEAQ1 [c] x y)
   609  
   610  // fold ADDQ into LEAQ
   611  (ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   612  (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
   613  (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   614  (ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   615  (ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
   616  
   617  // fold ADDQconst into LEAQx
   618  (ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
   619  (ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
   620  (ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
   621  (ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
   622  (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
   623  (LEAQ1 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+d)   && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
   624  (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
   625  (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
   626  (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
   627  (LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
   628  (LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
   629  (LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
   630  
   631  // fold shifts into LEAQx
   632  (LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
   633  (LEAQ1 [c] {s} (SHLQconst [1] x) y) -> (LEAQ2 [c] {s} y x)
   634  (LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
   635  (LEAQ1 [c] {s} (SHLQconst [2] x) y) -> (LEAQ4 [c] {s} y x)
   636  (LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
   637  (LEAQ1 [c] {s} (SHLQconst [3] x) y) -> (LEAQ8 [c] {s} y x)
   638  
   639  (LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
   640  (LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
   641  (LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)
   642  
   643  // reverse ordering of compare instruction
   644  (SETL (InvertFlags x)) -> (SETG x)
   645  (SETG (InvertFlags x)) -> (SETL x)
   646  (SETB (InvertFlags x)) -> (SETA x)
   647  (SETA (InvertFlags x)) -> (SETB x)
   648  (SETLE (InvertFlags x)) -> (SETGE x)
   649  (SETGE (InvertFlags x)) -> (SETLE x)
   650  (SETBE (InvertFlags x)) -> (SETAE x)
   651  (SETAE (InvertFlags x)) -> (SETBE x)
   652  (SETEQ (InvertFlags x)) -> (SETEQ x)
   653  (SETNE (InvertFlags x)) -> (SETNE x)
   654  
   655  // sign extended loads
   656  // Note: The combined instruction must end up in the same block
   657  // as the original load. If not, we end up making a value with
   658  // memory type live in two different blocks, which can lead to
   659  // multiple memory values alive simultaneously.
   660  // Make sure we don't combine these ops if the load has another use.
   661  // This prevents a single load from being split into multiple loads
   662  // which then might return different values.  See test/atomicload.go.
   663  (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   664  (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   665  (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   666  (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   667  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   668  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   669  
   670  (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
   671  (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
   672  (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
   673  (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
   674  (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
   675  
   676  // replace load from same location as preceding store with copy
   677  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   678  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   679  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   680  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   681  
   682  // Fold extensions and ANDs together.
   683  (MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
   684  (MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
   685  (MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
   686  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
   687  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
   688  (MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)
   689  
   690  // Don't extend before storing
   691  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
   692  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   693  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   694  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
   695  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   696  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   697  
   698  // fold constants into memory operations
   699  // Note that this is not always a good idea because if not all the uses of
   700  // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
   701  // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
   702  // Nevertheless, let's do it!
   703  (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload  [off1+off2] {sym} ptr mem)
   704  (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
   705  (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
   706  (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
   707  (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
   708  (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
   709  (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload  [off1+off2] {sym} ptr mem)
   710  
   711  (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore  [off1+off2] {sym} ptr val mem)
   712  (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
   713  (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
   714  (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
   715  (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
   716  (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
   717  (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore  [off1+off2] {sym} ptr val mem)
   718  
   719  // Fold constants into stores.
   720  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
   721  	(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
   722  (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   723  	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
   724  (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   725  	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
   726  (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   727  	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
   728  
   729  // Fold address offsets into constant stores.
   730  (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   731  	(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   732  (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   733  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   734  (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   735  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   736  (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   737  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   738  
   739  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
   740  // what variables are being read/written by the ops.
   741  (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   742  	(MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   743  (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   744  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   745  (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   746  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   747  (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   748  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   749  (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   750  	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   751  (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   752  	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   753  (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   754  	(MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   755  
   756  (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   757  	(MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   758  (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   759  	(MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   760  (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   761  	(MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   762  
   763  (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   764  	(MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   765  (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   766  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   767  (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   768  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   769  (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   770  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   771  (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   772  	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   773  (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   774  	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   775  (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   776  	(MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   777  
   778  (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
   779  	(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   780  (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
   781  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   782  (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
   783  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   784  (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
   785  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   786  
   787  // generating indexed loads and stores
   788  (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   789  	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   790  (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   791  	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   792  (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   793  	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   794  (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   795  	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   796  (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   797  	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   798  (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   799  	(MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   800  (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   801  	(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   802  (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   803  	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   804  (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   805  	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   806  (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   807  	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   808  (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   809  	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   810  
   811  (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   812  	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   813  (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   814  	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   815  (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   816  	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   817  (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   818  	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   819  (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   820  	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   821  (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   822  	(MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   823  (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   824  	(MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   825  (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   826  	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   827  (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   828  	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   829  (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   830  	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   831  (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   832  	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   833  
   834  (MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
   835  (MOVWload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
   836  (MOVLload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
   837  (MOVQload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVQloadidx1 [off] {sym} ptr idx mem)
   838  (MOVSSload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
   839  (MOVSDload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
   840  (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
   841  (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
   842  (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
   843  (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVQstoreidx1 [off] {sym} ptr idx val mem)
   844  (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
   845  (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
   846  
   847  (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   848  	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   849  (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   850  	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   851  (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   852  	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   853  (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   854  	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   855  (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   856  	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   857  (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   858  	(MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   859  (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   860  	(MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   861  
   862  (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
   863  (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
   864  (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
   865  (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
   866  
   867  // combine SHLQ into indexed loads and stores
   868  (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
   869  (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
   870  (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem)
   871  (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
   872  (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
   873  (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem)
   874  (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
   875  (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
   876  (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
   877  
   878  // combine ADDQ into indexed loads and stores
   879  (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
   880  (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
   881  (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
   882  (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
   883  (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
   884  (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
   885  (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
   886  (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
   887  (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
   888  (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
   889  (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
   890  
   891  (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
   892  (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
   893  (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
   894  (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
   895  (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
   896  (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
   897  (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
   898  (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
   899  (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
   900  (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
   901  (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
   902  
   903  (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
   904  (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
   905  (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
   906  (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
   907  (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
   908  (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
   909  (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
   910  (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
   911  (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
   912  (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
   913  (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
   914  
   915  (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
   916  (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
   917  (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
   918  (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
   919  (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
   920  (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
   921  (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
   922  (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
   923  (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
   924  (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
   925  (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
   926  
   927  (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   928  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   929  (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   930  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   931  (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   932  	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   933  (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   934  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   935  (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   936  	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   937  (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   938  	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   939  (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
   940  	(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   941  
   942  (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   943  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   944  (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   945  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   946  (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   947  	(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
   948  (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   949  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   950  (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   951  	(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
   952  (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   953  	(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   954  (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
   955  	(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
   956  
   957  // fold LEAQs together
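// (LEAQ [off] {sym} x) computes x+off+sym; LEAQ1/2/4/8 add a second operand
// scaled by 1/2/4/8. The folds below merge only the offsets and symbols,
// never the scale.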
   958  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   959        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
   960  
   961  // LEAQ into LEAQ1
   962  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   963         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   964  (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
   965         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   966  
   967  // LEAQ1 into LEAQ
   968  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   969         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   970  
   971  // LEAQ into LEAQ[248]
   972  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   973         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   974  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   975         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   976  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   977         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   978  
   979  // LEAQ[248] into LEAQ
   980  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   981        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   982  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   983        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   984  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   985        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   986  
   987  // Absorb InvertFlags into branches.
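// InvertFlags is the result of the same comparison with its operands swapped,
// so the signed and unsigned orderings flip while EQ and NE are unchanged.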
   988  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   989  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   990  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   991  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   992  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
   993  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
   994  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
   995  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
   996  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   997  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   998  
   999  // Constant comparisons.
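// The flag constants record both orderings at once: e.g. FlagLT_ULT means the
// signed compare was less-than and the unsigned compare was below, while
// FlagGT_ULT means signed greater-than but unsigned below, and so on.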
  1000  (CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
  1001  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
  1002  (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
  1003  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
  1004  (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
  1005  (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
  1006  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
  1007  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
  1008  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
  1009  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
  1010  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
  1011  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
  1012  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
  1013  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
  1014  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
  1015  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
  1016  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
  1017  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
  1018  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
  1019  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
  1020  
  1021  // Other known comparisons.
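// For example, a MOVBQZX result is zero-extended and therefore at most 0xFF,
// so comparing it against any larger constant is always below (unsigned) and
// less-than (signed), i.e. FlagLT_ULT.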
  1022  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
  1023  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
  1024  (CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
  1025  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
  1026  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
  1027  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
  1028  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
  1029  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
  1030  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
  1031  // TODO: DIVxU also.
  1032  
  1033  // Absorb flag constants into SBB ops.
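// SBBQcarrymask/SBBLcarrymask are all ones when the carry flag is set and zero
// otherwise. The *_ULT flag constants are the ones with CF=1, so they fold to
// -1; the rest fold to 0.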
  1034  (SBBQcarrymask (FlagEQ)) -> (MOVQconst [0])
  1035  (SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1])
  1036  (SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0])
  1037  (SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1])
  1038  (SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0])
  1039  (SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
  1040  (SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
  1041  (SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
  1042  (SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
  1043  (SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
  1044  
  1045  // Absorb flag constants into branches.
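// A First block unconditionally takes its first successor, so (First nil yes no)
// always branches to yes; swapping the successors to (First nil no yes) selects no.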
  1046  (EQ (FlagEQ) yes no) -> (First nil yes no)
  1047  (EQ (FlagLT_ULT) yes no) -> (First nil no yes)
  1048  (EQ (FlagLT_UGT) yes no) -> (First nil no yes)
  1049  (EQ (FlagGT_ULT) yes no) -> (First nil no yes)
  1050  (EQ (FlagGT_UGT) yes no) -> (First nil no yes)
  1051  
  1052  (NE (FlagEQ) yes no) -> (First nil no yes)
  1053  (NE (FlagLT_ULT) yes no) -> (First nil yes no)
  1054  (NE (FlagLT_UGT) yes no) -> (First nil yes no)
  1055  (NE (FlagGT_ULT) yes no) -> (First nil yes no)
  1056  (NE (FlagGT_UGT) yes no) -> (First nil yes no)
  1057  
  1058  (LT (FlagEQ) yes no) -> (First nil no yes)
  1059  (LT (FlagLT_ULT) yes no) -> (First nil yes no)
  1060  (LT (FlagLT_UGT) yes no) -> (First nil yes no)
  1061  (LT (FlagGT_ULT) yes no) -> (First nil no yes)
  1062  (LT (FlagGT_UGT) yes no) -> (First nil no yes)
  1063  
  1064  (LE (FlagEQ) yes no) -> (First nil yes no)
  1065  (LE (FlagLT_ULT) yes no) -> (First nil yes no)
  1066  (LE (FlagLT_UGT) yes no) -> (First nil yes no)
  1067  (LE (FlagGT_ULT) yes no) -> (First nil no yes)
  1068  (LE (FlagGT_UGT) yes no) -> (First nil no yes)
  1069  
  1070  (GT (FlagEQ) yes no) -> (First nil no yes)
  1071  (GT (FlagLT_ULT) yes no) -> (First nil no yes)
  1072  (GT (FlagLT_UGT) yes no) -> (First nil no yes)
  1073  (GT (FlagGT_ULT) yes no) -> (First nil yes no)
  1074  (GT (FlagGT_UGT) yes no) -> (First nil yes no)
  1075  
  1076  (GE (FlagEQ) yes no) -> (First nil yes no)
  1077  (GE (FlagLT_ULT) yes no) -> (First nil no yes)
  1078  (GE (FlagLT_UGT) yes no) -> (First nil no yes)
  1079  (GE (FlagGT_ULT) yes no) -> (First nil yes no)
  1080  (GE (FlagGT_UGT) yes no) -> (First nil yes no)
  1081  
  1082  (ULT (FlagEQ) yes no) -> (First nil no yes)
  1083  (ULT (FlagLT_ULT) yes no) -> (First nil yes no)
  1084  (ULT (FlagLT_UGT) yes no) -> (First nil no yes)
  1085  (ULT (FlagGT_ULT) yes no) -> (First nil yes no)
  1086  (ULT (FlagGT_UGT) yes no) -> (First nil no yes)
  1087  
  1088  (ULE (FlagEQ) yes no) -> (First nil yes no)
  1089  (ULE (FlagLT_ULT) yes no) -> (First nil yes no)
  1090  (ULE (FlagLT_UGT) yes no) -> (First nil no yes)
  1091  (ULE (FlagGT_ULT) yes no) -> (First nil yes no)
  1092  (ULE (FlagGT_UGT) yes no) -> (First nil no yes)
  1093  
  1094  (UGT (FlagEQ) yes no) -> (First nil no yes)
  1095  (UGT (FlagLT_ULT) yes no) -> (First nil no yes)
  1096  (UGT (FlagLT_UGT) yes no) -> (First nil yes no)
  1097  (UGT (FlagGT_ULT) yes no) -> (First nil no yes)
  1098  (UGT (FlagGT_UGT) yes no) -> (First nil yes no)
  1099  
  1100  (UGE (FlagEQ) yes no) -> (First nil yes no)
  1101  (UGE (FlagLT_ULT) yes no) -> (First nil no yes)
  1102  (UGE (FlagLT_UGT) yes no) -> (First nil yes no)
  1103  (UGE (FlagGT_ULT) yes no) -> (First nil no yes)
  1104  (UGE (FlagGT_UGT) yes no) -> (First nil yes no)
  1105  
  1106  // Absorb flag constants into SETxx ops.
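// SETxx materializes a comparison result as 0 or 1, so once the flags are a
// known constant the whole op folds to a MOVLconst of 0 or 1.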
  1107  (SETEQ (FlagEQ)) -> (MOVLconst [1])
  1108  (SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
  1109  (SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
  1110  (SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
  1111  (SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
  1112  
  1113  (SETNE (FlagEQ)) -> (MOVLconst [0])
  1114  (SETNE (FlagLT_ULT)) -> (MOVLconst [1])
  1115  (SETNE (FlagLT_UGT)) -> (MOVLconst [1])
  1116  (SETNE (FlagGT_ULT)) -> (MOVLconst [1])
  1117  (SETNE (FlagGT_UGT)) -> (MOVLconst [1])
  1118  
  1119  (SETL (FlagEQ)) -> (MOVLconst [0])
  1120  (SETL (FlagLT_ULT)) -> (MOVLconst [1])
  1121  (SETL (FlagLT_UGT)) -> (MOVLconst [1])
  1122  (SETL (FlagGT_ULT)) -> (MOVLconst [0])
  1123  (SETL (FlagGT_UGT)) -> (MOVLconst [0])
  1124  
  1125  (SETLE (FlagEQ)) -> (MOVLconst [1])
  1126  (SETLE (FlagLT_ULT)) -> (MOVLconst [1])
  1127  (SETLE (FlagLT_UGT)) -> (MOVLconst [1])
  1128  (SETLE (FlagGT_ULT)) -> (MOVLconst [0])
  1129  (SETLE (FlagGT_UGT)) -> (MOVLconst [0])
  1130  
  1131  (SETG (FlagEQ)) -> (MOVLconst [0])
  1132  (SETG (FlagLT_ULT)) -> (MOVLconst [0])
  1133  (SETG (FlagLT_UGT)) -> (MOVLconst [0])
  1134  (SETG (FlagGT_ULT)) -> (MOVLconst [1])
  1135  (SETG (FlagGT_UGT)) -> (MOVLconst [1])
  1136  
  1137  (SETGE (FlagEQ)) -> (MOVLconst [1])
  1138  (SETGE (FlagLT_ULT)) -> (MOVLconst [0])
  1139  (SETGE (FlagLT_UGT)) -> (MOVLconst [0])
  1140  (SETGE (FlagGT_ULT)) -> (MOVLconst [1])
  1141  (SETGE (FlagGT_UGT)) -> (MOVLconst [1])
  1142  
  1143  (SETB (FlagEQ)) -> (MOVLconst [0])
  1144  (SETB (FlagLT_ULT)) -> (MOVLconst [1])
  1145  (SETB (FlagLT_UGT)) -> (MOVLconst [0])
  1146  (SETB (FlagGT_ULT)) -> (MOVLconst [1])
  1147  (SETB (FlagGT_UGT)) -> (MOVLconst [0])
  1148  
  1149  (SETBE (FlagEQ)) -> (MOVLconst [1])
  1150  (SETBE (FlagLT_ULT)) -> (MOVLconst [1])
  1151  (SETBE (FlagLT_UGT)) -> (MOVLconst [0])
  1152  (SETBE (FlagGT_ULT)) -> (MOVLconst [1])
  1153  (SETBE (FlagGT_UGT)) -> (MOVLconst [0])
  1154  
  1155  (SETA (FlagEQ)) -> (MOVLconst [0])
  1156  (SETA (FlagLT_ULT)) -> (MOVLconst [0])
  1157  (SETA (FlagLT_UGT)) -> (MOVLconst [1])
  1158  (SETA (FlagGT_ULT)) -> (MOVLconst [0])
  1159  (SETA (FlagGT_UGT)) -> (MOVLconst [1])
  1160  
  1161  (SETAE (FlagEQ)) -> (MOVLconst [1])
  1162  (SETAE (FlagLT_ULT)) -> (MOVLconst [0])
  1163  (SETAE (FlagLT_UGT)) -> (MOVLconst [1])
  1164  (SETAE (FlagGT_ULT)) -> (MOVLconst [0])
  1165  (SETAE (FlagGT_UGT)) -> (MOVLconst [1])
  1166  
  1167  // Remove redundant *const ops
  1168  (ADDQconst [0] x) -> x
  1169  (ADDLconst [c] x) && int32(c)==0 -> x
  1170  (SUBQconst [0] x) -> x
  1171  (SUBLconst [c] x) && int32(c) == 0 -> x
  1172  (ANDQconst [0] _)                 -> (MOVQconst [0])
  1173  (ANDLconst [c] _) && int32(c)==0  -> (MOVLconst [0])
  1174  (ANDQconst [-1] x)                -> x
  1175  (ANDLconst [c] x) && int32(c)==-1 -> x
  1176  (ORQconst [0] x)                  -> x
  1177  (ORLconst [c] x) && int32(c)==0   -> x
  1178  (ORQconst [-1] _)                 -> (MOVQconst [-1])
  1179  (ORLconst [c] _) && int32(c)==-1  -> (MOVLconst [-1])
  1180  (XORQconst [0] x)                  -> x
  1181  (XORLconst [c] x) && int32(c)==0   -> x
  1182  // TODO: since we got rid of the W/B versions, we might miss
  1183  // things like (ANDLconst [0x100] x) which were formerly
  1184  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1185  // If we cared, we might do:
  1186  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1187  
  1188  // Convert constant subtracts to constant adds
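// The c != -(1<<31) guard matters because negating the most negative 32-bit
// immediate does not fit back into 32 bits.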
  1189  (SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
  1190  (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
  1191  
  1192  // generic constant folding
  1193  // TODO: more of this
  1194  (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
  1195  (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
  1196  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
  1197  (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
  1198  (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
  1199  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
  1200  (SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1201  (SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1202  (SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1203  (SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
  1204  (NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
  1205  (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
  1206  (MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
  1207  (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
  1208  (ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
  1209  (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
  1210  (ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
  1211  (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
  1212  (XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
  1213  (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
  1214  (NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
  1215  (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
  1216  
  1217  // generic simplifications
  1218  // TODO: more of this
  1219  (ADDQ x (NEGQ y)) -> (SUBQ x y)
  1220  (ADDL x (NEGL y)) -> (SUBL x y)
  1221  (SUBQ x x) -> (MOVQconst [0])
  1222  (SUBL x x) -> (MOVLconst [0])
  1223  (ANDQ x x) -> x
  1224  (ANDL x x) -> x
  1225  (ORQ x x) -> x
  1226  (ORL x x) -> x
  1227  (XORQ x x) -> (MOVQconst [0])
  1228  (XORL x x) -> (MOVLconst [0])
  1229  
  1230  // checking AND against 0.
  1231  (CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
  1232  (CMPLconst (ANDL x y) [0]) -> (TESTL x y)
  1233  (CMPWconst (ANDL x y) [0]) -> (TESTW x y)
  1234  (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
  1235  (CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
  1236  (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
  1237  (CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
  1238  (CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
  1239  
  1240  // TEST %reg,%reg is shorter than CMP
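// For example, TESTQ AX, AX encodes in 3 bytes while CMPQ $0, AX needs 4, so
// comparing a register against zero is roughly one byte shorter this way.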
  1241  (CMPQconst x [0]) -> (TESTQ x x)
  1242  (CMPLconst x [0]) -> (TESTL x x)
  1243  (CMPWconst x [0]) -> (TESTW x x)
  1244  (CMPBconst x [0]) -> (TESTB x x)
  1245  
  1246  // Optimizing conditional moves
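// CMOVxEQconst x flags [c] yields c when the flags indicate equality and x
// otherwise, so with a known flag constant the op folds to either the constant
// or to x.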
  1247  (CMOVQEQconst x (InvertFlags y) [c]) -> (CMOVQNEconst x y [c])
  1248  (CMOVLEQconst x (InvertFlags y) [c]) -> (CMOVLNEconst x y [c])
  1249  (CMOVWEQconst x (InvertFlags y) [c]) -> (CMOVWNEconst x y [c])
  1250  
  1251  (CMOVQEQconst _ (FlagEQ) [c]) -> (Const64 [c])
  1252  (CMOVLEQconst _ (FlagEQ) [c]) -> (Const32 [c])
  1253  (CMOVWEQconst _ (FlagEQ) [c]) -> (Const16 [c])
  1254  
  1255  (CMOVQEQconst x (FlagLT_ULT)) -> x
  1256  (CMOVLEQconst x (FlagLT_ULT)) -> x
  1257  (CMOVWEQconst x (FlagLT_ULT)) -> x
  1258  
  1259  (CMOVQEQconst x (FlagLT_UGT)) -> x
  1260  (CMOVLEQconst x (FlagLT_UGT)) -> x
  1261  (CMOVWEQconst x (FlagLT_UGT)) -> x
  1262  
  1263  (CMOVQEQconst x (FlagGT_ULT)) -> x
  1264  (CMOVLEQconst x (FlagGT_ULT)) -> x
  1265  (CMOVWEQconst x (FlagGT_ULT)) -> x
  1266  
  1267  (CMOVQEQconst x (FlagGT_UGT)) -> x
  1268  (CMOVLEQconst x (FlagGT_UGT)) -> x
  1269  (CMOVWEQconst x (FlagGT_UGT)) -> x
  1270  
  1271  // Combining byte loads into larger (unaligned) loads.
  1272  // There are many ways these combinations could occur.  This is
  1273  // designed to match the way encoding/binary.LittleEndian does it.
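// A minimal sketch of the kind of Go source this pattern arises from, modeled
// on the encoding/binary little-endian decoding idiom (the function name and
// signature here are illustrative only):
//
//	func readUint32(b []byte, i int) uint32 {
//		return uint32(b[i]) | uint32(b[i+1])<<8 | uint32(b[i+2])<<16 | uint32(b[i+3])<<24
//	}
//
// Each b[i+k] lowers to a MOVBload at offset i+k, each shift to a SHLLconst,
// and the |s to the ORL tree matched below; when every piece has a single use,
// the whole tree collapses into one wider load.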
  1274  (ORL                  x0:(MOVBload [i]   {s} p mem)
  1275      s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
  1276    && x0.Uses == 1
  1277    && x1.Uses == 1
  1278    && s0.Uses == 1
  1279    && mergePoint(b,x0,x1) != nil
  1280    && clobber(x0)
  1281    && clobber(x1)
  1282    && clobber(s0)
  1283    -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
  1284  
  1285  (ORL o0:(ORL o1:(ORL
  1286                         x0:(MOVBload [i]   {s} p mem)
  1287      s0:(SHLLconst [8]  x1:(MOVBload [i+1] {s} p mem)))
  1288      s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))
  1289      s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
  1290    && x0.Uses == 1
  1291    && x1.Uses == 1
  1292    && x2.Uses == 1
  1293    && x3.Uses == 1
  1294    && s0.Uses == 1
  1295    && s1.Uses == 1
  1296    && s2.Uses == 1
  1297    && o0.Uses == 1
  1298    && o1.Uses == 1
  1299    && mergePoint(b,x0,x1,x2,x3) != nil
  1300    && clobber(x0)
  1301    && clobber(x1)
  1302    && clobber(x2)
  1303    && clobber(x3)
  1304    && clobber(s0)
  1305    && clobber(s1)
  1306    && clobber(s2)
  1307    && clobber(o0)
  1308    && clobber(o1)
  1309    -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
  1310  
  1311  (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ
  1312                         x0:(MOVBload [i]   {s} p mem)
  1313      s0:(SHLQconst [8]  x1:(MOVBload [i+1] {s} p mem)))
  1314      s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))
  1315      s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))
  1316      s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))
  1317      s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))
  1318      s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))
  1319      s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
  1320    && x0.Uses == 1
  1321    && x1.Uses == 1
  1322    && x2.Uses == 1
  1323    && x3.Uses == 1
  1324    && x4.Uses == 1
  1325    && x5.Uses == 1
  1326    && x6.Uses == 1
  1327    && x7.Uses == 1
  1328    && s0.Uses == 1
  1329    && s1.Uses == 1
  1330    && s2.Uses == 1
  1331    && s3.Uses == 1
  1332    && s4.Uses == 1
  1333    && s5.Uses == 1
  1334    && s6.Uses == 1
  1335    && o0.Uses == 1
  1336    && o1.Uses == 1
  1337    && o2.Uses == 1
  1338    && o3.Uses == 1
  1339    && o4.Uses == 1
  1340    && o5.Uses == 1
  1341    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  1342    && clobber(x0)
  1343    && clobber(x1)
  1344    && clobber(x2)
  1345    && clobber(x3)
  1346    && clobber(x4)
  1347    && clobber(x5)
  1348    && clobber(x6)
  1349    && clobber(x7)
  1350    && clobber(s0)
  1351    && clobber(s1)
  1352    && clobber(s2)
  1353    && clobber(s3)
  1354    && clobber(s4)
  1355    && clobber(s5)
  1356    && clobber(s6)
  1357    && clobber(o0)
  1358    && clobber(o1)
  1359    && clobber(o2)
  1360    && clobber(o3)
  1361    && clobber(o4)
  1362    && clobber(o5)
  1363    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
  1364  
  1365  (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)
  1366      s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
  1367    && x0.Uses == 1
  1368    && x1.Uses == 1
  1369    && s0.Uses == 1
  1370    && mergePoint(b,x0,x1) != nil
  1371    && clobber(x0)
  1372    && clobber(x1)
  1373    && clobber(s0)
  1374    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
  1375  
  1376  (ORL o0:(ORL o1:(ORL
  1377                         x0:(MOVBloadidx1 [i]   {s} p idx mem)
  1378      s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
  1379      s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
  1380      s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
  1381    && x0.Uses == 1
  1382    && x1.Uses == 1
  1383    && x2.Uses == 1
  1384    && x3.Uses == 1
  1385    && s0.Uses == 1
  1386    && s1.Uses == 1
  1387    && s2.Uses == 1
  1388    && o0.Uses == 1
  1389    && o1.Uses == 1
  1390    && mergePoint(b,x0,x1,x2,x3) != nil
  1391    && clobber(x0)
  1392    && clobber(x1)
  1393    && clobber(x2)
  1394    && clobber(x3)
  1395    && clobber(s0)
  1396    && clobber(s1)
  1397    && clobber(s2)
  1398    && clobber(o0)
  1399    && clobber(o1)
  1400    -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
  1401  
  1402  (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ
  1403                         x0:(MOVBloadidx1 [i]   {s} p idx mem)
  1404      s0:(SHLQconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
  1405      s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
  1406      s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
  1407      s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))
  1408      s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))
  1409      s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))
  1410      s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
  1411    && x0.Uses == 1
  1412    && x1.Uses == 1
  1413    && x2.Uses == 1
  1414    && x3.Uses == 1
  1415    && x4.Uses == 1
  1416    && x5.Uses == 1
  1417    && x6.Uses == 1
  1418    && x7.Uses == 1
  1419    && s0.Uses == 1
  1420    && s1.Uses == 1
  1421    && s2.Uses == 1
  1422    && s3.Uses == 1
  1423    && s4.Uses == 1
  1424    && s5.Uses == 1
  1425    && s6.Uses == 1
  1426    && o0.Uses == 1
  1427    && o1.Uses == 1
  1428    && o2.Uses == 1
  1429    && o3.Uses == 1
  1430    && o4.Uses == 1
  1431    && o5.Uses == 1
  1432    && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
  1433    && clobber(x0)
  1434    && clobber(x1)
  1435    && clobber(x2)
  1436    && clobber(x3)
  1437    && clobber(x4)
  1438    && clobber(x5)
  1439    && clobber(x6)
  1440    && clobber(x7)
  1441    && clobber(s0)
  1442    && clobber(s1)
  1443    && clobber(s2)
  1444    && clobber(s3)
  1445    && clobber(s4)
  1446    && clobber(s5)
  1447    && clobber(s6)
  1448    && clobber(o0)
  1449    && clobber(o1)
  1450    && clobber(o2)
  1451    && clobber(o3)
  1452    && clobber(o4)
  1453    && clobber(o5)
  1454    -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
  1455  
  1456  // Combine constant stores into larger (unaligned) stores.
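// Worked example (little-endian x86 byte order): if x stores 0x12 at offset o
// and the outer op stores 0x34 at o+1, the merged value is
// 0x12&0xff | 0x34<<8 = 0x3412, and a 16-bit store of 0x3412 at o still places
// 0x12 at o and 0x34 at o+1.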
  1457  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  1458    && x.Uses == 1
  1459    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1460    && clobber(x)
  1461    -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  1462  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  1463    && x.Uses == 1
  1464    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1465    && clobber(x)
  1466    -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  1467  (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  1468    && x.Uses == 1
  1469    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  1470    && clobber(x)
  1471    -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  1472  
  1473  (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  1474    && x.Uses == 1
  1475    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1476    && clobber(x)
  1477    -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  1478  (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  1479    && x.Uses == 1
  1480    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1481    && clobber(x)
  1482    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
  1483  (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
  1484    && x.Uses == 1
  1485    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  1486    && clobber(x)
  1487    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  1488  
  1489  (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  1490    && x.Uses == 1
  1491    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1492    && clobber(x)
  1493    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
  1494  (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
  1495    && x.Uses == 1
  1496    && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
  1497    && clobber(x)
  1498    -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  1499  
  1500  // Combine stores into larger (unaligned) stores.
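// A minimal sketch of the store pattern this targets, modeled on the
// encoding/binary little-endian encoding idiom (names here are illustrative):
//
//	func writeUint16(b []byte, i int, v uint16) {
//		b[i] = byte(v)
//		b[i+1] = byte(v >> 8)
//	}
//
// The second assignment becomes a MOVBstore of (SHRQconst [8] w) one byte past
// the MOVBstore of w, which the first rule below merges into a single MOVWstore.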
  1501  (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  1502    && x.Uses == 1
  1503    && clobber(x)
  1504    -> (MOVWstore [i-1] {s} p w mem)
  1505  (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
  1506    && x.Uses == 1
  1507    && clobber(x)
  1508    -> (MOVWstore [i-1] {s} p w0 mem)
  1509  (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  1510    && x.Uses == 1
  1511    && clobber(x)
  1512    -> (MOVLstore [i-2] {s} p w mem)
  1513  (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
  1514    && x.Uses == 1
  1515    && clobber(x)
  1516    -> (MOVLstore [i-2] {s} p w0 mem)
  1517  (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  1518    && x.Uses == 1
  1519    && clobber(x)
  1520    -> (MOVQstore [i-4] {s} p w mem)
  1521  (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  1522    && x.Uses == 1
  1523    && clobber(x)
  1524    -> (MOVQstore [i-4] {s} p w0 mem)
  1525  
  1526  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  1527    && x.Uses == 1
  1528    && clobber(x)
  1529    -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
  1530  (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
  1531    && x.Uses == 1
  1532    && clobber(x)
  1533    -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  1534  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  1535    && x.Uses == 1
  1536    && clobber(x)
  1537    -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
  1538  (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  1539    && x.Uses == 1
  1540    && clobber(x)
  1541    -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
  1542  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
  1543    && x.Uses == 1
  1544    && clobber(x)
  1545    -> (MOVQstoreidx1 [i-4] {s} p idx w mem)
  1546  (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  1547    && x.Uses == 1
  1548    && clobber(x)
  1549    -> (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
  1550  
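// Widening a 2- or 4-scaled store changes the element size, so the old scale no
// longer matches; the rules below fall back to an unscaled (idx1) store and
// scale the index explicitly with SHLQconst.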
  1551  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  1552    && x.Uses == 1
  1553    && clobber(x)
  1554    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
  1555  (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
  1556    && x.Uses == 1
  1557    && clobber(x)
  1558    -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
  1559  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
  1560    && x.Uses == 1
  1561    && clobber(x)
  1562    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
  1563  (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  1564    && x.Uses == 1
  1565    && clobber(x)
  1566    -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)