github.com/riscv/riscv-go@v0.0.0-20200123204226-124ebd6fcc8e/src/cmd/compile/internal/ssa/gen/386.rules

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(AddPtr x y) -> (ADDL  x y)
(Add32  x y) -> (ADDL  x y)
(Add16  x y) -> (ADDL  x y)
(Add8   x y) -> (ADDL  x y)
(Add32F x y) -> (ADDSS x y)
(Add64F x y) -> (ADDSD x y)

(Add32carry x y) -> (ADDLcarry x y)
(Add32withcarry x y c) -> (ADCL x y c)

(SubPtr x y) -> (SUBL  x y)
(Sub32  x y) -> (SUBL  x y)
(Sub16  x y) -> (SUBL  x y)
(Sub8   x y) -> (SUBL  x y)
(Sub32F x y) -> (SUBSS x y)
(Sub64F x y) -> (SUBSD x y)

(Sub32carry x y) -> (SUBLcarry x y)
(Sub32withcarry x y c) -> (SBBL x y c)

(Mul32  x y) -> (MULL  x y)
(Mul16  x y) -> (MULL  x y)
(Mul8   x y) -> (MULL  x y)
(Mul32F x y) -> (MULSS x y)
(Mul64F x y) -> (MULSD x y)

(Mul32uhilo x y) -> (MULLQU x y)

(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)

(Div32  x y) -> (DIVL  x y)
(Div32u x y) -> (DIVLU x y)
(Div16  x y) -> (DIVW  x y)
(Div16u x y) -> (DIVWU x y)
(Div8   x y) -> (DIVW  (SignExt8to16 x) (SignExt8to16 y))
(Div8u  x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))

(Hmul32  x y) -> (HMULL  x y)
(Hmul32u x y) -> (HMULLU x y)
(Hmul16  x y) -> (HMULW  x y)
(Hmul16u x y) -> (HMULWU x y)
(Hmul8   x y) -> (HMULB  x y)
(Hmul8u  x y) -> (HMULBU x y)

(Mod32  x y) -> (MODL  x y)
(Mod32u x y) -> (MODLU x y)
(Mod16  x y) -> (MODW  x y)
(Mod16u x y) -> (MODWU x y)
(Mod8   x y) -> (MODW  (SignExt8to16 x) (SignExt8to16 y))
(Mod8u  x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))

(And32 x y) -> (ANDL x y)
(And16 x y) -> (ANDL x y)
(And8  x y) -> (ANDL x y)

(Or32 x y) -> (ORL x y)
(Or16 x y) -> (ORL x y)
(Or8  x y) -> (ORL x y)

(Xor32 x y) -> (XORL x y)
(Xor16 x y) -> (XORL x y)
(Xor8  x y) -> (XORL x y)

(Neg32  x) -> (NEGL x)
(Neg16  x) -> (NEGL x)
(Neg8   x) -> (NEGL x)
(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Neg32F x) && config.use387 -> (FCHS x)
(Neg64F x) && config.use387 -> (FCHS x)
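// The PXOR rules above negate by flipping only the sign bit:
// f2i(math.Copysign(0, -1)) is the bit pattern of -0.0. A minimal Go
// sketch of the same idea (illustrative only, not compiler code):
//
//	func neg32f(x float32) float32 {
//		return math.Float32frombits(math.Float32bits(x) ^ 0x80000000)
//	}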

(Com32 x) -> (NOTL x)
(Com16 x) -> (NOTL x)
(Com8  x) -> (NOTL x)

// Lowering boolean ops
(AndB x y) -> (ANDL x y)
(OrB x y) -> (ORL x y)
(Not x) -> (XORLconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr) -> (ADDLconst [off] ptr)

(Bswap32 x) -> (BSWAPL x)

(Sqrt x) -> (SQRTSD x)

// Lowering extension
(SignExt8to16  x) -> (MOVBLSX x)
(SignExt8to32  x) -> (MOVBLSX x)
(SignExt16to32 x) -> (MOVWLSX x)

(ZeroExt8to16  x) -> (MOVBLZX x)
(ZeroExt8to32  x) -> (MOVBLZX x)
(ZeroExt16to32 x) -> (MOVWLZX x)

(Signmask x) -> (SARLconst x [31])
(Zeromask <t> x) -> (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
(Slicemask <t> x) -> (SARLconst (NEGL <t> x) [31])
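// Zeromask computes 0 when x == 0 and -1 otherwise: CMPLconst x [1] sets
// the carry flag iff x < 1 unsigned, i.e. iff x == 0, so SBBLcarrymask is
// -1 exactly when x == 0, and the final XOR with -1 flips that to the
// desired mask. E.g. x = 5: carry clear, SBBLcarrymask = 0, result = -1.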

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8  x) -> x
(Trunc32to8  x) -> x
(Trunc32to16 x) -> x

// Lowering float <-> int
(Cvt32to32F x) -> (CVTSL2SS x)
(Cvt32to64F x) -> (CVTSL2SD x)

(Cvt32Fto32 x) -> (CVTTSS2SL x)
(Cvt64Fto32 x) -> (CVTTSD2SL x)

(Cvt32Fto64F x) -> (CVTSS2SD x)
(Cvt64Fto32F x) -> (CVTSD2SS x)

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffff)
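// A short Go model of the semantics the rules below implement (a sketch
// with a hypothetical helper name; the real lowering is branch-free):
//
//	func lsh32(x, s uint32) uint32 {
//		if s >= 32 {
//			return 0 // oversized shift amounts must yield 0
//		}
//		return x << s
//	}
//
// The branch-free version works because SBBLcarrymask(CMPLconst y [32])
// is all ones iff y < 32, so ANDing it with the raw shift result zeroes
// exactly the out-of-range cases.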
(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Lsh8x32 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh8x16 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh8x8  <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))

(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
(Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))

(Rsh8Ux32 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
(Rsh8Ux16 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
(Rsh8Ux8  <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
// Signed right shift needs to return 0/-1 if the shift amount is >= width of the shifted value.
// We implement this by forcing the shift amount to -1 (all ones) when it is >= width.
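// Worked example for the rules below, with y = 40: CMPLconst y [32]
// leaves carry clear (40 >= 32), SBBLcarrymask is 0, NOTL makes it -1,
// and ORL forces the shift amount to all ones. The CPU masks the amount
// to 31, and an arithmetic shift by 31 gives the required 0 or -1.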

(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
(Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))

(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
(Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))

(Rsh8x32 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
(Rsh8x16 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
(Rsh8x8  <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))

// constant shifts
// generic opt rewrites all constant shifts to shift by Const64
(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SHLLconst x [c])
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SARLconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SHRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SHLLconst x [c])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SARWconst x [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SHRWconst x [c])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SHLLconst x [c])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SARBconst x [c])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SHRBconst x [c])

// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
// large constant signed right shift: only the sign bit is left, so shift by width-1
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SARLconst x [31])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SARWconst x [15])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SARBconst x [7])

// Lowering comparisons
(Less32  x y) -> (SETL (CMPL x y))
(Less16  x y) -> (SETL (CMPW x y))
(Less8   x y) -> (SETL (CMPB x y))
(Less32U x y) -> (SETB (CMPL x y))
(Less16U x y) -> (SETB (CMPW x y))
(Less8U  x y) -> (SETB (CMPB x y))
// Use SETGF with reversed operands to dodge NaN case
(Less64F x y) -> (SETGF (UCOMISD y x))
(Less32F x y) -> (SETGF (UCOMISS y x))
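// Why the swap dodges NaN: UCOMIS reports "unordered" (ZF=PF=CF=1) when
// either operand is NaN, and SETGF (an unsigned "above"-style test) is
// false in that case, so x < y correctly evaluates to false for NaNs.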

(Leq32  x y) -> (SETLE (CMPL x y))
(Leq16  x y) -> (SETLE (CMPW x y))
(Leq8   x y) -> (SETLE (CMPB x y))
(Leq32U x y) -> (SETBE (CMPL x y))
(Leq16U x y) -> (SETBE (CMPW x y))
(Leq8U  x y) -> (SETBE (CMPB x y))
// Use SETGEF with reversed operands to dodge NaN case
(Leq64F x y) -> (SETGEF (UCOMISD y x))
(Leq32F x y) -> (SETGEF (UCOMISS y x))

(Greater32  x y) -> (SETG (CMPL x y))
(Greater16  x y) -> (SETG (CMPW x y))
(Greater8   x y) -> (SETG (CMPB x y))
(Greater32U x y) -> (SETA (CMPL x y))
(Greater16U x y) -> (SETA (CMPW x y))
(Greater8U  x y) -> (SETA (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
// the bug is accommodated when the assembly is generated.
(Greater64F x y) -> (SETGF (UCOMISD x y))
(Greater32F x y) -> (SETGF (UCOMISS x y))

(Geq32  x y) -> (SETGE (CMPL x y))
(Geq16  x y) -> (SETGE (CMPW x y))
(Geq8   x y) -> (SETGE (CMPB x y))
(Geq32U x y) -> (SETAE (CMPL x y))
(Geq16U x y) -> (SETAE (CMPW x y))
(Geq8U  x y) -> (SETAE (CMPB x y))
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here;
// the bug is accommodated when the assembly is generated.
(Geq64F x y) -> (SETGEF (UCOMISD x y))
(Geq32F x y) -> (SETGEF (UCOMISS x y))

(Eq32  x y) -> (SETEQ (CMPL x y))
(Eq16  x y) -> (SETEQ (CMPW x y))
(Eq8   x y) -> (SETEQ (CMPB x y))
(EqB   x y) -> (SETEQ (CMPB x y))
(EqPtr x y) -> (SETEQ (CMPL x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))

(Neq32  x y) -> (SETNE (CMPL x y))
(Neq16  x y) -> (SETNE (CMPW x y))
(Neq8   x y) -> (SETNE (CMPB x y))
(NeqB   x y) -> (SETNE (CMPB x y))
(NeqPtr x y) -> (SETNE (CMPL x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))

// Lowering loads
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)

// Lowering stores
// These more specific FP versions of the Store pattern must come first,
// or the generic integer rules below would also match floating-point stores.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)

(Store [4] ptr val mem) -> (MOVLstore ptr val mem)
(Store [2] ptr val mem) -> (MOVWstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)

// Lowering moves
(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstore dst (MOVWload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstore dst (MOVLload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
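// Note: the size-7 case issues two overlapping 4-byte copies (offsets 3
// and 0); the overlapping byte is written twice with the same value,
// which is cheaper than a 4+2+1 split.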
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 ->
	(MOVLstore [4] dst (MOVLload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))

// Adjust moves to be a multiple of 4 bytes.
(Move [s] dst src mem)
	&& SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0 ->
	(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4]
		(ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4])
		(ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4])
		(MOVLstore dst (MOVLload src mem) mem))
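// Worked example: a 13-byte move first copies bytes 0-3 with MOVL, then
// re-issues a 12-byte Move from dst+1/src+1; the three overlapping bytes
// are simply rewritten with the same values.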

// Medium copying uses a Duff's device.
(Move [s] dst src mem)
	&& SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0
	&& !config.noDuffDevice ->
	(DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
// 10 and 128 are magic constants.  10 is the number of bytes to encode:
//	MOVL	(SI), CX
//	ADDL	$4, SI
//	MOVL	CX, (DI)
//	ADDL	$4, DI
// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
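// Worked example: a 64-byte move is 16 words, so the offset is
// 10*(128-16) = 1120, entering duffcopy just before its last 16 blocks.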

// Large copying uses REP MOVSL.
(Move [s] dst src mem) && (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0 ->
	(REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)

// Lowering Zero instructions
(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstoreconst [0] destptr mem)

(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [0] destptr mem))

// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && SizeAndAlign(s).Size()%4 != 0 && SizeAndAlign(s).Size() > 4 ->
	(Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst destptr [SizeAndAlign(s).Size()%4])
		(MOVLstoreconst [0] destptr mem))
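// Worked example: zeroing 13 bytes stores one zero word at destptr, then
// re-issues Zero [12] at destptr+1; as with Move, the overlap is harmless.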

// Zero small numbers of words directly.
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 ->
	(MOVLstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 12 ->
	(MOVLstoreconst [makeValAndOff(0,8)] destptr
		(MOVLstoreconst [makeValAndOff(0,4)] destptr
			(MOVLstoreconst [0] destptr mem)))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 ->
	(MOVLstoreconst [makeValAndOff(0,12)] destptr
		(MOVLstoreconst [makeValAndOff(0,8)] destptr
			(MOVLstoreconst [makeValAndOff(0,4)] destptr
				(MOVLstoreconst [0] destptr mem))))

// Medium zeroing uses a Duff's device.
(Zero [s] destptr mem)
  && SizeAndAlign(s).Size() > 16
  && SizeAndAlign(s).Size() <= 4*128
  && SizeAndAlign(s).Size()%4 == 0
  && !config.noDuffDevice ->
	(DUFFZERO [1*(128-SizeAndAlign(s).Size()/4)] destptr (MOVLconst [0]) mem)
// 1 and 128 are magic constants.  1 is the number of bytes to encode STOSL.
// 128 is the number of STOSL instructions in duffzero.
// See src/runtime/duff_386.s:duffzero.
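// Worked example: zeroing 64 bytes is 16 words, so the offset is
// 1*(128-16) = 112, skipping the first 112 STOSL instructions and
// executing the last 16.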

// Large zeroing uses REP STOSL.
(Zero [s] destptr mem)
  && (SizeAndAlign(s).Size() > 4*128 || (config.noDuffDevice && SizeAndAlign(s).Size() > 16))
  && SizeAndAlign(s).Size()%4 == 0 ->
	(REPSTOSL destptr (MOVLconst [SizeAndAlign(s).Size()/4]) (MOVLconst [0]) mem)

// Lowering constants
(Const8   [val]) -> (MOVLconst [val])
(Const16  [val]) -> (MOVLconst [val])
(Const32  [val]) -> (MOVLconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstNil) -> (MOVLconst [0])
(ConstBool [b]) -> (MOVLconst [b])

// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) -> (MOVLconvert <t> x mem)
(IsNonNil p) -> (SETNE (TESTL p p))
(IsInBounds idx len) -> (SETB (CMPL idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPL idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) -> (LEAL {sym} base)

// block rewrites
(If (SETL  cmp) yes no) -> (LT  cmp yes no)
(If (SETLE cmp) yes no) -> (LE  cmp yes no)
(If (SETG  cmp) yes no) -> (GT  cmp yes no)
(If (SETGE cmp) yes no) -> (GE  cmp yes no)
(If (SETEQ cmp) yes no) -> (EQ  cmp yes no)
(If (SETNE cmp) yes no) -> (NE  cmp yes no)
(If (SETB  cmp) yes no) -> (ULT cmp yes no)
(If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA  cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF  cmp) yes no) -> (UGT  cmp yes no)
(If (SETGEF cmp) yes no) -> (UGE  cmp yes no)
(If (SETEQF cmp) yes no) -> (EQF  cmp yes no)
(If (SETNEF cmp) yes no) -> (NEF  cmp yes no)

(If cond yes no) -> (NE (TESTB cond cond) yes no)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold boolean tests into blocks
(NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT  cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE  cmp yes no)
(NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT  cmp yes no)
(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE  cmp yes no)
(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ  cmp yes no)
(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE  cmp yes no)
(NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) -> (ULT cmp yes no)
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) -> (UGT  cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE  cmp yes no)
(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF  cmp yes no)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF  cmp yes no)

// fold constants into instructions
(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
(ADDLcarry x (MOVLconst [c])) -> (ADDLconstcarry [c] x)
(ADDLcarry (MOVLconst [c]) x) -> (ADDLconstcarry [c] x)
(ADCL x (MOVLconst [c]) f) -> (ADCLconst [c] x f)
(ADCL (MOVLconst [c]) x f) -> (ADCLconst [c] x f)

(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
(SUBLcarry x (MOVLconst [c])) -> (SUBLconstcarry [c] x)
(SBBL x (MOVLconst [c]) f) -> (SBBLconst [c] x f)

(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)

(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)

(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)

(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)

(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)

(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)

(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)

(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
(SHRW x (MOVLconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
(SHRW _ (MOVLconst [c])) && c&31 >= 16 -> (MOVLconst [0])
(SHRB x (MOVLconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
(SHRB _ (MOVLconst [c])) && c&31 >= 8 -> (MOVLconst [0])

(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
(SARW x (MOVLconst [c])) -> (SARWconst [min(c&31,15)] x)
(SARB x (MOVLconst [c])) -> (SARBconst [min(c&31,7)] x)

(SARL x (ANDLconst [31] y)) -> (SARL x y)

(SHLL x (ANDLconst [31] y)) -> (SHLL x y)

(SHRL x (ANDLconst [31] y)) -> (SHRL x y)

// Rotate instructions

(ADDL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c   ] x)
( ORL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c   ] x)
(XORL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c   ] x)
(ADDL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x)
( ORL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x)
(XORL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x)

(ADDL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [   c])
( ORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [   c])
(XORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [   c])
(ADDL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0  && t.Size() == 2 -> (ROLWconst x [16-c])
( ORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0  && t.Size() == 2 -> (ROLWconst x [16-c])
(XORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0  && t.Size() == 2 -> (ROLWconst x [16-c])

(ADDL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [   c])
( ORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [   c])
(XORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [   c])
(ADDL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c])
( ORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c])
(XORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c])

(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)

// Constant shift simplifications

(SHLLconst x [0]) -> x
(SHRLconst x [0]) -> x
(SARLconst x [0]) -> x

(SHRWconst x [0]) -> x
(SARWconst x [0]) -> x

(SHRBconst x [0]) -> x
(SARBconst x [0]) -> x

(ROLLconst [0] x) -> x
(ROLWconst [0] x) -> x
(ROLBconst [0] x) -> x

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHRW x (MOVLconst [24]))), but just in case.

(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))

// strength reduction
// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
//    1 - addl, shll, leal, negl
//    3 - imull
// This limits the rewrites to two instructions.
// TODO: 27, 81
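// Worked example of the LEAL chains below: MULLconst [11] becomes
// LEAL2 x (LEAL4 x x), i.e. x + 2*(x + 4*x) = 11*x, two cost-1
// instructions in place of one cost-3 multiply.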
(MULLconst [-1] x) -> (NEGL x)
(MULLconst [0] _) -> (MOVLconst [0])
(MULLconst [1] x) -> x
(MULLconst [3] x) -> (LEAL2 x x)
(MULLconst [5] x) -> (LEAL4 x x)
(MULLconst [7] x) -> (LEAL8 (NEGL <v.Type> x) x)
(MULLconst [9] x) -> (LEAL8 x x)
(MULLconst [11] x) -> (LEAL2 x (LEAL4 <v.Type> x x))
(MULLconst [13] x) -> (LEAL4 x (LEAL2 <v.Type> x x))
(MULLconst [21] x) -> (LEAL4 x (LEAL4 <v.Type> x x))
(MULLconst [25] x) -> (LEAL8 x (LEAL2 <v.Type> x x))
(MULLconst [37] x) -> (LEAL4 x (LEAL8 <v.Type> x x))
(MULLconst [41] x) -> (LEAL8 x (LEAL4 <v.Type> x x))
(MULLconst [73] x) -> (LEAL8 x (LEAL8 <v.Type> x x))

(MULLconst [c] x) && isPowerOfTwo(c) -> (SHLLconst [log2(c)] x)
(MULLconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
(MULLconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
(MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
(MULLconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
(MULLconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))

// combine add/shift into LEAL
(ADDL x (SHLLconst [3] y)) -> (LEAL8 x y)
(ADDL x (SHLLconst [2] y)) -> (LEAL4 x y)
(ADDL x (SHLLconst [1] y)) -> (LEAL2 x y)
(ADDL x (ADDL y y)) -> (LEAL2 x y)
(ADDL x (ADDL x y)) -> (LEAL2 y x)
(ADDL x (ADDL y x)) -> (LEAL2 y x)

// combine ADDL/ADDLconst into LEAL1
(ADDLconst [c] (ADDL x y)) -> (LEAL1 [c] x y)
(ADDL (ADDLconst [c] x) y) -> (LEAL1 [c] x y)
(ADDL x (ADDLconst [c] y)) -> (LEAL1 [c] x y)

// fold ADDL into LEAL
(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
(ADDL (LEAL [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)

// fold ADDLconst into LEALx
(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(c+d) -> (LEAL1 [c+d] {s} x y)
(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(c+d) -> (LEAL2 [c+d] {s} x y)
(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(c+d) -> (LEAL4 [c+d] {s} x y)
(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(c+d) -> (LEAL8 [c+d] {s} x y)
(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL1 [c+d] {s} x y)
(LEAL1 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+d)   && y.Op != OpSB -> (LEAL1 [c+d] {s} x y)
(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL2 [c+d] {s} x y)
(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAL2 [c+2*d] {s} x y)
(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL4 [c+d] {s} x y)
(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAL4 [c+4*d] {s} x y)
(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL8 [c+d] {s} x y)
(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAL8 [c+8*d] {s} x y)

// fold shifts into LEALx
(LEAL1 [c] {s} x (SHLLconst [1] y)) -> (LEAL2 [c] {s} x y)
(LEAL1 [c] {s} (SHLLconst [1] x) y) -> (LEAL2 [c] {s} y x)
(LEAL1 [c] {s} x (SHLLconst [2] y)) -> (LEAL4 [c] {s} x y)
(LEAL1 [c] {s} (SHLLconst [2] x) y) -> (LEAL4 [c] {s} y x)
(LEAL1 [c] {s} x (SHLLconst [3] y)) -> (LEAL8 [c] {s} x y)
(LEAL1 [c] {s} (SHLLconst [3] x) y) -> (LEAL8 [c] {s} y x)

(LEAL2 [c] {s} x (SHLLconst [1] y)) -> (LEAL4 [c] {s} x y)
(LEAL2 [c] {s} x (SHLLconst [2] y)) -> (LEAL8 [c] {s} x y)
(LEAL4 [c] {s} x (SHLLconst [1] y)) -> (LEAL8 [c] {s} x y)

// reverse ordering of compare instruction
(SETL (InvertFlags x)) -> (SETG x)
(SETG (InvertFlags x)) -> (SETL x)
(SETB (InvertFlags x)) -> (SETA x)
(SETA (InvertFlags x)) -> (SETB x)
(SETLE (InvertFlags x)) -> (SETGE x)
(SETGE (InvertFlags x)) -> (SETLE x)
(SETBE (InvertFlags x)) -> (SETAE x)
(SETAE (InvertFlags x)) -> (SETBE x)
(SETEQ (InvertFlags x)) -> (SETEQ x)
(SETNE (InvertFlags x)) -> (SETNE x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values.  See test/atomicload.go.
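// Example of the hazard: if one MOVBload fed two MOVBLSX ops and both
// were rewritten, the single load would become two loads that could
// observe different memory states; the x.Uses == 1 check rules that out.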
(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)

(MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
(MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)

// replace load from same location as preceding store with copy
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

// Fold extensions and ANDs together.
(MOVBLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
(MOVWLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)

// Don't extend before storing
(MOVWstore [off] {sym} ptr (MOVWLSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBLSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWLZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBLZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)

// fold constants into memory operations
// Note that this is not always a good idea because if not all the uses of
// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
(MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
(MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
(MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
(MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
(MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)

(MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
(MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
(MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
(MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
(MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)

// Fold constants into stores.
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)

// Fold address offsets into constant stores.
(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)

// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
   695  // what variables are being read/written by the ops.
   696  // Note: we turn off this merging for operations on globals when building
   697  // position-independent code (when Flag_shared is set).
   698  // PIC needs a spare register to load the PC into.  Having the LEAL be
   699  // a separate instruction gives us that register.  Having the LEAL be
   700  // a separate instruction also allows it to be CSEd (which is good because
   701  // it compiles to a thunk call).
   702  (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   703    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   704  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   705  (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   706    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   707  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   708  (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   709    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   710  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   711  (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   712    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   713  	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   714  (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   715    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   716  	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   717  
   718  (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   719    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   720  	(MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   721  (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   722    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   723  	(MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   724  
   725  (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   726    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   727  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   728  (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   729    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   730  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   731  (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   732    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   733  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   734  (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   735    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   736  	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   737  (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   738    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   739  	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   740  
   741  (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   742    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
   743  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   744  (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   745    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
   746  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   747  (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   748    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
   749  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   750  
   751  // generating indexed loads and stores
   752  (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   753  	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   754  (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   755  	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   756  (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   757  	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   758  (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   759  	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   760  (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   761  	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   762  (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   763  	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   764  (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   765  	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   766  (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   767  	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   768  (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   769  	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   770  
   771  (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   772  	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   773  (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   774  	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   775  (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   776  	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   777  (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   778  	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   779  (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   780  	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   781  (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   782  	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   783  (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   784  	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   785  (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   786  	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   787  (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   788  	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   789  
   790  (MOVBload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
   791  (MOVWload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
   792  (MOVLload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
   793  (MOVSSload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
   794  (MOVSDload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
   795  (MOVBstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
   796  (MOVWstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
   797  (MOVLstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
   798  (MOVSSstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
   799  (MOVSDstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
   800  
   801  (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   802  	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   803  (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   804  	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   805  (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   806  	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   807  (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   808  	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   809  (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   810  	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   811  
   812  (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
   813  (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
   814  (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
   815  
   816  // combine SHLL into indexed loads and stores
   817  (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
   818  (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
   819  (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
   820  (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
   821  (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
   822  (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
   823  
   824  // combine ADDL into indexed loads and stores
   825  (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
   826  (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
   827  (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
   828  (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
   829  (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
   830  (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
   831  (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
   832  (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
   833  (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
   834  
   835  (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
   836  (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
   837  (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
   838  (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
   839  (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
   840  (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
   841  (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
   842  (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
   843  (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
   844  
   845  (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
   846  (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
   847  (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
   848  (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
   849  (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
   850  (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
   851  (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
   852  (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
   853  (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
   854  
   855  (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
   856  (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
   857  (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
   858  (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
   859  (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
   860  (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
   861  (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
   862  (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
   863  (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
   864  
   865  (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   866  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   867  (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   868  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   869  (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   870  	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   871  (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   872  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   873  (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   874  	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   875  
   876  (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   877  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   878  (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   879  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   880  (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   881  	(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
   882  (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   883  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   884  (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   885  	(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
   886  
   887  // fold LEALs together
   888  (LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   889        (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
   890  
   891  // LEAL into LEAL1
   892  (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   893         (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   894  (LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
   895         (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   896  
   897  // LEAL1 into LEAL
   898  (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   899         (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   900  
   901  // LEAL into LEAL[248]
   902  (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   903         (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   904  (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   905         (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   906  (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   907         (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   908  
   909  // LEAL[248] into LEAL
   910  (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   911        (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   912  (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   913        (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   914  (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   915        (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   916  
   917  // Absorb InvertFlags into branches.
   918  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   919  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   920  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   921  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   922  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
   923  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
   924  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
   925  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
   926  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   927  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   928  
   929  // Constant comparisons.
   930  (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
   931  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
   932  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
   933  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
   934  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
   935  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
   936  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
   937  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
   938  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
   939  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
   940  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
   941  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
   942  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
   943  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
   944  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
   945  
   946  // Other known comparisons.
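// A logical right shift by c leaves a value below 1<<(32-c), and an AND with
// a nonnegative mask m leaves a value of at most m, so some comparisons can
// be decided statically.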
   947  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
   948  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
   949  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
   950  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
   951  // TODO: DIVxU also.
   952  
   953  // Absorb flag constants into SBB ops.
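// SBBLcarrymask materializes 0 - CF: all ones when the carry flag (set by an
// unsigned less-than) is on, zero otherwise.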
   954  (SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
   955  (SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
   956  (SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
   957  (SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
   958  (SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
   959  
   960  // Absorb flag constants into branches.
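// A First block takes its first successor unconditionally, so
// (First nil yes no) is a plain jump to yes and (First nil no yes) a plain
// jump to no.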
   961  (EQ (FlagEQ) yes no) -> (First nil yes no)
   962  (EQ (FlagLT_ULT) yes no) -> (First nil no yes)
   963  (EQ (FlagLT_UGT) yes no) -> (First nil no yes)
   964  (EQ (FlagGT_ULT) yes no) -> (First nil no yes)
   965  (EQ (FlagGT_UGT) yes no) -> (First nil no yes)
   966  
   967  (NE (FlagEQ) yes no) -> (First nil no yes)
   968  (NE (FlagLT_ULT) yes no) -> (First nil yes no)
   969  (NE (FlagLT_UGT) yes no) -> (First nil yes no)
   970  (NE (FlagGT_ULT) yes no) -> (First nil yes no)
   971  (NE (FlagGT_UGT) yes no) -> (First nil yes no)
   972  
   973  (LT (FlagEQ) yes no) -> (First nil no yes)
   974  (LT (FlagLT_ULT) yes no) -> (First nil yes no)
   975  (LT (FlagLT_UGT) yes no) -> (First nil yes no)
   976  (LT (FlagGT_ULT) yes no) -> (First nil no yes)
   977  (LT (FlagGT_UGT) yes no) -> (First nil no yes)
   978  
   979  (LE (FlagEQ) yes no) -> (First nil yes no)
   980  (LE (FlagLT_ULT) yes no) -> (First nil yes no)
   981  (LE (FlagLT_UGT) yes no) -> (First nil yes no)
   982  (LE (FlagGT_ULT) yes no) -> (First nil no yes)
   983  (LE (FlagGT_UGT) yes no) -> (First nil no yes)
   984  
   985  (GT (FlagEQ) yes no) -> (First nil no yes)
   986  (GT (FlagLT_ULT) yes no) -> (First nil no yes)
   987  (GT (FlagLT_UGT) yes no) -> (First nil no yes)
   988  (GT (FlagGT_ULT) yes no) -> (First nil yes no)
   989  (GT (FlagGT_UGT) yes no) -> (First nil yes no)
   990  
   991  (GE (FlagEQ) yes no) -> (First nil yes no)
   992  (GE (FlagLT_ULT) yes no) -> (First nil no yes)
   993  (GE (FlagLT_UGT) yes no) -> (First nil no yes)
   994  (GE (FlagGT_ULT) yes no) -> (First nil yes no)
   995  (GE (FlagGT_UGT) yes no) -> (First nil yes no)
   996  
   997  (ULT (FlagEQ) yes no) -> (First nil no yes)
   998  (ULT (FlagLT_ULT) yes no) -> (First nil yes no)
   999  (ULT (FlagLT_UGT) yes no) -> (First nil no yes)
  1000  (ULT (FlagGT_ULT) yes no) -> (First nil yes no)
  1001  (ULT (FlagGT_UGT) yes no) -> (First nil no yes)
  1002  
  1003  (ULE (FlagEQ) yes no) -> (First nil yes no)
  1004  (ULE (FlagLT_ULT) yes no) -> (First nil yes no)
  1005  (ULE (FlagLT_UGT) yes no) -> (First nil no yes)
  1006  (ULE (FlagGT_ULT) yes no) -> (First nil yes no)
  1007  (ULE (FlagGT_UGT) yes no) -> (First nil no yes)
  1008  
  1009  (UGT (FlagEQ) yes no) -> (First nil no yes)
  1010  (UGT (FlagLT_ULT) yes no) -> (First nil no yes)
  1011  (UGT (FlagLT_UGT) yes no) -> (First nil yes no)
  1012  (UGT (FlagGT_ULT) yes no) -> (First nil no yes)
  1013  (UGT (FlagGT_UGT) yes no) -> (First nil yes no)
  1014  
  1015  (UGE (FlagEQ) yes no) -> (First nil yes no)
  1016  (UGE (FlagLT_ULT) yes no) -> (First nil no yes)
  1017  (UGE (FlagLT_UGT) yes no) -> (First nil yes no)
  1018  (UGE (FlagGT_ULT) yes no) -> (First nil no yes)
  1019  (UGE (FlagGT_UGT) yes no) -> (First nil yes no)
  1020  
  1021  // Absorb flag constants into SETxx ops.
  1022  (SETEQ (FlagEQ)) -> (MOVLconst [1])
  1023  (SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
  1024  (SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
  1025  (SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
  1026  (SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
  1027  
  1028  (SETNE (FlagEQ)) -> (MOVLconst [0])
  1029  (SETNE (FlagLT_ULT)) -> (MOVLconst [1])
  1030  (SETNE (FlagLT_UGT)) -> (MOVLconst [1])
  1031  (SETNE (FlagGT_ULT)) -> (MOVLconst [1])
  1032  (SETNE (FlagGT_UGT)) -> (MOVLconst [1])
  1033  
  1034  (SETL (FlagEQ)) -> (MOVLconst [0])
  1035  (SETL (FlagLT_ULT)) -> (MOVLconst [1])
  1036  (SETL (FlagLT_UGT)) -> (MOVLconst [1])
  1037  (SETL (FlagGT_ULT)) -> (MOVLconst [0])
  1038  (SETL (FlagGT_UGT)) -> (MOVLconst [0])
  1039  
  1040  (SETLE (FlagEQ)) -> (MOVLconst [1])
  1041  (SETLE (FlagLT_ULT)) -> (MOVLconst [1])
  1042  (SETLE (FlagLT_UGT)) -> (MOVLconst [1])
  1043  (SETLE (FlagGT_ULT)) -> (MOVLconst [0])
  1044  (SETLE (FlagGT_UGT)) -> (MOVLconst [0])
  1045  
  1046  (SETG (FlagEQ)) -> (MOVLconst [0])
  1047  (SETG (FlagLT_ULT)) -> (MOVLconst [0])
  1048  (SETG (FlagLT_UGT)) -> (MOVLconst [0])
  1049  (SETG (FlagGT_ULT)) -> (MOVLconst [1])
  1050  (SETG (FlagGT_UGT)) -> (MOVLconst [1])
  1051  
  1052  (SETGE (FlagEQ)) -> (MOVLconst [1])
  1053  (SETGE (FlagLT_ULT)) -> (MOVLconst [0])
  1054  (SETGE (FlagLT_UGT)) -> (MOVLconst [0])
  1055  (SETGE (FlagGT_ULT)) -> (MOVLconst [1])
  1056  (SETGE (FlagGT_UGT)) -> (MOVLconst [1])
  1057  
  1058  (SETB (FlagEQ)) -> (MOVLconst [0])
  1059  (SETB (FlagLT_ULT)) -> (MOVLconst [1])
  1060  (SETB (FlagLT_UGT)) -> (MOVLconst [0])
  1061  (SETB (FlagGT_ULT)) -> (MOVLconst [1])
  1062  (SETB (FlagGT_UGT)) -> (MOVLconst [0])
  1063  
  1064  (SETBE (FlagEQ)) -> (MOVLconst [1])
  1065  (SETBE (FlagLT_ULT)) -> (MOVLconst [1])
  1066  (SETBE (FlagLT_UGT)) -> (MOVLconst [0])
  1067  (SETBE (FlagGT_ULT)) -> (MOVLconst [1])
  1068  (SETBE (FlagGT_UGT)) -> (MOVLconst [0])
  1069  
  1070  (SETA (FlagEQ)) -> (MOVLconst [0])
  1071  (SETA (FlagLT_ULT)) -> (MOVLconst [0])
  1072  (SETA (FlagLT_UGT)) -> (MOVLconst [1])
  1073  (SETA (FlagGT_ULT)) -> (MOVLconst [0])
  1074  (SETA (FlagGT_UGT)) -> (MOVLconst [1])
  1075  
  1076  (SETAE (FlagEQ)) -> (MOVLconst [1])
  1077  (SETAE (FlagLT_ULT)) -> (MOVLconst [0])
  1078  (SETAE (FlagLT_UGT)) -> (MOVLconst [1])
  1079  (SETAE (FlagGT_ULT)) -> (MOVLconst [0])
  1080  (SETAE (FlagGT_UGT)) -> (MOVLconst [1])
  1081  
  1082  // Remove redundant *const ops
  1083  (ADDLconst [c] x) && int32(c)==0 -> x
  1084  (SUBLconst [c] x) && int32(c)==0 -> x
  1085  (ANDLconst [c] _) && int32(c)==0  -> (MOVLconst [0])
  1086  (ANDLconst [c] x) && int32(c)==-1 -> x
  1087  (ORLconst [c] x) && int32(c)==0   -> x
  1088  (ORLconst [c] _) && int32(c)==-1  -> (MOVLconst [-1])
  1089  (XORLconst [c] x) && int32(c)==0   -> x
  1090  // TODO: since we got rid of the W/B versions, we might miss
  1091  // things like (ANDLconst [0x100] x) which were formerly
  1092  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1093  // If we cared, we might do:
  1094  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1095  
  1096  // Convert constant subtracts to constant adds
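// x - c equals x + (-c) modulo 2^32, e.g. (SUBLconst [5] x) becomes
// (ADDLconst [-5] x).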
  1097  (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
  1098  
  1099  // generic constant folding
  1100  // TODO: more of this
  1101  (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
  1102  (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
  1103  (SARLconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
  1104  (SARWconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
  1105  (SARBconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
  1106  (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
  1107  (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
  1108  (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
  1109  (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
  1110  (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
  1111  (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
  1112  
  1113  // generic simplifications
  1114  // TODO: more of this
  1115  (ADDL x (NEGL y)) -> (SUBL x y)
  1116  (SUBL x x) -> (MOVLconst [0])
  1117  (ANDL x x) -> x
  1118  (ORL x x) -> x
  1119  (XORL x x) -> (MOVLconst [0])
  1120  
  1121  // checking AND against 0.
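// Only the flags of the AND result are needed, and TEST performs the same
// AND purely for its flag effects.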
  1122  (CMPLconst (ANDL x y) [0]) -> (TESTL x y)
  1123  (CMPWconst (ANDL x y) [0]) -> (TESTW x y)
  1124  (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
  1125  (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
  1126  (CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
  1127  (CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
  1128  
  1129  // TEST %reg,%reg is shorter than CMP
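// TESTL %reg,%reg encodes in two bytes, while CMPL $0,%reg also needs an
// immediate byte; both set ZF/SF/CF/OF the same way for a zero test.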
  1130  (CMPLconst x [0]) -> (TESTL x x)
  1131  (CMPWconst x [0]) -> (TESTW x x)
  1132  (CMPBconst x [0]) -> (TESTB x x)
  1133  
  1134  // Combining byte loads into larger (unaligned) loads.
  1135  // There are many ways these combinations could occur.  This is
  1136  // designed to match the way encoding/binary.LittleEndian does it.
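// As an illustrative sketch (not part of this file), the little-endian
// helpers look roughly like:
//
//	func Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 }
//	func Uint32(b []byte) uint32 {
//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//	}
//
// which lower to the ORL/SHLLconst/MOVBload trees matched below.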
  1137  (ORL                  x0:(MOVBload [i]   {s} p mem)
  1138      s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
  1139    && x0.Uses == 1
  1140    && x1.Uses == 1
  1141    && s0.Uses == 1
  1142    && mergePoint(b,x0,x1) != nil
  1143    && clobber(x0)
  1144    && clobber(x1)
  1145    && clobber(s0)
  1146    -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
  1147  
  1148  (ORL o0:(ORL
  1149                         x0:(MOVWload [i]   {s} p mem)
  1150      s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem)))
  1151      s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem)))
  1152    && x0.Uses == 1
  1153    && x1.Uses == 1
  1154    && x2.Uses == 1
  1155    && s0.Uses == 1
  1156    && s1.Uses == 1
  1157    && o0.Uses == 1
  1158    && mergePoint(b,x0,x1,x2) != nil
  1159    && clobber(x0)
  1160    && clobber(x1)
  1161    && clobber(x2)
  1162    && clobber(s0)
  1163    && clobber(s1)
  1164    && clobber(o0)
  1165    -> @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem)
  1166  
  1167  (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)
  1168      s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
  1169    && x0.Uses == 1
  1170    && x1.Uses == 1
  1171    && s0.Uses == 1
  1172    && mergePoint(b,x0,x1) != nil
  1173    && clobber(x0)
  1174    && clobber(x1)
  1175    && clobber(s0)
  1176    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
  1177  
  1178  (ORL o0:(ORL
  1179                         x0:(MOVWloadidx1 [i]   {s} p idx mem)
  1180      s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem)))
  1181      s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem)))
  1182    && x0.Uses == 1
  1183    && x1.Uses == 1
  1184    && x2.Uses == 1
  1185    && s0.Uses == 1
  1186    && s1.Uses == 1
  1187    && o0.Uses == 1
  1188    && mergePoint(b,x0,x1,x2) != nil
  1189    && clobber(x0)
  1190    && clobber(x1)
  1191    && clobber(x2)
  1192    && clobber(s0)
  1193    && clobber(s1)
  1194    && clobber(o0)
  1195    -> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
  1196  
  1197  // Combine constant stores into larger (unaligned) stores.
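// Packing is little-endian: e.g. storing 0x12 at off and 0x34 at off+1
// merges into a single 16-bit store of 0x3412 at off.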
  1198  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  1199    && x.Uses == 1
  1200    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1201    && clobber(x)
  1202    -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  1203  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  1204    && x.Uses == 1
  1205    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1206    && clobber(x)
  1207    -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  1208  
  1209  (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  1210    && x.Uses == 1
  1211    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1212    && clobber(x)
  1213    -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  1214  (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  1215    && x.Uses == 1
  1216    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1217    && clobber(x)
  1218    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
  1219  
  1220  (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  1221    && x.Uses == 1
  1222    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1223    && clobber(x)
  1224    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
  1225  
  1226  // Combine stores into larger (unaligned) stores.
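// e.g. a byte store of w at i-1 plus a byte store of w>>8 at i merge into a
// single little-endian 16-bit store of w at i-1.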
  1227  (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  1228    && x.Uses == 1
  1229    && clobber(x)
  1230    -> (MOVWstore [i-1] {s} p w mem)
  1231  (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
  1232    && x.Uses == 1
  1233    && clobber(x)
  1234    -> (MOVWstore [i-1] {s} p w0 mem)
  1235  (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  1236    && x.Uses == 1
  1237    && clobber(x)
  1238    -> (MOVLstore [i-2] {s} p w mem)
  1239  (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
  1240    && x.Uses == 1
  1241    && clobber(x)
  1242    -> (MOVLstore [i-2] {s} p w0 mem)
  1243  
  1244  (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  1245    && x.Uses == 1
  1246    && clobber(x)
  1247    -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
  1248  (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
  1249    && x.Uses == 1
  1250    && clobber(x)
  1251    -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  1252  (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  1253    && x.Uses == 1
  1254    && clobber(x)
  1255    -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
  1256  (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
  1257    && x.Uses == 1
  1258    && clobber(x)
  1259    -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
  1260  
  1261  (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  1262    && x.Uses == 1
  1263    && clobber(x)
  1264    -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
  1265  (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
  1266    && x.Uses == 1
  1267    && clobber(x)
  1268    -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
  1269  
  1270  // For PIC, break floating-point constant loading into two instructions so we have
  1271  // a register to use for holding the address of the constant pool entry.
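// Here MOVSSconst1/MOVSDconst1 produce the address of the constant pool
// entry, and the const2 ops load through it.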
  1272  (MOVSSconst [c]) && config.ctxt.Flag_shared -> (MOVSSconst2 (MOVSSconst1 [c]))
  1273  (MOVSDconst [c]) && config.ctxt.Flag_shared -> (MOVSDconst2 (MOVSDconst1 [c]))