github.com/zebozhuang/go@v0.0.0-20200207033046-f8a98f6f5c5d/src/cmd/compile/internal/ssa/gen/386.rules

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (AddPtr x y) -> (ADDL  x y)
     7  (Add32  x y) -> (ADDL  x y)
     8  (Add16  x y) -> (ADDL  x y)
     9  (Add8   x y) -> (ADDL  x y)
    10  (Add32F x y) -> (ADDSS x y)
    11  (Add64F x y) -> (ADDSD x y)
    12  
    13  (Add32carry x y) -> (ADDLcarry x y)
    14  (Add32withcarry x y c) -> (ADCL x y c)
    15  
    16  (SubPtr x y) -> (SUBL  x y)
    17  (Sub32  x y) -> (SUBL  x y)
    18  (Sub16  x y) -> (SUBL  x y)
    19  (Sub8   x y) -> (SUBL  x y)
    20  (Sub32F x y) -> (SUBSS x y)
    21  (Sub64F x y) -> (SUBSD x y)
    22  
    23  (Sub32carry x y) -> (SUBLcarry x y)
    24  (Sub32withcarry x y c) -> (SBBL x y c)
    25  
    26  (Mul32  x y) -> (MULL  x y)
    27  (Mul16  x y) -> (MULL  x y)
    28  (Mul8   x y) -> (MULL  x y)
    29  (Mul32F x y) -> (MULSS x y)
    30  (Mul64F x y) -> (MULSD x y)
    31  
    32  (Mul32uhilo x y) -> (MULLQU x y)
    33  
    34  (Avg32u x y) -> (AVGLU x y)
    35  
    36  (Div32F x y) -> (DIVSS x y)
    37  (Div64F x y) -> (DIVSD x y)
    38  
    39  (Div32  x y) -> (DIVL  x y)
    40  (Div32u x y) -> (DIVLU x y)
    41  (Div16  x y) -> (DIVW  x y)
    42  (Div16u x y) -> (DIVWU x y)
    43  (Div8   x y) -> (DIVW  (SignExt8to16 x) (SignExt8to16 y))
    44  (Div8u  x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
    45  
    46  (Hmul32  x y) -> (HMULL  x y)
    47  (Hmul32u x y) -> (HMULLU x y)
    48  
    49  (Mod32  x y) -> (MODL  x y)
    50  (Mod32u x y) -> (MODLU x y)
    51  (Mod16  x y) -> (MODW  x y)
    52  (Mod16u x y) -> (MODWU x y)
    53  (Mod8   x y) -> (MODW  (SignExt8to16 x) (SignExt8to16 y))
    54  (Mod8u  x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
    55  
    56  (And32 x y) -> (ANDL x y)
    57  (And16 x y) -> (ANDL x y)
    58  (And8  x y) -> (ANDL x y)
    59  
    60  (Or32 x y) -> (ORL x y)
    61  (Or16 x y) -> (ORL x y)
    62  (Or8  x y) -> (ORL x y)
    63  
    64  (Xor32 x y) -> (XORL x y)
    65  (Xor16 x y) -> (XORL x y)
    66  (Xor8  x y) -> (XORL x y)
    67  
    68  (Neg32  x) -> (NEGL x)
    69  (Neg16  x) -> (NEGL x)
    70  (Neg8   x) -> (NEGL x)
    71  (Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
    72  (Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
    73  (Neg32F x) && config.use387 -> (FCHS x)
    74  (Neg64F x) && config.use387 -> (FCHS x)
    75  
    76  (Com32 x) -> (NOTL x)
    77  (Com16 x) -> (NOTL x)
    78  (Com8  x) -> (NOTL x)
    79  
    80  // Lowering boolean ops
    81  (AndB x y) -> (ANDL x y)
    82  (OrB x y) -> (ORL x y)
    83  (Not x) -> (XORLconst [1] x)
    84  
    85  // Lowering pointer arithmetic
    86  (OffPtr [off] ptr) -> (ADDLconst [off] ptr)
    87  
    88  (Bswap32 x) -> (BSWAPL x)
    89  
    90  (Sqrt x) -> (SQRTSD x)
    91  
    92  // Lowering extension
    93  (SignExt8to16  x) -> (MOVBLSX x)
    94  (SignExt8to32  x) -> (MOVBLSX x)
    95  (SignExt16to32 x) -> (MOVWLSX x)
    96  
    97  (ZeroExt8to16  x) -> (MOVBLZX x)
    98  (ZeroExt8to32  x) -> (MOVBLZX x)
    99  (ZeroExt16to32 x) -> (MOVWLZX x)
   100  
   101  (Signmask x) -> (SARLconst x [31])
   102  (Zeromask <t> x) -> (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
   103  (Slicemask <t> x) -> (SARLconst (NEGL <t> x) [31])
   104  
   105  // Lowering truncation
   106  // Because we ignore high parts of registers, truncates are just copies.
   107  (Trunc16to8  x) -> x
   108  (Trunc32to8  x) -> x
   109  (Trunc32to16 x) -> x
   110  
   111  // Lowering float <-> int
   112  (Cvt32to32F x) -> (CVTSL2SS x)
   113  (Cvt32to64F x) -> (CVTSL2SD x)
   114  
   115  (Cvt32Fto32 x) -> (CVTTSS2SL x)
   116  (Cvt64Fto32 x) -> (CVTTSD2SL x)
   117  
   118  (Cvt32Fto64F x) -> (CVTSS2SD x)
   119  (Cvt64Fto32F x) -> (CVTSD2SS x)
   120  
   121  (Round32F x) -> x
   122  (Round64F x) -> x
   123  
   124  // Lowering shifts
   125  // Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
   126  //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffff)
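        // For example, in (Lsh32x32 <t> x y) with y = 35: CMPLconst y [32] does not
        // borrow, SBBLcarrymask produces 0, and the ANDL forces the result to 0;
        // with y = 3 the mask is all ones and the SHLL result passes through unchanged.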
   127  (Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   128  (Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   129  (Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   130  
   131  (Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   132  (Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   133  (Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   134  
   135  (Lsh8x32 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   136  (Lsh8x16 <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   137  (Lsh8x8  <t> x y)  -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   138  
   139  (Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   140  (Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   141  (Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   142  
   143  (Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
   144  (Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
   145  (Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
   146  
   147  (Rsh8Ux32 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
   148  (Rsh8Ux16 <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
   149  (Rsh8Ux8  <t> x y)  -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
   150  
   151  // Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
   152  // We implement this by setting the shift amount to -1 (all ones) if the shift amount is >= width.
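        // For example, in (Rsh32x32 <t> x y) with y = 40: SBBLcarrymask gives 0, NOTL
        // gives all ones, ORL turns the shift amount into -1, and SARL by -1 (masked by
        // the hardware to its low 5 bits, i.e. 31) just replicates the sign bit;
        // for y < 32 the ORL leaves y unchanged.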
   153  
   154  (Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
   155  (Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
   156  (Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
   157  
   158  (Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
   159  (Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
   160  (Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
   161  
   162  (Rsh8x32 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
   163  (Rsh8x16 <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
   164  (Rsh8x8  <t> x y)  -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
   165  
   166  // constant shifts
   167  // generic opt rewrites all constant shifts to shift by Const64
   168  (Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SHLLconst x [c])
   169  (Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SARLconst x [c])
   170  (Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SHRLconst x [c])
   171  (Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SHLLconst x [c])
   172  (Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SARWconst x [c])
   173  (Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SHRWconst x [c])
   174  (Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SHLLconst x [c])
   175  (Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SARBconst x [c])
   176  (Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SHRBconst x [c])
   177  
   178  // large constant shifts
   179  (Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
   180  (Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
   181  (Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
   182  (Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
   183  (Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
   184  (Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
   185  
   186  // large constant signed right shifts leave only the sign bit, so shift by width-1 instead
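        // For example, (Rsh32x64 x (Const64 [40])) becomes (SARLconst x [31]): every
        // result bit is a copy of the sign bit, which is what an arithmetic shift by
        // any amount >= 32 would produce.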
   187  (Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SARLconst x [31])
   188  (Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SARWconst x [15])
   189  (Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SARBconst x [7])
   190  
   191  // Lowering comparisons
   192  (Less32  x y) -> (SETL (CMPL x y))
   193  (Less16  x y) -> (SETL (CMPW x y))
   194  (Less8   x y) -> (SETL (CMPB x y))
   195  (Less32U x y) -> (SETB (CMPL x y))
   196  (Less16U x y) -> (SETB (CMPW x y))
   197  (Less8U  x y) -> (SETB (CMPB x y))
   198  // Use SETGF with reversed operands to dodge NaN case
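        // (Sketch of the rationale, assuming SETGF tests the unsigned "above" condition:
        // UCOMIS* leaves that condition false when either operand is NaN, so x < y is
        // computed as y > x and still yields false for NaN inputs.)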
   199  (Less64F x y) -> (SETGF (UCOMISD y x))
   200  (Less32F x y) -> (SETGF (UCOMISS y x))
   201  
   202  (Leq32  x y) -> (SETLE (CMPL x y))
   203  (Leq16  x y) -> (SETLE (CMPW x y))
   204  (Leq8   x y) -> (SETLE (CMPB x y))
   205  (Leq32U x y) -> (SETBE (CMPL x y))
   206  (Leq16U x y) -> (SETBE (CMPW x y))
   207  (Leq8U  x y) -> (SETBE (CMPB x y))
   208  // Use SETGEF with reversed operands to dodge NaN case
   209  (Leq64F x y) -> (SETGEF (UCOMISD y x))
   210  (Leq32F x y) -> (SETGEF (UCOMISS y x))
   211  
   212  (Greater32  x y) -> (SETG (CMPL x y))
   213  (Greater16  x y) -> (SETG (CMPW x y))
   214  (Greater8   x y) -> (SETG (CMPB x y))
   215  (Greater32U x y) -> (SETA (CMPL x y))
   216  (Greater16U x y) -> (SETA (CMPW x y))
   217  (Greater8U  x y) -> (SETA (CMPB x y))
   218  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here.
   219  // The bug is accommodated when the assembly is generated.
   220  (Greater64F x y) -> (SETGF (UCOMISD x y))
   221  (Greater32F x y) -> (SETGF (UCOMISS x y))
   222  
   223  (Geq32  x y) -> (SETGE (CMPL x y))
   224  (Geq16  x y) -> (SETGE (CMPW x y))
   225  (Geq8   x y) -> (SETGE (CMPB x y))
   226  (Geq32U x y) -> (SETAE (CMPL x y))
   227  (Geq16U x y) -> (SETAE (CMPW x y))
   228  (Geq8U  x y) -> (SETAE (CMPB x y))
   229  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here.
   230  // The bug is accommodated when the assembly is generated.
   231  (Geq64F x y) -> (SETGEF (UCOMISD x y))
   232  (Geq32F x y) -> (SETGEF (UCOMISS x y))
   233  
   234  (Eq32  x y) -> (SETEQ (CMPL x y))
   235  (Eq16  x y) -> (SETEQ (CMPW x y))
   236  (Eq8   x y) -> (SETEQ (CMPB x y))
   237  (EqB   x y) -> (SETEQ (CMPB x y))
   238  (EqPtr x y) -> (SETEQ (CMPL x y))
   239  (Eq64F x y) -> (SETEQF (UCOMISD x y))
   240  (Eq32F x y) -> (SETEQF (UCOMISS x y))
   241  
   242  (Neq32  x y) -> (SETNE (CMPL x y))
   243  (Neq16  x y) -> (SETNE (CMPW x y))
   244  (Neq8   x y) -> (SETNE (CMPB x y))
   245  (NeqB   x y) -> (SETNE (CMPB x y))
   246  (NeqPtr x y) -> (SETNE (CMPL x y))
   247  (Neq64F x y) -> (SETNEF (UCOMISD x y))
   248  (Neq32F x y) -> (SETNEF (UCOMISS x y))
   249  
   250  // Lowering loads
   251  (Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVLload ptr mem)
   252  (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
   253  (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
   254  (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
   255  (Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
   256  
   257  // Lowering stores
   258  // These more-specific FP versions of the Store pattern must come first.
   259  (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
   260  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
   261  
   262  (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
   263  (Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
   264  (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
   265  
   266  // Lowering moves
   267  (Move [0] _ _ mem) -> mem
   268  (Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
   269  (Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
   270  (Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
   271  (Move [3] dst src mem) ->
   272  	(MOVBstore [2] dst (MOVBload [2] src mem)
   273  		(MOVWstore dst (MOVWload src mem) mem))
   274  (Move [5] dst src mem) ->
   275  	(MOVBstore [4] dst (MOVBload [4] src mem)
   276  		(MOVLstore dst (MOVLload src mem) mem))
   277  (Move [6] dst src mem) ->
   278  	(MOVWstore [4] dst (MOVWload [4] src mem)
   279  		(MOVLstore dst (MOVLload src mem) mem))
   280  (Move [7] dst src mem) ->
   281  	(MOVLstore [3] dst (MOVLload [3] src mem)
   282  		(MOVLstore dst (MOVLload src mem) mem))
   283  (Move [8] dst src mem) ->
   284  	(MOVLstore [4] dst (MOVLload [4] src mem)
   285  		(MOVLstore dst (MOVLload src mem) mem))
   286  
   287  // Adjust moves to be a multiple of 4 bytes.
   288  (Move [s] dst src mem)
   289  	&& s > 8 && s%4 != 0 ->
   290  	(Move [s-s%4]
   291  		(ADDLconst <dst.Type> dst [s%4])
   292  		(ADDLconst <src.Type> src [s%4])
   293  		(MOVLstore dst (MOVLload src mem) mem))
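        // For example, a 13-byte move stores the first 4 bytes with MOVL, then issues
        // a 12-byte Move starting 1 byte in; the 3 overlapping bytes are simply
        // written twice.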
   294  
   295  // Medium copying uses a duff device.
   296  (Move [s] dst src mem)
   297  	&& s > 8 && s <= 4*128 && s%4 == 0
   298  	&& !config.noDuffDevice ->
   299  	(DUFFCOPY [10*(128-s/4)] dst src mem)
   300  // 10 and 128 are magic constants.  10 is the number of bytes to encode:
   301  //	MOVL	(SI), CX
   302  //	ADDL	$4, SI
   303  //	MOVL	CX, (DI)
   304  //	ADDL	$4, DI
   305  // and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
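        // For example, a 64-byte move (16 words) becomes DUFFCOPY [10*(128-16)], i.e.
        // an entry offset of 1120 bytes, leaving 16 copy blocks to execute.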
   306  
   307  // Large copying uses REP MOVSL.
   308  (Move [s] dst src mem) && (s > 4*128 || config.noDuffDevice) && s%4 == 0 ->
   309  	(REPMOVSL dst src (MOVLconst [s/4]) mem)
   310  
   311  // Lowering Zero instructions
   312  (Zero [0] _ mem) -> mem
   313  (Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
   314  (Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
   315  (Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
   316  
   317  (Zero [3] destptr mem) ->
   318  	(MOVBstoreconst [makeValAndOff(0,2)] destptr
   319  		(MOVWstoreconst [0] destptr mem))
   320  (Zero [5] destptr mem) ->
   321  	(MOVBstoreconst [makeValAndOff(0,4)] destptr
   322  		(MOVLstoreconst [0] destptr mem))
   323  (Zero [6] destptr mem) ->
   324  	(MOVWstoreconst [makeValAndOff(0,4)] destptr
   325  		(MOVLstoreconst [0] destptr mem))
   326  (Zero [7] destptr mem) ->
   327  	(MOVLstoreconst [makeValAndOff(0,3)] destptr
   328  		(MOVLstoreconst [0] destptr mem))
   329  
   330  // Strip off any fractional word zeroing.
   331  (Zero [s] destptr mem) && s%4 != 0 && s > 4 ->
   332  	(Zero [s-s%4] (ADDLconst destptr [s%4])
   333  		(MOVLstoreconst [0] destptr mem))
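        // For example, zeroing 13 bytes stores one zero word at destptr, then issues a
        // 12-byte Zero starting 1 byte in, overlapping the first word by 3 bytes.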
   334  
   335  // Zero small numbers of words directly.
   336  (Zero [8] destptr mem) ->
   337  	(MOVLstoreconst [makeValAndOff(0,4)] destptr
   338  		(MOVLstoreconst [0] destptr mem))
   339  (Zero [12] destptr mem) ->
   340  	(MOVLstoreconst [makeValAndOff(0,8)] destptr
   341  		(MOVLstoreconst [makeValAndOff(0,4)] destptr
   342  			(MOVLstoreconst [0] destptr mem)))
   343  (Zero [16] destptr mem) ->
   344  	(MOVLstoreconst [makeValAndOff(0,12)] destptr
   345  		(MOVLstoreconst [makeValAndOff(0,8)] destptr
   346  			(MOVLstoreconst [makeValAndOff(0,4)] destptr
   347  				(MOVLstoreconst [0] destptr mem))))
   348  
   349  // Medium zeroing uses a duff device.
   350  (Zero [s] destptr mem)
   351    && s > 16 && s <= 4*128 && s%4 == 0
   352    && !config.noDuffDevice ->
   353  	(DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
   354  // 1 and 128 are magic constants.  1 is the number of bytes to encode STOSL.
   355  // 128 is the number of STOSL instructions in duffzero.
   356  // See src/runtime/duff_386.s:duffzero.
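        // For example, zeroing 64 bytes (16 words) becomes DUFFZERO [1*(128-16)], i.e.
        // an entry offset of 112, leaving 16 STOSL instructions to execute.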
   357  
   358  // Large zeroing uses REP STOSL.
   359  (Zero [s] destptr mem)
   360    && (s > 4*128 || (config.noDuffDevice && s > 16))
   361    && s%4 == 0 ->
   362  	(REPSTOSL destptr (MOVLconst [s/4]) (MOVLconst [0]) mem)
   363  
   364  // Lowering constants
   365  (Const8   [val]) -> (MOVLconst [val])
   366  (Const16  [val]) -> (MOVLconst [val])
   367  (Const32  [val]) -> (MOVLconst [val])
   368  (Const32F [val]) -> (MOVSSconst [val])
   369  (Const64F [val]) -> (MOVSDconst [val])
   370  (ConstNil) -> (MOVLconst [0])
   371  (ConstBool [b]) -> (MOVLconst [b])
   372  
   373  // Lowering calls
   374  (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
   375  (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
   376  (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
   377  
   378  // Miscellaneous
   379  (Convert <t> x mem) -> (MOVLconvert <t> x mem)
   380  (IsNonNil p) -> (SETNE (TESTL p p))
   381  (IsInBounds idx len) -> (SETB (CMPL idx len))
   382  (IsSliceInBounds idx len) -> (SETBE (CMPL idx len))
   383  (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
   384  (GetG mem) -> (LoweredGetG mem)
   385  (GetClosurePtr) -> (LoweredGetClosurePtr)
   386  (Addr {sym} base) -> (LEAL {sym} base)
   387  
   388  // block rewrites
   389  (If (SETL  cmp) yes no) -> (LT  cmp yes no)
   390  (If (SETLE cmp) yes no) -> (LE  cmp yes no)
   391  (If (SETG  cmp) yes no) -> (GT  cmp yes no)
   392  (If (SETGE cmp) yes no) -> (GE  cmp yes no)
   393  (If (SETEQ cmp) yes no) -> (EQ  cmp yes no)
   394  (If (SETNE cmp) yes no) -> (NE  cmp yes no)
   395  (If (SETB  cmp) yes no) -> (ULT cmp yes no)
   396  (If (SETBE cmp) yes no) -> (ULE cmp yes no)
   397  (If (SETA  cmp) yes no) -> (UGT cmp yes no)
   398  (If (SETAE cmp) yes no) -> (UGE cmp yes no)
   399  
   400  // Special case for floating point - LF/LEF not generated
   401  (If (SETGF  cmp) yes no) -> (UGT  cmp yes no)
   402  (If (SETGEF cmp) yes no) -> (UGE  cmp yes no)
   403  (If (SETEQF cmp) yes no) -> (EQF  cmp yes no)
   404  (If (SETNEF cmp) yes no) -> (NEF  cmp yes no)
   405  
   406  (If cond yes no) -> (NE (TESTB cond cond) yes no)
   407  
   408  // ***************************
   409  // Above: lowering rules
   410  // Below: optimizations
   411  // ***************************
   412  // TODO: Should the optimizations be a separate pass?
   413  
   414  // Fold boolean tests into blocks
   415  (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT  cmp yes no)
   416  (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE  cmp yes no)
   417  (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT  cmp yes no)
   418  (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE  cmp yes no)
   419  (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ  cmp yes no)
   420  (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE  cmp yes no)
   421  (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) -> (ULT cmp yes no)
   422  (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
   423  (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) -> (UGT cmp yes no)
   424  (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
   425  
   426  // Special case for floating point - LF/LEF not generated
   427  (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) -> (UGT  cmp yes no)
   428  (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE  cmp yes no)
   429  (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF  cmp yes no)
   430  (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF  cmp yes no)
   431  
   432  // fold constants into instructions
   433  (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
   434  (ADDLcarry x (MOVLconst [c])) -> (ADDLconstcarry [c] x)
   435  (ADCL x (MOVLconst [c]) f) -> (ADCLconst [c] x f)
   436  (ADCL (MOVLconst [c]) x f) -> (ADCLconst [c] x f)
   437  
   438  (SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
   439  (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
   440  (SUBLcarry x (MOVLconst [c])) -> (SUBLconstcarry [c] x)
   441  (SBBL x (MOVLconst [c]) f) -> (SBBLconst [c] x f)
   442  
   443  (MULL x (MOVLconst [c])) -> (MULLconst [c] x)
   444  
   445  (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
   446  
   447  (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
   448  
   449  (XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
   450  
   451  (MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
   452  
   453  (ORL x (MOVLconst [c])) -> (ORLconst [c] x)
   454  
   455  (XORL x (MOVLconst [c])) -> (XORLconst [c] x)
   456  
   457  (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
   458  (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
   459  (SHRW x (MOVLconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
   460  (SHRW _ (MOVLconst [c])) && c&31 >= 16 -> (MOVLconst [0])
   461  (SHRB x (MOVLconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
   462  (SHRB _ (MOVLconst [c])) && c&31 >= 8 -> (MOVLconst [0])
   463  
   464  (SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
   465  (SARW x (MOVLconst [c])) -> (SARWconst [min(c&31,15)] x)
   466  (SARB x (MOVLconst [c])) -> (SARBconst [min(c&31,7)] x)
   467  
   468  (SARL x (ANDLconst [31] y)) -> (SARL x y)
   469  
   470  (SHLL x (ANDLconst [31] y)) -> (SHLL x y)
   471  
   472  (SHRL x (ANDLconst [31] y)) -> (SHRL x y)
   473  
   474  // Rotate instructions
   475  
   476  (ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
   477  ( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
   478  (XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
   479  
   480  (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
   481  ( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
   482  (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
   483  
   484  (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
   485  ( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
   486  (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
   487  
   488  (ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
   489  (ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
   490  (ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
   491  
   492  // Constant shift simplifications
   493  
   494  (SHLLconst x [0]) -> x
   495  (SHRLconst x [0]) -> x
   496  (SARLconst x [0]) -> x
   497  
   498  (SHRWconst x [0]) -> x
   499  (SARWconst x [0]) -> x
   500  
   501  (SHRBconst x [0]) -> x
   502  (SARBconst x [0]) -> x
   503  
   504  (ROLLconst [0] x) -> x
   505  (ROLWconst [0] x) -> x
   506  (ROLBconst [0] x) -> x
   507  
   508  // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
   509  // because the x86 instructions are defined to use all 5 bits of the shift even
   510  // for the small shifts. I don't think we'll ever generate a weird shift (e.g.
   511  // (SHRW x (MOVLconst [24]))), but just in case.
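        // For example, (SHRW x (MOVLconst [24])) has 24&31 = 24 >= 16, so the rule
        // above rewrites it to (MOVLconst [0]), matching what the hardware's 5-bit
        // shift count produces for a 16-bit value.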
   512  
   513  (CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
   514  (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
   515  (CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
   516  (CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
   517  (CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
   518  (CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
   519  
   520  // strength reduction
   521  // Assumes the following instruction costs, taken from https://gmplib.org/~tege/x86-timing.pdf:
   522  //    1 - addl, shll, leal, negl
   523  //    3 - imull
   524  // This limits the rewrites to two instructions.
   525  // TODO: 27, 81
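        // For example, (MULLconst [11] x) below becomes (LEAL2 x (LEAL4 <v.Type> x x)),
        // i.e. x + 2*(x + 4*x) = 11*x, two cost-1 LEALs instead of a cost-3 multiply.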
   526  (MULLconst [-1] x) -> (NEGL x)
   527  (MULLconst [0] _) -> (MOVLconst [0])
   528  (MULLconst [1] x) -> x
   529  (MULLconst [3] x) -> (LEAL2 x x)
   530  (MULLconst [5] x) -> (LEAL4 x x)
   531  (MULLconst [7] x) -> (LEAL8 (NEGL <v.Type> x) x)
   532  (MULLconst [9] x) -> (LEAL8 x x)
   533  (MULLconst [11] x) -> (LEAL2 x (LEAL4 <v.Type> x x))
   534  (MULLconst [13] x) -> (LEAL4 x (LEAL2 <v.Type> x x))
   535  (MULLconst [21] x) -> (LEAL4 x (LEAL4 <v.Type> x x))
   536  (MULLconst [25] x) -> (LEAL8 x (LEAL2 <v.Type> x x))
   537  (MULLconst [37] x) -> (LEAL4 x (LEAL8 <v.Type> x x))
   538  (MULLconst [41] x) -> (LEAL8 x (LEAL4 <v.Type> x x))
   539  (MULLconst [73] x) -> (LEAL8 x (LEAL8 <v.Type> x x))
   540  
   541  (MULLconst [c] x) && isPowerOfTwo(c) -> (SHLLconst [log2(c)] x)
   542  (MULLconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
   543  (MULLconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
   544  (MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
   545  (MULLconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
   546  (MULLconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
   547  (MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
   548  (MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
   549  (MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
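        // For example, (MULLconst [12] x): 12%3 == 0 and 12/3 = 4 is a power of two,
        // so it becomes (SHLLconst [2] (LEAL2 <v.Type> x x)), i.e. (3*x) << 2 = 12*x.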
   550  
   551  // combine add/shift into LEAL
   552  (ADDL x (SHLLconst [3] y)) -> (LEAL8 x y)
   553  (ADDL x (SHLLconst [2] y)) -> (LEAL4 x y)
   554  (ADDL x (SHLLconst [1] y)) -> (LEAL2 x y)
   555  (ADDL x (ADDL y y)) -> (LEAL2 x y)
   556  (ADDL x (ADDL x y)) -> (LEAL2 y x)
   557  
   558  // combine ADDL/ADDLconst into LEAL1
   559  (ADDLconst [c] (ADDL x y)) -> (LEAL1 [c] x y)
   560  (ADDL (ADDLconst [c] x) y) -> (LEAL1 [c] x y)
   561  
   562  // fold ADDL into LEAL
   563  (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
   564  (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
   565  (LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
   566  (ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
   567  
   568  // fold ADDLconst into LEALx
   569  (ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(c+d) -> (LEAL1 [c+d] {s} x y)
   570  (ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(c+d) -> (LEAL2 [c+d] {s} x y)
   571  (ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(c+d) -> (LEAL4 [c+d] {s} x y)
   572  (ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(c+d) -> (LEAL8 [c+d] {s} x y)
   573  (LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL1 [c+d] {s} x y)
   574  (LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL2 [c+d] {s} x y)
   575  (LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAL2 [c+2*d] {s} x y)
   576  (LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL4 [c+d] {s} x y)
   577  (LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAL4 [c+4*d] {s} x y)
   578  (LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d)   && x.Op != OpSB -> (LEAL8 [c+d] {s} x y)
   579  (LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAL8 [c+8*d] {s} x y)
   580  
   581  // fold shifts into LEALx
   582  (LEAL1 [c] {s} x (SHLLconst [1] y)) -> (LEAL2 [c] {s} x y)
   583  (LEAL1 [c] {s} x (SHLLconst [2] y)) -> (LEAL4 [c] {s} x y)
   584  (LEAL1 [c] {s} x (SHLLconst [3] y)) -> (LEAL8 [c] {s} x y)
   585  (LEAL2 [c] {s} x (SHLLconst [1] y)) -> (LEAL4 [c] {s} x y)
   586  (LEAL2 [c] {s} x (SHLLconst [2] y)) -> (LEAL8 [c] {s} x y)
   587  (LEAL4 [c] {s} x (SHLLconst [1] y)) -> (LEAL8 [c] {s} x y)
   588  
   589  // reverse ordering of compare instruction
   590  (SETL (InvertFlags x)) -> (SETG x)
   591  (SETG (InvertFlags x)) -> (SETL x)
   592  (SETB (InvertFlags x)) -> (SETA x)
   593  (SETA (InvertFlags x)) -> (SETB x)
   594  (SETLE (InvertFlags x)) -> (SETGE x)
   595  (SETGE (InvertFlags x)) -> (SETLE x)
   596  (SETBE (InvertFlags x)) -> (SETAE x)
   597  (SETAE (InvertFlags x)) -> (SETBE x)
   598  (SETEQ (InvertFlags x)) -> (SETEQ x)
   599  (SETNE (InvertFlags x)) -> (SETNE x)
   600  
   601  // sign extended loads
   602  // Note: The combined instruction must end up in the same block
   603  // as the original load. If not, we end up making a value with
   604  // memory type live in two different blocks, which can lead to
   605  // multiple memory values alive simultaneously.
   606  // Make sure we don't combine these ops if the load has another use.
   607  // This prevents a single load from being split into multiple loads
   608  // which then might return different values.  See test/atomicload.go.
   609  (MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
   610  (MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   611  (MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
   612  (MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   613  
   614  (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
   615  (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
   616  (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
   617  
   618  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
   619  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBLZX x)
   620  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWLZX x)
   621  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   622  (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBLSX x)
   623  (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWLSX x)
   624  
   625  // Fold extensions and ANDs together.
   626  (MOVBLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
   627  (MOVWLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
   628  (MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
   629  (MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
   630  
   631  // Don't extend before storing
   632  (MOVWstore [off] {sym} ptr (MOVWLSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   633  (MOVBstore [off] {sym} ptr (MOVBLSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   634  (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
   635  (MOVBstore [off] {sym} ptr (MOVBLZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
   636  
   637  // fold constants into memory operations
   638  // Note that this is not always a good idea because if not all the uses of
   639  // the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
   640  // have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
   641  // Nevertheless, let's do it!
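        // For example, (MOVLload [8] {sym} (ADDLconst [4] ptr) mem) becomes
        // (MOVLload [12] {sym} ptr mem), folding the pointer adjustment into the
        // load's displacement.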
   642  (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload  [off1+off2] {sym} ptr mem)
   643  (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
   644  (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
   645  (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
   646  (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
   647  
   648  (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore  [off1+off2] {sym} ptr val mem)
   649  (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore  [off1+off2] {sym} ptr val mem)
   650  (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore  [off1+off2] {sym} ptr val mem)
   651  (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
   652  (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
   653  
   654  // Fold constants into stores.
   655  (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   656  	(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
   657  (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   658  	(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
   659  (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   660  	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
   661  
   662  // Fold address offsets into constant stores.
   663  (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   664  	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   665  (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   666  	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   667  (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
   668  	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   669  
   670  // We need to fold LEAL into the MOVx ops so that the live variable analysis knows
   671  // what variables are being read/written by the ops.
   672  // Note: we turn off this merging for operations on globals when building
   673  // position-independent code (when Flag_shared is set).
   674  // PIC needs a spare register to load the PC into.  Having the LEAL be
   675  // a separate instruction gives us that register.  Having the LEAL be
   676  // a separate instruction also allows it to be CSEd (which is good because
   677  // it compiles to a thunk call).
   678  (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   679    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   680  	(MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   681  (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   682    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   683  	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   684  (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   685    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   686  	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   687  (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   688    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   689  	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   690  (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   691    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   692  	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   693  
   694  (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   695    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   696  	(MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   697  (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   698    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   699  	(MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   700  
   701  (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   702    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   703  	(MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   704  (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   705    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   706  	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   707  (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   708    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   709  	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   710  (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   711    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   712  	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   713  (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   714    && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
   715  	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   716  
   717  (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   718    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
   719  	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   720  (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   721    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
   722  	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   723  (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   724    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
   725  	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   726  
   727  // generating indexed loads and stores
   728  (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   729  	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   730  (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   731  	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   732  (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   733  	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   734  (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   735  	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   736  (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   737  	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   738  (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   739  	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   740  (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   741  	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   742  (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   743  	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   744  (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   745  	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   746  
   747  (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   748  	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   749  (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   750  	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   751  (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   752  	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   753  (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   754  	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   755  (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   756  	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   757  (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   758  	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   759  (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   760  	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   761  (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   762  	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   763  (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   764  	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   765  
   766  (MOVBload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
   767  (MOVWload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
   768  (MOVLload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
   769  (MOVSSload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
   770  (MOVSDload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
   771  (MOVBstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
   772  (MOVWstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
   773  (MOVLstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
   774  (MOVSSstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
   775  (MOVSDstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
   776  
   777  (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   778  	(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   779  (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   780  	(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   781  (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   782  	(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   783  (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   784  	(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   785  (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
   786  	(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   787  
   788  (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
   789  (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
   790  (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
   791  
   792  // combine SHLL into indexed loads and stores
   793  (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
   794  (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
   795  (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
   796  (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
   797  (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
   798  (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
   799  
   800  // combine ADDLconst into indexed loads and stores
   801  (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
   802  (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
   803  (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
   804  (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
   805  (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
   806  (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
   807  (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
   808  (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
   809  (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
   810  
   811  (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
   812  (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
   813  (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
   814  (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
   815  (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
   816  (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
   817  (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
   818  (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
   819  (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
   820  
   821  (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
   822  (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
   823  (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2  [int64(int32(c+2*d))] {sym} ptr idx mem)
   824  (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
   825  (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4  [int64(int32(c+4*d))] {sym} ptr idx mem)
   826  (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [int64(int32(c+d))]   {sym} ptr idx mem)
   827  (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
   828  (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [int64(int32(c+d))]   {sym} ptr idx mem)
   829  (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
   830  
   831  (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
   832  (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
   833  (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2  [int64(int32(c+2*d))] {sym} ptr idx val mem)
   834  (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
   835  (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)  -> (MOVLstoreidx4  [int64(int32(c+4*d))] {sym} ptr idx val mem)
   836  (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [int64(int32(c+d))]   {sym} ptr idx val mem)
   837  (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
   838  (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [int64(int32(c+d))]   {sym} ptr idx val mem)
   839  (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
   840  
   841  (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   842  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   843  (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   844  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   845  (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   846  	(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   847  (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   848  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   849  (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
   850  	(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   851  
   852  (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   853  	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   854  (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   855  	(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   856  (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   857  	(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
   858  (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   859  	(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   860  (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
   861  	(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
   862  
   863  // fold LEALs together
   864  (LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   865        (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
   866  
   867  // LEAL into LEAL1
   868  (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   869         (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   870  
   871  // LEAL1 into LEAL
   872  (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   873         (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   874  
   875  // LEAL into LEAL[248]
   876  (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   877         (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   878  (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   879         (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   880  (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
   881         (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   882  
   883  // LEAL[248] into LEAL
   884  (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   885        (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   886  (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   887        (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   888  (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
   889        (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   890  
   891  // Absorb InvertFlags into branches.
   892  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   893  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   894  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   895  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   896  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
   897  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
   898  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
   899  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
   900  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   901  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   902  
   903  // Constant comparisons.
   904  (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
   905  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
   906  (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
   907  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
   908  (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
   909  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
   910  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
   911  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
   912  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
   913  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
   914  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
   915  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
   916  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
   917  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
   918  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
   919  
   920  // Other known comparisons.
   921  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
   922  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
   923  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
   924  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
   925  // TODO: DIVxU also.
   926  
   927  // Absorb flag constants into SBB ops.
   928  (SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
   929  (SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
   930  (SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
   931  (SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
   932  (SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
   933  
   934  // Absorb flag constants into branches.
   935  (EQ (FlagEQ) yes no) -> (First nil yes no)
   936  (EQ (FlagLT_ULT) yes no) -> (First nil no yes)
   937  (EQ (FlagLT_UGT) yes no) -> (First nil no yes)
   938  (EQ (FlagGT_ULT) yes no) -> (First nil no yes)
   939  (EQ (FlagGT_UGT) yes no) -> (First nil no yes)
   940  
   941  (NE (FlagEQ) yes no) -> (First nil no yes)
   942  (NE (FlagLT_ULT) yes no) -> (First nil yes no)
   943  (NE (FlagLT_UGT) yes no) -> (First nil yes no)
   944  (NE (FlagGT_ULT) yes no) -> (First nil yes no)
   945  (NE (FlagGT_UGT) yes no) -> (First nil yes no)
   946  
   947  (LT (FlagEQ) yes no) -> (First nil no yes)
   948  (LT (FlagLT_ULT) yes no) -> (First nil yes no)
   949  (LT (FlagLT_UGT) yes no) -> (First nil yes no)
   950  (LT (FlagGT_ULT) yes no) -> (First nil no yes)
   951  (LT (FlagGT_UGT) yes no) -> (First nil no yes)
   952  
   953  (LE (FlagEQ) yes no) -> (First nil yes no)
   954  (LE (FlagLT_ULT) yes no) -> (First nil yes no)
   955  (LE (FlagLT_UGT) yes no) -> (First nil yes no)
   956  (LE (FlagGT_ULT) yes no) -> (First nil no yes)
   957  (LE (FlagGT_UGT) yes no) -> (First nil no yes)
   958  
   959  (GT (FlagEQ) yes no) -> (First nil no yes)
   960  (GT (FlagLT_ULT) yes no) -> (First nil no yes)
   961  (GT (FlagLT_UGT) yes no) -> (First nil no yes)
   962  (GT (FlagGT_ULT) yes no) -> (First nil yes no)
   963  (GT (FlagGT_UGT) yes no) -> (First nil yes no)
   964  
   965  (GE (FlagEQ) yes no) -> (First nil yes no)
   966  (GE (FlagLT_ULT) yes no) -> (First nil no yes)
   967  (GE (FlagLT_UGT) yes no) -> (First nil no yes)
   968  (GE (FlagGT_ULT) yes no) -> (First nil yes no)
   969  (GE (FlagGT_UGT) yes no) -> (First nil yes no)
   970  
   971  (ULT (FlagEQ) yes no) -> (First nil no yes)
   972  (ULT (FlagLT_ULT) yes no) -> (First nil yes no)
   973  (ULT (FlagLT_UGT) yes no) -> (First nil no yes)
   974  (ULT (FlagGT_ULT) yes no) -> (First nil yes no)
   975  (ULT (FlagGT_UGT) yes no) -> (First nil no yes)
   976  
   977  (ULE (FlagEQ) yes no) -> (First nil yes no)
   978  (ULE (FlagLT_ULT) yes no) -> (First nil yes no)
   979  (ULE (FlagLT_UGT) yes no) -> (First nil no yes)
   980  (ULE (FlagGT_ULT) yes no) -> (First nil yes no)
   981  (ULE (FlagGT_UGT) yes no) -> (First nil no yes)
   982  
   983  (UGT (FlagEQ) yes no) -> (First nil no yes)
   984  (UGT (FlagLT_ULT) yes no) -> (First nil no yes)
   985  (UGT (FlagLT_UGT) yes no) -> (First nil yes no)
   986  (UGT (FlagGT_ULT) yes no) -> (First nil no yes)
   987  (UGT (FlagGT_UGT) yes no) -> (First nil yes no)
   988  
   989  (UGE (FlagEQ) yes no) -> (First nil yes no)
   990  (UGE (FlagLT_ULT) yes no) -> (First nil no yes)
   991  (UGE (FlagLT_UGT) yes no) -> (First nil yes no)
   992  (UGE (FlagGT_ULT) yes no) -> (First nil no yes)
   993  (UGE (FlagGT_UGT) yes no) -> (First nil yes no)
   994  
   995  // Absorb flag constants into SETxx ops.
   996  (SETEQ (FlagEQ)) -> (MOVLconst [1])
   997  (SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
   998  (SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
   999  (SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
  1000  (SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
  1001  
  1002  (SETNE (FlagEQ)) -> (MOVLconst [0])
  1003  (SETNE (FlagLT_ULT)) -> (MOVLconst [1])
  1004  (SETNE (FlagLT_UGT)) -> (MOVLconst [1])
  1005  (SETNE (FlagGT_ULT)) -> (MOVLconst [1])
  1006  (SETNE (FlagGT_UGT)) -> (MOVLconst [1])
  1007  
  1008  (SETL (FlagEQ)) -> (MOVLconst [0])
  1009  (SETL (FlagLT_ULT)) -> (MOVLconst [1])
  1010  (SETL (FlagLT_UGT)) -> (MOVLconst [1])
  1011  (SETL (FlagGT_ULT)) -> (MOVLconst [0])
  1012  (SETL (FlagGT_UGT)) -> (MOVLconst [0])
  1013  
  1014  (SETLE (FlagEQ)) -> (MOVLconst [1])
  1015  (SETLE (FlagLT_ULT)) -> (MOVLconst [1])
  1016  (SETLE (FlagLT_UGT)) -> (MOVLconst [1])
  1017  (SETLE (FlagGT_ULT)) -> (MOVLconst [0])
  1018  (SETLE (FlagGT_UGT)) -> (MOVLconst [0])
  1019  
  1020  (SETG (FlagEQ)) -> (MOVLconst [0])
  1021  (SETG (FlagLT_ULT)) -> (MOVLconst [0])
  1022  (SETG (FlagLT_UGT)) -> (MOVLconst [0])
  1023  (SETG (FlagGT_ULT)) -> (MOVLconst [1])
  1024  (SETG (FlagGT_UGT)) -> (MOVLconst [1])
  1025  
  1026  (SETGE (FlagEQ)) -> (MOVLconst [1])
  1027  (SETGE (FlagLT_ULT)) -> (MOVLconst [0])
  1028  (SETGE (FlagLT_UGT)) -> (MOVLconst [0])
  1029  (SETGE (FlagGT_ULT)) -> (MOVLconst [1])
  1030  (SETGE (FlagGT_UGT)) -> (MOVLconst [1])
  1031  
  1032  (SETB (FlagEQ)) -> (MOVLconst [0])
  1033  (SETB (FlagLT_ULT)) -> (MOVLconst [1])
  1034  (SETB (FlagLT_UGT)) -> (MOVLconst [0])
  1035  (SETB (FlagGT_ULT)) -> (MOVLconst [1])
  1036  (SETB (FlagGT_UGT)) -> (MOVLconst [0])
  1037  
  1038  (SETBE (FlagEQ)) -> (MOVLconst [1])
  1039  (SETBE (FlagLT_ULT)) -> (MOVLconst [1])
  1040  (SETBE (FlagLT_UGT)) -> (MOVLconst [0])
  1041  (SETBE (FlagGT_ULT)) -> (MOVLconst [1])
  1042  (SETBE (FlagGT_UGT)) -> (MOVLconst [0])
  1043  
  1044  (SETA (FlagEQ)) -> (MOVLconst [0])
  1045  (SETA (FlagLT_ULT)) -> (MOVLconst [0])
  1046  (SETA (FlagLT_UGT)) -> (MOVLconst [1])
  1047  (SETA (FlagGT_ULT)) -> (MOVLconst [0])
  1048  (SETA (FlagGT_UGT)) -> (MOVLconst [1])
  1049  
  1050  (SETAE (FlagEQ)) -> (MOVLconst [1])
  1051  (SETAE (FlagLT_ULT)) -> (MOVLconst [0])
  1052  (SETAE (FlagLT_UGT)) -> (MOVLconst [1])
  1053  (SETAE (FlagGT_ULT)) -> (MOVLconst [0])
  1054  (SETAE (FlagGT_UGT)) -> (MOVLconst [1])
  1055  
  1056  // Remove redundant *const ops
  1057  (ADDLconst [c] x) && int32(c)==0 -> x
  1058  (SUBLconst [c] x) && int32(c) == 0 -> x
  1059  (ANDLconst [c] _) && int32(c)==0  -> (MOVLconst [0])
  1060  (ANDLconst [c] x) && int32(c)==-1 -> x
  1061  (ORLconst [c] x) && int32(c)==0   -> x
  1062  (ORLconst [c] _) && int32(c)==-1  -> (MOVLconst [-1])
  1063  (XORLconst [c] x) && int32(c)==0   -> x
  1064  // TODO: since we got rid of the W/B versions, we might miss
  1065  // things like (ANDLconst [0x100] x) which were formerly
  1066  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1067  // If we cared, we might do:
  1068  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1069  
  1070  // Convert constant subtracts to constant adds
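        // Canonicalizing on ADDLconst lets the folding rules below handle only
        // the add form; negating c modulo 2^32 gives the same result the
        // subtract would have produced.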
  1071  (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
  1072  
  1073  // generic constant folding
  1074  // TODO: more of this
  1075  (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
  1076  (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
  1077  (SARLconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
  1078  (SARWconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
  1079  (SARBconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
  1080  (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
  1081  (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
  1082  (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
  1083  (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
  1084  (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
  1085  (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
  1086  
  1087  // generic simplifications
  1088  // TODO: more of this
  1089  (ADDL x (NEGL y)) -> (SUBL x y)
  1090  (SUBL x x) -> (MOVLconst [0])
  1091  (ANDL x x) -> x
  1092  (ORL x x) -> x
  1093  (XORL x x) -> (MOVLconst [0])
  1094  
  1095  // checking AND against 0.
  1096  (CMPLconst (ANDL x y) [0]) -> (TESTL x y)
  1097  (CMPWconst (ANDL x y) [0]) -> (TESTW x y)
  1098  (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
  1099  (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
  1100  (CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
  1101  (CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
  1102  
  1103  // TEST %reg,%reg is shorter than CMP
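        // For example, TESTL AX, AX encodes in two bytes while CMPL AX, $0
        // takes at least three, and the two set the condition flags the same
        // way for a comparison against zero.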
  1104  (CMPLconst x [0]) -> (TESTL x x)
  1105  (CMPWconst x [0]) -> (TESTW x x)
  1106  (CMPBconst x [0]) -> (TESTB x x)
  1107  
  1108  // Combining byte loads into larger (unaligned) loads.
  1109  // There are many ways these combinations could occur.  This is
  1110  // designed to match the way encoding/binary.LittleEndian does it.
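        // Roughly, byte-access code shaped like the hypothetical helper
        //
        //     func load32(b []byte) uint32 { // equivalent to binary.LittleEndian.Uint32
        //         return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
        //     }
        //
        // produces an ORL tree of shifted MOVBloads that the patterns below
        // collapse into a single MOVWload/MOVLload.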
  1111  (ORL                  x0:(MOVBload [i0] {s} p mem)
  1112      s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
  1113    && i1 == i0+1
  1114    && x0.Uses == 1
  1115    && x1.Uses == 1
  1116    && s0.Uses == 1
  1117    && mergePoint(b,x0,x1) != nil
  1118    && clobber(x0)
  1119    && clobber(x1)
  1120    && clobber(s0)
  1121    -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1122  
  1123  (ORL o0:(ORL
  1124                         x0:(MOVWload [i0] {s} p mem)
  1125      s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))
  1126      s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
  1127    && i2 == i0+2
  1128    && i3 == i0+3
  1129    && x0.Uses == 1
  1130    && x1.Uses == 1
  1131    && x2.Uses == 1
  1132    && s0.Uses == 1
  1133    && s1.Uses == 1
  1134    && o0.Uses == 1
  1135    && mergePoint(b,x0,x1,x2) != nil
  1136    && clobber(x0)
  1137    && clobber(x1)
  1138    && clobber(x2)
  1139    && clobber(s0)
  1140    && clobber(s1)
  1141    && clobber(o0)
  1142    -> @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
  1143  
  1144  (ORL                  x0:(MOVBloadidx1 [i0] {s} p idx mem)
  1145      s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
  1146    && i1==i0+1
  1147    && x0.Uses == 1
  1148    && x1.Uses == 1
  1149    && s0.Uses == 1
  1150    && mergePoint(b,x0,x1) != nil
  1151    && clobber(x0)
  1152    && clobber(x1)
  1153    && clobber(s0)
  1154    -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
  1155  
  1156  (ORL o0:(ORL
  1157                         x0:(MOVWloadidx1 [i0] {s} p idx mem)
  1158      s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))
  1159      s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)))
  1160    && i2 == i0+2
  1161    && i3 == i0+3
  1162    && x0.Uses == 1
  1163    && x1.Uses == 1
  1164    && x2.Uses == 1
  1165    && s0.Uses == 1
  1166    && s1.Uses == 1
  1167    && o0.Uses == 1
  1168    && mergePoint(b,x0,x1,x2) != nil
  1169    && clobber(x0)
  1170    && clobber(x1)
  1171    && clobber(x2)
  1172    && clobber(s0)
  1173    && clobber(s1)
  1174    && clobber(o0)
  1175    -> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i0] {s} p idx mem)
  1176  
  1177  // Combine constant stores into larger (unaligned) stores.
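        // For example, a byte store of 0x12 at off and of 0x34 at off+1 merge
        // into a single 16-bit store of 0x3412 at off; the little-endian store
        // writes the same two bytes in the same order.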
  1178  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  1179    && x.Uses == 1
  1180    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1181    && clobber(x)
  1182    -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  1183  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  1184    && x.Uses == 1
  1185    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1186    && clobber(x)
  1187    -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
  1188  
  1189  (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  1190    && x.Uses == 1
  1191    && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
  1192    && clobber(x)
  1193    -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  1194  (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
  1195    && x.Uses == 1
  1196    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1197    && clobber(x)
  1198    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
  1199  
  1200  (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
  1201    && x.Uses == 1
  1202    && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
  1203    && clobber(x)
  1204    -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
  1205  
  1206  // Combine stores into larger (unaligned) stores.
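        // These mirror the load combining above: a store of the high byte
        // (SHRLconst [8] w) at i next to a store of w at i-1 becomes one
        // 16-bit store of w at i-1, and likewise for the 16-to-32-bit cases.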
  1207  (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  1208    && x.Uses == 1
  1209    && clobber(x)
  1210    -> (MOVWstore [i-1] {s} p w mem)
  1211  (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
  1212    && x.Uses == 1
  1213    && clobber(x)
  1214    -> (MOVWstore [i-1] {s} p w0 mem)
  1215  (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
  1216    && x.Uses == 1
  1217    && clobber(x)
  1218    -> (MOVLstore [i-2] {s} p w mem)
  1219  (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
  1220    && x.Uses == 1
  1221    && clobber(x)
  1222    -> (MOVLstore [i-2] {s} p w0 mem)
  1223  
  1224  (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  1225    && x.Uses == 1
  1226    && clobber(x)
  1227    -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
  1228  (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
  1229    && x.Uses == 1
  1230    && clobber(x)
  1231    -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  1232  (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
  1233    && x.Uses == 1
  1234    && clobber(x)
  1235    -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
  1236  (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
  1237    && x.Uses == 1
  1238    && clobber(x)
  1239    -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
  1240  
  1241  (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
  1242    && x.Uses == 1
  1243    && clobber(x)
  1244    -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
  1245  (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
  1246    && x.Uses == 1
  1247    && clobber(x)
  1248    -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
  1249  
  1250  // For PIC, break floating-point constant loading into two instructions so we have
  1251  // a register to use for holding the address of the constant pool entry.
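        // (MOVSSconst1/MOVSDconst1 are expected to materialize the address of
        // the constant pool entry into a register; MOVSSconst2/MOVSDconst2
        // then load the value from that address.)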
  1252  (MOVSSconst [c]) && config.ctxt.Flag_shared -> (MOVSSconst2 (MOVSSconst1 [c]))
  1253  (MOVSDconst [c]) && config.ctxt.Flag_shared -> (MOVSDconst2 (MOVSDconst1 [c]))