// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(AddPtr x y) -> (ADD x y)
(Add32 x y) -> (ADD x y)
(Add16 x y) -> (ADD x y)
(Add8 x y) -> (ADD x y)
(Add32F x y) -> (ADDF x y)
(Add64F x y) -> (ADDD x y)

(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
(Select1 (Add32carry <t> x y)) -> (SGTU <config.fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
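// A quick check of the carry rule (illustrative): the 32-bit sum wraps
// exactly when it ends up below an operand, so SGTU x (ADD x y) is the
// carry-out. E.g. x = 0xffffffff, y = 2: ADD = 1 and SGTU(0xffffffff, 1) = 1;
// but x = 3, y = 4: ADD = 7 and SGTU(3, 7) = 0.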

(SubPtr x y) -> (SUB x y)
(Sub32 x y) -> (SUB x y)
(Sub16 x y) -> (SUB x y)
(Sub8 x y) -> (SUB x y)
(Sub32F x y) -> (SUBF x y)
(Sub64F x y) -> (SUBD x y)

(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
(Select1 (Sub32carry <t> x y)) -> (SGTU <config.fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
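// Likewise for subtraction (illustrative): the borrow SGTU (SUB x y) x is
// set iff y > x unsigned, since only then does the difference wrap above
// the minuend. E.g. x = 1, y = 2: SUB = 0xffffffff > 1, so borrow = 1.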

(Mul32 x y) -> (MUL x y)
(Mul16 x y) -> (MUL x y)
(Mul8 x y) -> (MUL x y)
(Mul32F x y) -> (MULF x y)
(Mul64F x y) -> (MULD x y)

(Hmul32 x y) -> (Select0 (MULT x y))
(Hmul32u x y) -> (Select0 (MULTU x y))
(Hmul16 x y) -> (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
(Hmul16u x y) -> (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
(Hmul8 x y) -> (SRAconst (MUL <config.fe.TypeInt32()> (SignExt8to32 x) (SignExt8to32 y)) [8])
(Hmul8u x y) -> (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
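// The narrow Hmul rules form the full product in 32 bits and take the high
// half with a shift (illustrative): Hmul16(x, y) = (int32(x)*int32(y)) >> 16.
// E.g. x = y = 0x4000: the product is 0x10000000, so Hmul16 = 0x1000.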

(Mul32uhilo x y) -> (MULTU x y)

(Div32 x y) -> (Select1 (DIV x y))
(Div32u x y) -> (Select1 (DIVU x y))
(Div16 x y) -> (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Div16u x y) -> (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Div8 x y) -> (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Div8u x y) -> (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Div32F x y) -> (DIVF x y)
(Div64F x y) -> (DIVD x y)

(Mod32 x y) -> (Select0 (DIV x y))
(Mod32u x y) -> (Select0 (DIVU x y))
(Mod16 x y) -> (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Mod16u x y) -> (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Mod8 x y) -> (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Mod8u x y) -> (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))

(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8 x y) -> (AND x y)

(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8 x y) -> (OR x y)

(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8 x y) -> (XOR x y)

// constant shifts
// generic opt rewrites all constant shifts to shift by Const64
(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SLLconst x [c])
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
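// The shift-left-then-right pairs above first discard the junk high bits of
// a narrow value (illustrative): for int16 x, Rsh16x64 by c becomes
// (x << 16) >> (c+16) in 32-bit arithmetic. E.g. c = 3 moves x to bits
// 16..31, then the arithmetic shift by 19 sign-extends from x's bit 15 and
// shifts by 3 in one step.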

// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])

// large constant signed right shift: the result is just copies of the sign bit
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])

// shifts
// the hardware instruction uses only the low 5 bits of the shift amount
// we compare to 32 to ensure Go semantics for large shifts
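// A Go-like sketch of the pattern used below (illustrative, not the emitted
// code): each variable shift becomes a branch-free select.
//	shifted := x << y                 // SLL; hardware masks y to 5 bits
//	inRange := uint32(32) > uint32(y) // SGTUconst [32] y, 1 or 0
//	// CMOVZ keeps shifted when inRange != 0, else substitutes 0:
//	if inRange == 0 { result = 0 } else { result = shifted }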
(Lsh32x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh32x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh32x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh16x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh16x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh16x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh8x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh8x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh8x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32Ux32 <t> x y) -> (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh32Ux16 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh32Ux8 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh16Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh16Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh16Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh8Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32x32 x y) -> (SRA x (CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh32x16 x y) -> (SRA x (CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh32x8 x y) -> (SRA x (CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh16x32 x y) -> (SRA (SignExt16to32 x) (CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh16x16 x y) -> (SRA (SignExt16to32 x) (CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh16x8 x y) -> (SRA (SignExt16to32 x) (CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh8x32 x y) -> (SRA (SignExt8to32 x) (CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh8x16 x y) -> (SRA (SignExt8to32 x) (CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh8x8 x y) -> (SRA (SignExt8to32 x) (CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

// unary ops
(Neg32 x) -> (NEG x)
(Neg16 x) -> (NEG x)
(Neg8 x) -> (NEG x)
(Neg32F x) -> (NEGF x)
(Neg64F x) -> (NEGD x)

(Com32 x) -> (NORconst [0] x)
(Com16 x) -> (NORconst [0] x)
(Com8 x) -> (NORconst [0] x)

(Sqrt x) -> (SQRTD x)

// count trailing zeros:
// 32 - CLZ((x&-x) - 1)
(Ctz32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
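// Worked example (illustrative): x = 0b1000: x & -x = 0b1000, subtracting 1
// gives 0b0111, CLZ = 29, and 32 - 29 = 3 trailing zeros.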

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
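// With this encoding, XOR computes "not equal" and XORconst [1] flips the
// low bit, i.e. boolean NOT, so EqB is !(x ^ y); e.g. x=1, y=0 gives
// XORconst [1] (1^0) = 0 (false), as expected.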

// constants
(Const32 [val]) -> (MOVWconst [val])
(Const16 [val]) -> (MOVWconst [val])
(Const8 [val]) -> (MOVWconst [val])
(Const32F [val]) -> (MOVFconst [val])
(Const64F [val]) -> (MOVDconst [val])
(ConstNil) -> (MOVWconst [0])
(ConstBool [b]) -> (MOVWconst [b])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x

// Zero-/Sign-extensions
(ZeroExt8to16 x) -> (MOVBUreg x)
(ZeroExt8to32 x) -> (MOVBUreg x)
(ZeroExt16to32 x) -> (MOVHUreg x)

(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)

(Signmask x) -> (SRAconst x [31])
(Zeromask x) -> (NEG (SGTU x (MOVWconst [0])))
(Slicemask <t> x) -> (SRAconst (NEG <t> x) [31])
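// Spelled out (illustrative): Signmask(x) is 0 or -1 according to the sign
// of x; Zeromask(x) = -(x != 0), i.e. -1 iff x is nonzero; and Slicemask
// yields -1 for any positive length, since NEG x is then negative and
// SRAconst [31] replicates its sign bit.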

// float <-> int conversion
(Cvt32to32F x) -> (MOVWF x)
(Cvt32to64F x) -> (MOVWD x)
(Cvt32Fto32 x) -> (TRUNCFW x)
(Cvt64Fto32 x) -> (TRUNCDW x)
(Cvt32Fto64F x) -> (MOVFD x)
(Cvt64Fto32F x) -> (MOVDF x)

// comparisons
(Eq8 x y)  -> (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (SGTUconst [1] (XOR x y))
(EqPtr x y) -> (SGTUconst [1] (XOR x y))
(Eq32F x y) -> (FPFlagTrue (CMPEQF x y))
(Eq64F x y) -> (FPFlagTrue (CMPEQD x y))

(Neq8 x y)  -> (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
(Neq16 x y) -> (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
(Neq32 x y) -> (SGTU (XOR x y) (MOVWconst [0]))
(NeqPtr x y) -> (SGTU (XOR x y) (MOVWconst [0]))
(Neq32F x y) -> (FPFlagFalse (CMPEQF x y))
(Neq64F x y) -> (FPFlagFalse (CMPEQD x y))

(Less8 x y)  -> (SGT (SignExt8to32 y) (SignExt8to32 x))
(Less16 x y) -> (SGT (SignExt16to32 y) (SignExt16to32 x))
(Less32 x y) -> (SGT y x)
(Less32F x y) -> (FPFlagTrue (CMPGTF y x)) // reverse operands to work around NaN
(Less64F x y) -> (FPFlagTrue (CMPGTD y x)) // reverse operands to work around NaN

(Less8U x y)  -> (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
(Less16U x y) -> (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
(Less32U x y) -> (SGTU y x)

(Leq8 x y)  -> (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (XORconst [1] (SGT x y))
(Leq32F x y) -> (FPFlagTrue (CMPGEF y x)) // reverse operands to work around NaN
(Leq64F x y) -> (FPFlagTrue (CMPGED y x)) // reverse operands to work around NaN

(Leq8U x y)  -> (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (XORconst [1] (SGTU x y))

(Greater8 x y)  -> (SGT (SignExt8to32 x) (SignExt8to32 y))
(Greater16 x y) -> (SGT (SignExt16to32 x) (SignExt16to32 y))
(Greater32 x y) -> (SGT x y)
(Greater32F x y) -> (FPFlagTrue (CMPGTF x y))
(Greater64F x y) -> (FPFlagTrue (CMPGTD x y))

(Greater8U x y)  -> (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
(Greater16U x y) -> (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Greater32U x y) -> (SGTU x y)

(Geq8 x y)  -> (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
(Geq16 x y) -> (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
(Geq32 x y) -> (XORconst [1] (SGT y x))
(Geq32F x y) -> (FPFlagTrue (CMPGEF x y))
(Geq64F x y) -> (FPFlagTrue (CMPGED x y))

(Geq8U x y)  -> (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
(Geq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
(Geq32U x y) -> (XORconst [1] (SGTU y x))

(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)

(Addr {sym} base) -> (MOVWaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)

// stores
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [4] ptr val mem) && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store [8] ptr val mem) && !is64BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)

// zero instructions
(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore ptr (MOVWconst [0]) mem)
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 ->
	(MOVBstore [1] ptr (MOVWconst [0])
		(MOVBstore [0] ptr (MOVWconst [0]) mem))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore ptr (MOVWconst [0]) mem)
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [2] ptr (MOVWconst [0])
		(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 ->
	(MOVBstore [3] ptr (MOVWconst [0])
		(MOVBstore [2] ptr (MOVWconst [0])
			(MOVBstore [1] ptr (MOVWconst [0])
				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstore [2] ptr (MOVWconst [0])
		(MOVBstore [1] ptr (MOVWconst [0])
			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [4] ptr (MOVWconst [0])
		(MOVHstore [2] ptr (MOVWconst [0])
			(MOVHstore [0] ptr (MOVWconst [0]) mem)))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [4] ptr (MOVWconst [0])
		(MOVWstore [0] ptr (MOVWconst [0]) mem))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [8] ptr (MOVWconst [0])
		(MOVWstore [4] ptr (MOVWconst [0])
			(MOVWstore [0] ptr (MOVWconst [0]) mem)))
(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [12] ptr (MOVWconst [0])
		(MOVWstore [8] ptr (MOVWconst [0])
			(MOVWstore [4] ptr (MOVWconst [0])
				(MOVWstore [0] ptr (MOVWconst [0]) mem))))

// large or unaligned zeroing uses a loop
(Zero [s] ptr mem)
	&& (SizeAndAlign(s).Size() > 16 || SizeAndAlign(s).Align()%4 != 0) ->
	(LoweredZero [SizeAndAlign(s).Align()]
		ptr
		(ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
		mem)
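// The ADDconst argument to LoweredZero is the address of the last unit to
// clear (a worked example, illustrative only): zeroing 24 bytes with 4-byte
// alignment has moveSize 4, so the bound is ptr+20, and the lowered loop
// stores one zero word at a time until it passes that address.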

// moves
(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore dst (MOVHUload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
	(MOVBstore [1] dst (MOVBUload [1] src mem)
		(MOVBstore dst (MOVBUload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore dst (MOVWload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [2] dst (MOVHUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
	(MOVBstore [3] dst (MOVBUload [3] src mem)
		(MOVBstore [2] dst (MOVBUload [2] src mem)
			(MOVBstore [1] dst (MOVBUload [1] src mem)
				(MOVBstore dst (MOVBUload src mem) mem))))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVBstore [1] dst (MOVBUload [1] src mem)
			(MOVBstore dst (MOVBUload src mem) mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [12] dst (MOVWload [12] src mem)
		(MOVWstore [8] dst (MOVWload [8] src mem)
			(MOVWstore [4] dst (MOVWload [4] src mem)
				(MOVWstore dst (MOVWload src mem) mem))))

// large or unaligned move uses a loop
(Move [s] dst src mem)
	&& (SizeAndAlign(s).Size() > 16 || SizeAndAlign(s).Align()%4 != 0) ->
	(LoweredMove [SizeAndAlign(s).Align()]
		dst
		src
		(ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
		mem)

// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// atomic intrinsics
(AtomicLoad32  ptr mem) -> (LoweredAtomicLoad ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoad ptr mem)

(AtomicStore32      ptr val mem) -> (LoweredAtomicStore ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (LoweredAtomicStore ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange ptr val mem)
(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas ptr old new_ mem)

// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3, uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
	(LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
		(SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
			(SLLconst <config.fe.TypeUInt32()> [3]
				(ANDconst <config.fe.TypeUInt32()> [3] ptr))) mem)
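// Lane arithmetic for the rule above (little-endian, illustrative): ptr & 3
// is the byte's index within its aligned word, and the bit offset
// (ptr & 3) * 8 is computed as (ANDconst [3] ptr) << 3. E.g. ptr % 4 == 2
// shifts val left by 16, into byte lane 2. The big-endian variants below
// are identical except that the lane index is flipped via ptr ^ 3.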

// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3, (uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
(AtomicAnd8  ptr val mem) && !config.BigEndian ->
	(LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
		(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
			(SLLconst <config.fe.TypeUInt32()> [3]
				(ANDconst <config.fe.TypeUInt32()> [3] ptr)))
		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
				(ANDconst <config.fe.TypeUInt32()> [3] ptr))))) mem)

// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3, uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
	(LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
		(SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
			(SLLconst <config.fe.TypeUInt32()> [3]
				(ANDconst <config.fe.TypeUInt32()> [3]
					(XORconst <config.fe.TypeUInt32()> [3] ptr)))) mem)

// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3, (uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
(AtomicAnd8  ptr val mem) && config.BigEndian ->
	(LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
		(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
			(SLLconst <config.fe.TypeUInt32()> [3]
				(ANDconst <config.fe.TypeUInt32()> [3]
					(XORconst <config.fe.TypeUInt32()> [3] ptr))))
		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
				(ANDconst <config.fe.TypeUInt32()> [3]
					(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)


// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (SGTU ptr (MOVWconst [0]))
(IsInBounds idx len) -> (SGTU len idx)
(IsSliceInBounds idx len) -> (XORconst [1] (SGTU idx len))

// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Convert x mem) -> (MOVWconvert x mem)

(If cond yes no) -> (NE cond yes no)


// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTzero _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUzero _)) yes no) -> (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTzero _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) -> (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) -> (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) -> (NE x yes no)
(NE (SGTUzero x) yes no) -> (NE x yes no)
(EQ (SGTUzero x) yes no) -> (EQ x yes no)
(NE (SGTconst [0] x) yes no) -> (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no)
(NE (SGTzero x) yes no) -> (GTZ x yes no)
(EQ (SGTzero x) yes no) -> (LEZ x yes no)
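// The XORconst [1] pairs above work because comparisons produce 0 or 1, so
// XORconst [1] is boolean negation: branching NE on !cmp is branching EQ on
// cmp. Likewise SGTUconst [1] x tests x < 1 unsigned, i.e. x == 0, which
// turns an NE branch into an EQ branch on x itself.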

// fold offset into address
(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) -> (MOVWaddr [off1+off2] {sym} ptr)

// fold address into load/store
(MOVBload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBload  [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHload  [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWload  [off1+off2] {sym} ptr mem)
(MOVFload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFload  [off1+off2] {sym} ptr mem)
(MOVDload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDload  [off1+off2] {sym} ptr mem)

(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDstore [off1+off2] {sym} ptr val mem)

(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
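// Editorial note (not from the original): the is16Bit guard reflects that
// MIPS load/store offsets are signed 16-bit immediates, so a merged offset
// outside that range cannot be encoded directly; the x.Uses == 1 alternative
// presumably allows the fold anyway when the ADDconst would die afterwards.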

(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// replace load from same location as preceding store with copy
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) -> x
(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) -> x
(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) -> x
(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) -> x
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

// store zero
(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
(MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVBload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVHload _ _)) -> (MOVWreg x)
(MOVHUreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHUreg x:(MOVHUload _ _)) -> (MOVWreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHUload <t> [off] {sym} ptr mem)
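// Editorial note on @x.Block: the replacement value is generated in the
// load's original block rather than the current one, which is exactly what
// keeps the memory-typed value from going live in two blocks at once, per
// the comment above.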

// fold extensions and ANDs together
(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&0xff] x)
(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&0xffff] x)
(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVWnop doesn't emit an instruction; it exists only to preserve the type.
(MOVWreg x) && x.Uses == 1 -> (MOVWnop x)

// fold constant into arithmetic ops
(ADD (MOVWconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
(AND (MOVWconst [c]) x) -> (ANDconst [c] x)
(AND x (MOVWconst [c])) -> (ANDconst [c] x)
(OR  (MOVWconst [c]) x) -> (ORconst  [c] x)
(OR  x (MOVWconst [c])) -> (ORconst  [c] x)
(XOR (MOVWconst [c]) x) -> (XORconst [c] x)
(XOR x (MOVWconst [c])) -> (XORconst [c] x)
(NOR (MOVWconst [c]) x) -> (NORconst [c] x)
(NOR x (MOVWconst [c])) -> (NORconst [c] x)

(SLL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
(SRL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
(SRA x (MOVWconst [c])) && uint32(c)>=32 -> (SRAconst x [31])
(SLL x (MOVWconst [c])) -> (SLLconst x [c])
(SRL x (MOVWconst [c])) -> (SRLconst x [c])
(SRA x (MOVWconst [c])) -> (SRAconst x [c])

(SGT  (MOVWconst [c]) x) -> (SGTconst  [c] x)
(SGTU (MOVWconst [c]) x) -> (SGTUconst [c] x)
(SGT x (MOVWconst [0])) -> (SGTzero x)
(SGTU x (MOVWconst [0])) -> (SGTUzero x)

// mul with constant
(Select1 (MULTU x (MOVWconst [c]))) && x.Op != OpMIPSMOVWconst -> (Select1 (MULTU (MOVWconst [c]) x))
(Select0 (MULTU x (MOVWconst [c]))) && x.Op != OpMIPSMOVWconst -> (Select0 (MULTU (MOVWconst [c]) x))

(Select1 (MULTU (MOVWconst [0]) _ )) -> (MOVWconst [0])
(Select0 (MULTU (MOVWconst [0]) _ )) -> (MOVWconst [0])
(Select1 (MULTU (MOVWconst [1]) x )) -> x
(Select0 (MULTU (MOVWconst [1]) _ )) -> (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x )) -> (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x )) -> (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)
(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) -> (SRLconst [32-log2(int64(uint32(c)))] x)
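// Power-of-two check (illustrative): for c = 8 the 64-bit product is x << 3,
// so the low word (Select1) is SLLconst [3] and the high word (Select0) is
// the product shifted down by 32, i.e. SRLconst [32-3] = [29].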

(MUL (MOVWconst [0]) _ ) -> (MOVWconst [0])
(MUL (MOVWconst [1]) x ) -> x
(MUL (MOVWconst [-1]) x ) -> (NEG x)
(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)

// generic simplifications
(ADD x (NEG y)) -> (SUB x y)
(ADD (NEG y) x) -> (SUB x y)
(SUB x x) -> (MOVWconst [0])
(SUB (MOVWconst [0]) x) -> (NEG x)
(AND x x) -> x
(OR  x x) -> x
(XOR x x) -> (MOVWconst [0])

// miscellaneous patterns generated by dec64
(AND (SGTUconst [1] x) (SGTUconst [1] y)) -> (SGTUconst [1] (OR <x.Type> x y))
(OR (SGTUzero x) (SGTUzero y)) -> (SGTUzero (OR <x.Type> x y))

// remove redundant *const ops
(ADDconst [0]  x) -> x
(SUBconst [0]  x) -> x
(ANDconst [0]  _) -> (MOVWconst [0])
(ANDconst [-1] x) -> x
(ORconst  [0]  x) -> x
(ORconst  [-1] _) -> (MOVWconst [-1])
(XORconst [0]  x) -> x
(XORconst [-1] x) -> (NORconst [0] x)

// generic constant folding
(ADDconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(c+d))])
(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x)
(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x)
(SUBconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(d-c))])
(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x)
(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x)
(SLLconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
(SRLconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(uint32(d)>>uint32(c))])
(SRAconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(d)>>uint32(c))])
(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)*int32(d))])
(Select1 (MULTU  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
(Select0 (MULTU  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32((int64(uint32(c))*int64(uint32(d)))>>32))])
(Select1 (DIV  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)/int32(d))])
(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)/uint32(d)))])
(Select0 (DIV  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)%int32(d))])
(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)%uint32(d)))])
(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(NORconst [c] (MOVWconst [d])) -> (MOVWconst [^(c|d)])
(NEG (MOVWconst [c])) -> (MOVWconst [int64(int32(-c))])
(MOVBreg  (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
(MOVHreg  (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
(MOVWreg  (MOVWconst [c])) -> (MOVWconst [c])

// constant comparisons
(SGTconst [c] (MOVWconst [d])) && int32(c) > int32(d) -> (MOVWconst [1])
(SGTconst [c] (MOVWconst [d])) && int32(c) <= int32(d) -> (MOVWconst [0])
(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) -> (MOVWconst [1])
(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) -> (MOVWconst [0])
(SGTzero (MOVWconst [d])) && int32(d) > 0 -> (MOVWconst [1])
(SGTzero (MOVWconst [d])) && int32(d) <= 0 -> (MOVWconst [0])
(SGTUzero (MOVWconst [d])) && uint32(d) != 0 -> (MOVWconst [1])
(SGTUzero (MOVWconst [d])) && uint32(d) == 0 -> (MOVWconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVBreg _)) && int32(c) <= -0x80 -> (MOVWconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVBUreg _)) && int32(c) < 0 -> (MOVWconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && int32(c) <= -0x8000 -> (MOVWconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHUreg _)) && int32(c) < 0 -> (MOVWconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (ANDconst [m] _)) && 0 <= int32(m) && int32(m) < int32(c) -> (MOVWconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (SRLconst _ [d])) && 0 <= int32(c) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= int32(c) -> (MOVWconst [1])
(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= uint32(c) -> (MOVWconst [1])
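// Reasoning for the SRLconst cases (illustrative): a value shifted right by
// d bits is always below 1<<(32-d), so any constant c at or above that
// bound satisfies c > value; e.g. d = 24 bounds the shifted value by 256.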

// absorb constants into branches
(EQ  (MOVWconst [0]) yes no) -> (First nil yes no)
(EQ  (MOVWconst [c]) yes no) && c != 0 -> (First nil no yes)
(NE  (MOVWconst [0]) yes no) -> (First nil no yes)
(NE  (MOVWconst [c]) yes no) && c != 0 -> (First nil yes no)
(LTZ (MOVWconst [c]) yes no) && int32(c) <  0 -> (First nil yes no)
(LTZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First nil no yes)
(LEZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First nil yes no)
(LEZ (MOVWconst [c]) yes no) && int32(c) >  0 -> (First nil no yes)
(GTZ (MOVWconst [c]) yes no) && int32(c) >  0 -> (First nil yes no)
(GTZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First nil no yes)
(GEZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First nil yes no)
(GEZ (MOVWconst [c]) yes no) && int32(c) <  0 -> (First nil no yes)

// conditional move
(CMOVZ _ b (MOVWconst [0])) -> b
(CMOVZ a _ (MOVWconst [c])) && c != 0 -> a
(CMOVZzero _ (MOVWconst [0])) -> (MOVWconst [0])
(CMOVZzero a (MOVWconst [c])) && c != 0 -> a
(CMOVZ a (MOVWconst [0]) c) -> (CMOVZzero a c)
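// These rely on the selector semantics (illustrative): CMOVZ a b c yields b
// when c == 0 and a otherwise; CMOVZzero a c yields 0 when c == 0 and a
// otherwise. A constant selector therefore decides the result outright, and
// a constant-zero second operand reduces CMOVZ to CMOVZzero.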

// atomic
(LoweredAtomicStore ptr (MOVWconst [0]) mem) -> (LoweredAtomicStorezero ptr mem)
(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c) -> (LoweredAtomicAddconst [c] ptr mem)