github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/cmd/compile/internal/ssa/gen/MIPS.rules

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(AddPtr x y) -> (ADD x y)
(Add32 x y) -> (ADD x y)
(Add16 x y) -> (ADD x y)
(Add8 x y) -> (ADD x y)
(Add32F x y) -> (ADDF x y)
(Add64F x y) -> (ADDD x y)

(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
(Select1 (Add32carry <t> x y)) -> (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
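
// Carry recovery, illustrated (not a rewrite rule): the SGTU above
// computes x >u x+y, which is exactly the carry out of the 32-bit add.
// A Go sketch of the same identity:
//
//	func add32carry(x, y uint32) (sum uint32, carry bool) {
//		sum = x + y
//		carry = x > sum // the add carried iff the sum wrapped below x
//		return
//	}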

(SubPtr x y) -> (SUB x y)
(Sub32 x y) -> (SUB x y)
(Sub16 x y) -> (SUB x y)
(Sub8 x y) -> (SUB x y)
(Sub32F x y) -> (SUBF x y)
(Sub64F x y) -> (SUBD x y)

(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
(Select1 (Sub32carry <t> x y)) -> (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
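
// The borrow test mirrors the carry test: SGTU (x-y) x is 1 exactly when
// the unsigned subtraction wrapped. A Go sketch (not a rewrite rule):
//
//	func sub32borrow(x, y uint32) (diff uint32, borrow bool) {
//		diff = x - y
//		borrow = diff > x // the subtract borrowed iff the difference wrapped above x
//		return
//	}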

(Mul32 x y) -> (MUL x y)
(Mul16 x y) -> (MUL x y)
(Mul8 x y) -> (MUL x y)
(Mul32F x y) -> (MULF x y)
(Mul64F x y) -> (MULD x y)

(Hmul32 x y) -> (Select0 (MULT x y))
(Hmul32u x y) -> (Select0 (MULTU x y))

(Mul32uhilo x y) -> (MULTU x y)

(Div32 x y) -> (Select1 (DIV x y))
(Div32u x y) -> (Select1 (DIVU x y))
(Div16 x y) -> (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Div16u x y) -> (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Div8 x y) -> (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Div8u x y) -> (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Div32F x y) -> (DIVF x y)
(Div64F x y) -> (DIVD x y)

(Mod32 x y) -> (Select0 (DIV x y))
(Mod32u x y) -> (Select0 (DIVU x y))
(Mod16 x y) -> (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Mod16u x y) -> (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Mod8 x y) -> (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Mod8u x y) -> (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg32u <t> x y) -> (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
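
// The rewritten form cannot overflow: with x >= y, x-y fits in 32 bits
// and (x-y)/2 + y <= x. A Go sketch of the identity (not a rewrite rule):
//
//	func avg32u(x, y uint32) uint32 { // requires x >= y
//		return (x-y)/2 + y
//	}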

(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8 x y) -> (AND x y)

(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8 x y) -> (OR x y)

(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8 x y) -> (XOR x y)

// constant shifts
// generic opt rewrites all constant shifts to shift by Const64
(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SLLconst x [c])
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])

// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])

// large constant signed right shift: only the sign bit remains
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
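
// These match Go's shift semantics, which are defined for any count.
// Illustrative Go (not a rewrite rule):
//
//	var x int32 = -8
//	_ = x << 35         // 0: left shift by >= width yields 0
//	_ = uint32(x) >> 35 // 0: unsigned right shift by >= width yields 0
//	_ = x >> 35         // -1: signed right shift by >= width yields the sign bit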

// shifts
// hardware instruction uses only the low 5 bits of the shift
// we compare to 32 to ensure Go semantics for large shifts
(Lsh32x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh32x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh32x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh16x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh16x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh16x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh8x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh8x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh8x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32Ux32 <t> x y) -> (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh32Ux16 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh32Ux8 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh16Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh16Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh16Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh8Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32x32 x y) -> (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh32x16 x y) -> (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh32x8 x y) -> (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh8x32 x y) -> (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh8x16 x y) -> (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh8x8 x y) -> (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
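
// CMOVZ a b c yields a if c != 0 and b if c == 0, and (SGTUconst [32] y)
// computes 32 >u y. So the unsigned forms above select 0 for large counts,
// while the signed forms clamp the count: SRA only uses the low 5 bits of
// the -1 it receives. Illustrative Go (not rewrite rules):
//
//	func rsh32Ux32(x, y uint32) uint32 {
//		if y >= 32 {
//			return 0
//		}
//		return x >> y
//	}
//
//	func rsh32x32(x int32, y uint32) int32 {
//		if y >= 32 {
//			y = 31 // -1 & 31; fills the result with the sign bit
//		}
//		return x >> y
//	}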

// unary ops
(Neg32 x) -> (NEG x)
(Neg16 x) -> (NEG x)
(Neg8 x) -> (NEG x)
(Neg32F x) -> (NEGF x)
(Neg64F x) -> (NEGD x)

(Com32 x) -> (NORconst [0] x)
(Com16 x) -> (NORconst [0] x)
(Com8 x) -> (NORconst [0] x)

(Sqrt x) -> (SQRTD x)

// count trailing zeros
// 32 - CLZ(x&-x - 1)
(Ctz32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))

// bit length
(BitLen32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> x))
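
// Both identities, sketched in Go with math/bits standing in for the CLZ
// instruction (illustrative, not rewrite rules): x&-x isolates the lowest
// set bit, so (x&-x)-1 is a mask of exactly the trailing zeros.
//
//	func ctz32(x uint32) int    { return 32 - bits.LeadingZeros32((x&-x)-1) } // x = 0 gives 32
//	func bitLen32(x uint32) int { return 32 - bits.LeadingZeros32(x) }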

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XORconst [1] (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)

// constants
(Const32 [val]) -> (MOVWconst [val])
(Const16 [val]) -> (MOVWconst [val])
(Const8 [val]) -> (MOVWconst [val])
(Const32F [val]) -> (MOVFconst [val])
(Const64F [val]) -> (MOVDconst [val])
(ConstNil) -> (MOVWconst [0])
(ConstBool [b]) -> (MOVWconst [b])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x

// Zero-/Sign-extensions
(ZeroExt8to16 x) -> (MOVBUreg x)
(ZeroExt8to32 x) -> (MOVBUreg x)
(ZeroExt16to32 x) -> (MOVHUreg x)

(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)

(Signmask x) -> (SRAconst x [31])
(Zeromask x) -> (NEG (SGTU x (MOVWconst [0])))
(Slicemask <t> x) -> (SRAconst (NEG <t> x) [31])
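
// The three masks, sketched in Go (illustrative, not rewrite rules):
//
//	func signmask(x int32) int32 { return x >> 31 } // -1 if x < 0, else 0
//
//	func zeromask(x uint32) int32 { // -1 if x != 0, else 0
//		if x != 0 {
//			return -1
//		}
//		return 0
//	}
//
//	// slicemask is -1 if x > 0, else 0; slice lengths are non-negative,
//	// so -x has its sign bit set exactly when x is nonzero.
//	func slicemask(x int32) int32 { return -x >> 31 }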

// float <-> int conversion
(Cvt32to32F x) -> (MOVWF x)
(Cvt32to64F x) -> (MOVWD x)
(Cvt32Fto32 x) -> (TRUNCFW x)
(Cvt64Fto32 x) -> (TRUNCDW x)
(Cvt32Fto64F x) -> (MOVFD x)
(Cvt64Fto32F x) -> (MOVDF x)

(Round32F x) -> x
(Round64F x) -> x

// comparisons
(Eq8 x y)  -> (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (SGTUconst [1] (XOR x y))
(EqPtr x y) -> (SGTUconst [1] (XOR x y))
(Eq32F x y) -> (FPFlagTrue (CMPEQF x y))
(Eq64F x y) -> (FPFlagTrue (CMPEQD x y))

(Neq8 x y)  -> (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
(Neq16 x y) -> (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
(Neq32 x y) -> (SGTU (XOR x y) (MOVWconst [0]))
(NeqPtr x y) -> (SGTU (XOR x y) (MOVWconst [0]))
(Neq32F x y) -> (FPFlagFalse (CMPEQF x y))
(Neq64F x y) -> (FPFlagFalse (CMPEQD x y))

(Less8 x y)  -> (SGT (SignExt8to32 y) (SignExt8to32 x))
(Less16 x y) -> (SGT (SignExt16to32 y) (SignExt16to32 x))
(Less32 x y) -> (SGT y x)
(Less32F x y) -> (FPFlagTrue (CMPGTF y x)) // reverse operands to work around NaN
(Less64F x y) -> (FPFlagTrue (CMPGTD y x)) // reverse operands to work around NaN
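
// Go requires every ordered comparison involving a NaN to be false; per
// the comment above, the operand-swapped greater-than compare keeps the
// unordered (NaN) case false. Illustrative Go (not a rewrite rule):
//
//	nan := math.NaN()
//	_ = nan < 1.0 // false
//	_ = 1.0 < nan // false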

(Less8U x y)  -> (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
(Less16U x y) -> (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
(Less32U x y) -> (SGTU y x)

(Leq8 x y)  -> (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (XORconst [1] (SGT x y))
(Leq32F x y) -> (FPFlagTrue (CMPGEF y x)) // reverse operands to work around NaN
(Leq64F x y) -> (FPFlagTrue (CMPGED y x)) // reverse operands to work around NaN

(Leq8U x y)  -> (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (XORconst [1] (SGTU x y))

(Greater8 x y)  -> (SGT (SignExt8to32 x) (SignExt8to32 y))
(Greater16 x y) -> (SGT (SignExt16to32 x) (SignExt16to32 y))
(Greater32 x y) -> (SGT x y)
(Greater32F x y) -> (FPFlagTrue (CMPGTF x y))
(Greater64F x y) -> (FPFlagTrue (CMPGTD x y))

(Greater8U x y)  -> (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
(Greater16U x y) -> (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Greater32U x y) -> (SGTU x y)

(Geq8 x y)  -> (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
(Geq16 x y) -> (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
(Geq32 x y) -> (XORconst [1] (SGT y x))
(Geq32F x y) -> (FPFlagTrue (CMPGEF x y))
(Geq64F x y) -> (FPFlagTrue (CMPGED x y))

(Geq8U x y)  -> (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
(Geq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
(Geq32U x y) -> (XORconst [1] (SGTU y x))

(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)

(Addr {sym} base) -> (MOVWaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)

// zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) ->
	(MOVBstore [1] ptr (MOVWconst [0])
		(MOVBstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore ptr (MOVWconst [0]) mem)
(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [2] ptr (MOVWconst [0])
		(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] ptr mem) ->
	(MOVBstore [3] ptr (MOVWconst [0])
		(MOVBstore [2] ptr (MOVWconst [0])
			(MOVBstore [1] ptr (MOVWconst [0])
				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
(Zero [3] ptr mem) ->
	(MOVBstore [2] ptr (MOVWconst [0])
		(MOVBstore [1] ptr (MOVWconst [0])
			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [4] ptr (MOVWconst [0])
		(MOVHstore [2] ptr (MOVWconst [0])
			(MOVHstore [0] ptr (MOVWconst [0]) mem)))
(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [4] ptr (MOVWconst [0])
		(MOVWstore [0] ptr (MOVWconst [0]) mem))
(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [8] ptr (MOVWconst [0])
		(MOVWstore [4] ptr (MOVWconst [0])
			(MOVWstore [0] ptr (MOVWconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [12] ptr (MOVWconst [0])
		(MOVWstore [8] ptr (MOVWconst [0])
			(MOVWstore [4] ptr (MOVWconst [0])
				(MOVWstore [0] ptr (MOVWconst [0]) mem))))

// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
	&& (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
	(LoweredZero [t.(*types.Type).Alignment()]
		ptr
		(ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
		mem)
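
// LoweredZero's second argument is the address of the last unit to zero:
// ptr + s - moveSize, where moveSize is the width the loop stores at.
// LoweredMove below computes its end address the same way. A Go sketch of
// the 4-byte-aligned case (illustrative; p and last are uintptrs):
//
//	last := p + uintptr(s) - 4
//	for ; p <= last; p += 4 {
//		*(*uint32)(unsafe.Pointer(p)) = 0
//	}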

// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) ->
	(MOVBstore [1] dst (MOVBUload [1] src mem)
		(MOVBstore dst (MOVBUload src mem) mem))
(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [2] dst (MOVHUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) ->
	(MOVBstore [3] dst (MOVBUload [3] src mem)
		(MOVBstore [2] dst (MOVBUload [2] src mem)
			(MOVBstore [1] dst (MOVBUload [1] src mem)
				(MOVBstore dst (MOVBUload src mem) mem))))
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVBstore [1] dst (MOVBUload [1] src mem)
			(MOVBstore dst (MOVBUload src mem) mem)))
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))
(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [12] dst (MOVWload [12] src mem)
		(MOVWstore [8] dst (MOVWload [8] src mem)
			(MOVWstore [4] dst (MOVWload [4] src mem)
				(MOVWstore dst (MOVWload src mem) mem))))

// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
	&& (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
	(LoweredMove [t.(*types.Type).Alignment()]
		dst
		src
		(ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
		mem)

// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// atomic intrinsics
(AtomicLoad32  ptr mem) -> (LoweredAtomicLoad ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoad ptr mem)

(AtomicStore32      ptr val mem) -> (LoweredAtomicStore ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (LoweredAtomicStore ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange ptr val mem)
(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas ptr old new_ mem)

// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3, uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
	(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr))) mem)
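
// Illustrative Go for the lowering above (not a rewrite rule): the byte's
// containing word is updated atomically; a CAS loop stands in here for the
// LL/SC loop that LoweredAtomicOr ultimately emits. Assumes sync/atomic
// and unsafe; atomicOr8LE is a hypothetical name.
//
//	func atomicOr8LE(p *uint8, v uint8) {
//		addr := uintptr(unsafe.Pointer(p))
//		word := (*uint32)(unsafe.Pointer(addr &^ 3))
//		mask := uint32(v) << ((addr & 3) * 8)
//		for {
//			old := atomic.LoadUint32(word)
//			if atomic.CompareAndSwapUint32(word, old, old|mask) {
//				return
//			}
//		}
//	}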

// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
(AtomicAnd8  ptr val mem) && !config.BigEndian ->
	(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr)))
		(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
			(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr))))) mem)

// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3, uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
	(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr)))) mem)

// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
(AtomicAnd8  ptr val mem) && config.BigEndian ->
	(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr))))
		(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
			(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr)))))) mem)
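
// On big-endian MIPS byte 0 is the most significant byte of its word, so
// the shift is derived from ptr^3 rather than ptr. Illustrative Go (not a
// rewrite rule):
//
//	shift := ((addr ^ 3) & 3) * 8 // byte offset 0 -> shift 24, offset 3 -> shift 0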

// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (SGTU ptr (MOVWconst [0]))
(IsInBounds idx len) -> (SGTU len idx)
(IsSliceInBounds idx len) -> (XORconst [1] (SGTU idx len))

// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Convert x mem) -> (MOVWconvert x mem)

(If cond yes no) -> (NE cond yes no)


// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTzero _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUzero _)) yes no) -> (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTzero _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) -> (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) -> (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) -> (NE x yes no)
(NE (SGTUzero x) yes no) -> (NE x yes no)
(EQ (SGTUzero x) yes no) -> (EQ x yes no)
(NE (SGTconst [0] x) yes no) -> (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no)
(NE (SGTzero x) yes no) -> (GTZ x yes no)
(EQ (SGTzero x) yes no) -> (LEZ x yes no)

// fold offset into address
(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) -> (MOVWaddr [off1+off2] {sym} ptr)

// fold address into load/store
(MOVBload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBload  [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHload  [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWload  [off1+off2] {sym} ptr mem)
(MOVFload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFload  [off1+off2] {sym} ptr mem)
(MOVDload  [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDload  [off1+off2] {sym} ptr mem)

(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDstore [off1+off2] {sym} ptr val mem)

(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstorezero [off1+off2] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
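
// Illustrative Go for the byte case (not a rewrite rule): reading back a
// just-stored byte is the same as sign-extending the register that was
// stored, so the load can be replaced by MOVBreg x.
//
//	var b int8
//	b = int8(x)   // MOVBstore of x
//	r := int32(b) // MOVBload of the same address == sign-extension of x
//	_ = r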

// store zero
(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
(MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVBload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVHload _ _)) -> (MOVWreg x)
(MOVHUreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHUreg x:(MOVHUload _ _)) -> (MOVWreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values.  See test/atomicload.go.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHUload <t> [off] {sym} ptr mem)

// fold extensions and ANDs together
(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&0xff] x)
(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&0xffff] x)
(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// If a register move has only 1 use, just use the same register without
// emitting an instruction. MOVWnop doesn't emit an instruction; it exists
// only to keep the type.
(MOVWreg x) && x.Uses == 1 -> (MOVWnop x)

// fold constants into arithmetic ops
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
(AND x (MOVWconst [c])) -> (ANDconst [c] x)
(OR  x (MOVWconst [c])) -> (ORconst  [c] x)
(XOR x (MOVWconst [c])) -> (XORconst [c] x)
(NOR x (MOVWconst [c])) -> (NORconst [c] x)

(SLL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
(SRL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
(SRA x (MOVWconst [c])) && uint32(c)>=32 -> (SRAconst x [31])
(SLL x (MOVWconst [c])) -> (SLLconst x [c])
(SRL x (MOVWconst [c])) -> (SRLconst x [c])
(SRA x (MOVWconst [c])) -> (SRAconst x [c])

(SGT  (MOVWconst [c]) x) -> (SGTconst  [c] x)
(SGTU (MOVWconst [c]) x) -> (SGTUconst [c] x)
(SGT x (MOVWconst [0])) -> (SGTzero x)
(SGTU x (MOVWconst [0])) -> (SGTUzero x)

// mul with constant
(Select1 (MULTU (MOVWconst [0]) _ )) -> (MOVWconst [0])
(Select0 (MULTU (MOVWconst [0]) _ )) -> (MOVWconst [0])
(Select1 (MULTU (MOVWconst [1]) x )) -> x
(Select0 (MULTU (MOVWconst [1]) _ )) -> (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x )) -> (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x )) -> (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)
(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) -> (SRLconst [32-log2(int64(uint32(c)))] x)
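
// Worked example for the power-of-two cases (Select1 = low word,
// Select0 = high word): with c = 1<<k for 0 < k < 32, the full 64-bit
// product is uint64(x)<<k, so in Go terms (illustrative, not rules):
//
//	lo := x << k        // Select1 (MULTU (MOVWconst [1<<k]) x)
//	hi := x >> (32 - k) // Select0 (MULTU (MOVWconst [1<<k]) x)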

(MUL (MOVWconst [0]) _ ) -> (MOVWconst [0])
(MUL (MOVWconst [1]) x ) -> x
(MUL (MOVWconst [-1]) x ) -> (NEG x)
(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)

// generic simplifications
(ADD x (NEG y)) -> (SUB x y)
(SUB x x) -> (MOVWconst [0])
(SUB (MOVWconst [0]) x) -> (NEG x)
(AND x x) -> x
(OR  x x) -> x
(XOR x x) -> (MOVWconst [0])

// miscellaneous patterns generated by dec64
(AND (SGTUconst [1] x) (SGTUconst [1] y)) -> (SGTUconst [1] (OR <x.Type> x y))
(OR (SGTUzero x) (SGTUzero y)) -> (SGTUzero (OR <x.Type> x y))

// remove redundant *const ops
(ADDconst [0]  x) -> x
(SUBconst [0]  x) -> x
(ANDconst [0]  _) -> (MOVWconst [0])
(ANDconst [-1] x) -> x
(ORconst  [0]  x) -> x
(ORconst  [-1] _) -> (MOVWconst [-1])
(XORconst [0]  x) -> x
(XORconst [-1] x) -> (NORconst [0] x)

// generic constant folding
(ADDconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(c+d))])
(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x)
(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x)
(SUBconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(d-c))])
(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x)
(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x)
(SLLconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
(SRLconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(uint32(d)>>uint32(c))])
(SRAconst [c] (MOVWconst [d]))  -> (MOVWconst [int64(int32(d)>>uint32(c))])
(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)*int32(d))])
(Select1 (MULTU  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
(Select0 (MULTU  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [(c*d)>>32])
(Select1 (DIV  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)/int32(d))])
(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)/uint32(d)))])
(Select0 (DIV  (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)%int32(d))])
(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)%uint32(d)))])
(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(NORconst [c] (MOVWconst [d])) -> (MOVWconst [^(c|d)])
(NEG (MOVWconst [c])) -> (MOVWconst [int64(int32(-c))])
(MOVBreg  (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
(MOVHreg  (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
(MOVWreg  (MOVWconst [c])) -> (MOVWconst [c])

// constant comparisons
(SGTconst [c] (MOVWconst [d])) && int32(c) > int32(d) -> (MOVWconst [1])
(SGTconst [c] (MOVWconst [d])) && int32(c) <= int32(d) -> (MOVWconst [0])
(SGTUconst [c] (MOVWconst [d])) && uint32(c)>uint32(d) -> (MOVWconst [1])
(SGTUconst [c] (MOVWconst [d])) && uint32(c)<=uint32(d) -> (MOVWconst [0])
(SGTzero (MOVWconst [d])) && int32(d) > 0 -> (MOVWconst [1])
(SGTzero (MOVWconst [d])) && int32(d) <= 0 -> (MOVWconst [0])
(SGTUzero (MOVWconst [d])) && uint32(d) != 0 -> (MOVWconst [1])
(SGTUzero (MOVWconst [d])) && uint32(d) == 0 -> (MOVWconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVBreg _)) && int32(c) <= -0x80 -> (MOVWconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVBUreg _)) && int32(c) < 0 -> (MOVWconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && int32(c) <= -0x8000 -> (MOVWconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHUreg _)) && int32(c) < 0 -> (MOVWconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (ANDconst [m] _)) && 0 <= int32(m) && int32(m) < int32(c) -> (MOVWconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (SRLconst _ [d])) && 0 <= int32(c) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= int32(c) -> (MOVWconst [1])
(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= uint32(c) -> (MOVWconst [1])

// absorb constants into branches
(EQ  (MOVWconst [0]) yes no) -> (First nil yes no)
(EQ  (MOVWconst [c]) yes no) && c != 0 -> (First nil no yes)
(NE  (MOVWconst [0]) yes no) -> (First nil no yes)
(NE  (MOVWconst [c]) yes no) && c != 0 -> (First nil yes no)
(LTZ (MOVWconst [c]) yes no) && int32(c) <  0 -> (First nil yes no)
(LTZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First nil no yes)
(LEZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First nil yes no)
(LEZ (MOVWconst [c]) yes no) && int32(c) >  0 -> (First nil no yes)
(GTZ (MOVWconst [c]) yes no) && int32(c) >  0 -> (First nil yes no)
(GTZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First nil no yes)
(GEZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First nil yes no)
(GEZ (MOVWconst [c]) yes no) && int32(c) <  0 -> (First nil no yes)

// conditional move
(CMOVZ _ b (MOVWconst [0])) -> b
(CMOVZ a _ (MOVWconst [c])) && c!=0 -> a
(CMOVZzero _ (MOVWconst [0])) -> (MOVWconst [0])
(CMOVZzero a (MOVWconst [c])) && c!=0 -> a
(CMOVZ a (MOVWconst [0]) c) -> (CMOVZzero a c)

// atomic
(LoweredAtomicStore ptr (MOVWconst [0]) mem) -> (LoweredAtomicStorezero ptr mem)
(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c) -> (LoweredAtomicAddconst [c] ptr mem)