github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/cmd/compile/internal/ssa/gen/ARM64.rules

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(AddPtr x y) -> (ADD x y)
(Add64 x y) -> (ADD x y)
(Add32 x y) -> (ADD x y)
(Add16 x y) -> (ADD x y)
(Add8 x y) -> (ADD x y)
(Add32F x y) -> (FADDS x y)
(Add64F x y) -> (FADDD x y)

(SubPtr x y) -> (SUB x y)
(Sub64 x y) -> (SUB x y)
(Sub32 x y) -> (SUB x y)
(Sub16 x y) -> (SUB x y)
(Sub8 x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUBD x y)

(Mul64 x y) -> (MUL x y)
(Mul32 x y) -> (MULW x y)
(Mul16 x y) -> (MULW x y)
(Mul8 x y) -> (MULW x y)
(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMULD x y)

(Hmul64 x y) -> (MULH x y)
(Hmul64u x y) -> (UMULH x y)
(Hmul32 x y) -> (SRAconst (MULL <types.Int64> x y) [32])
(Hmul32u x y) -> (SRAconst (UMULL <types.UInt64> x y) [32])
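// Hmul32 recovers the upper 32 bits of the product: MULL/UMULL compute
// the full 32x32->64 result and the shift right by 32 moves its high
// word into the low half; only the low 32 bits of the result are used.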

(Div64 x y) -> (DIV x y)
(Div64u x y) -> (UDIV x y)
(Div32 x y) -> (DIVW x y)
(Div32u x y) -> (UDIVW x y)
(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) -> (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIVD x y)

(Mod64 x y) -> (MOD x y)
(Mod64u x y) -> (UMOD x y)
(Mod32 x y) -> (MODW x y)
(Mod32u x y) -> (UMODW x y)
(Mod16 x y) -> (MODW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (MODW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
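// Illustrative example: with x = 2^64-2 and y = 2^64-4 the sum x+y
// would overflow, while (x-y)/2 + y = 1 + (2^64-4) = 2^64-3, which is
// the exact unsigned average.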

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8 x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8 x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8 x y) -> (XOR x y)

// unary ops
(Neg64 x) -> (NEG x)
(Neg32 x) -> (NEG x)
(Neg16 x) -> (NEG x)
(Neg8 x) -> (NEG x)
(Neg32F x) -> (FNEGS x)
(Neg64F x) -> (FNEGD x)

(Com64 x) -> (MVN x)
(Com32 x) -> (MVN x)
(Com16 x) -> (MVN x)
(Com8 x) -> (MVN x)

(Sqrt x) -> (FSQRTD x)

(Ctz64 <t> x) -> (CLZ (RBIT <t> x))
(Ctz32 <t> x) -> (CLZW (RBITW <t> x))

(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <types.Int> x))
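// Bit length is 64 minus the number of leading zeros: e.g. x = 1 has
// 63 leading zeros, so BitLen64(1) = 64 - 63 = 1, and CLZ(0) = 64
// gives BitLen64(0) = 0.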

(Bswap64 x) -> (REV x)
(Bswap32 x) -> (REVW x)

(BitRev64 x) -> (RBIT x)
(BitRev32 x) -> (RBITW x)
(BitRev16 x) -> (SRLconst [48] (RBIT <types.UInt64> x))
(BitRev8 x) -> (SRLconst [56] (RBIT <types.UInt64> x))

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XOR (MOVDconst [1]) (XOR <types.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XOR (MOVDconst [1]) x)
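// EqB computes x == y as 1 XOR (x XOR y): the inner XOR is 0 when the
// two booleans agree and 1 when they differ, and XORing with 1 then
// inverts that into the usual 0/1 result.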

// constant shifts
(Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLLconst x [c])
(Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SRAconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRLconst x [c])
(Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SLLconst x [c])
(Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SRAconst (SignExt32to64 x) [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRLconst (ZeroExt32to64 x) [c])
(Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SLLconst x [c])
(Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SRAconst (SignExt16to64 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRLconst (ZeroExt16to64 x) [c])
(Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SLLconst x [c])
(Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SRAconst (SignExt8to64  x) [c])
(Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  -> (SRLconst (ZeroExt8to64  x) [c])

// large constant shifts
(Lsh64x64  _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64  _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64  _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64   _ (MOVDconst [c])) && uint64(c) >= 8  -> (MOVDconst [0])
(Rsh8Ux64  _ (MOVDconst [c])) && uint64(c) >= 8  -> (MOVDconst [0])

// large constant signed right shift, we leave the sign bit
(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRAconst x [63])
(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAconst (SignExt32to64 x) [63])
(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAconst (SignExt16to64 x) [63])
(Rsh8x64  x (MOVDconst [c])) && uint64(c) >= 8  -> (SRAconst (SignExt8to64  x) [63])
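// e.g. a signed 64-bit shift right by a constant 100 becomes
// (SRAconst x [63]): every result bit is a copy of the sign bit, so
// the result is 0 for non-negative x and -1 for negative x.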

// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
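// e.g. for Lsh64x64 below, SLL computes x<<y from the low 6 bits of y
// and CSELULT selects that value while y < 64 (unsigned) and the
// constant 0 otherwise, so oversized shifts yield 0 as Go requires.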
(Lsh64x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh64x8  <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Lsh32x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh32x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh32x8  <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Lsh16x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh16x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh16x8  <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Lsh8x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh8x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Lsh8x8  <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Rsh64Ux64 <t> x y) -> (CSELULT (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh64Ux32 <t> x y) -> (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) -> (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh64Ux8  <t> x y) -> (CSELULT (SRL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Rsh32Ux64 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh32Ux32 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh32Ux8  <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Rsh16Ux64 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh16Ux32 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh16Ux8  <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Rsh8Ux64 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh8Ux32 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
(Rsh8Ux8  <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))

(Rsh64x64 x y) -> (SRA x (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh64x32 x y) -> (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh64x16 x y) -> (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh64x8  x y) -> (SRA x (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))

(Rsh32x64 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh32x32 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh32x16 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh32x8  x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))

(Rsh16x64 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh16x32 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh16x16 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh16x8  x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))

(Rsh8x64 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh8x32 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
(Rsh8x16 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
(Rsh8x8  x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))

// constants
(Const64 [val]) -> (MOVDconst [val])
(Const32 [val]) -> (MOVDconst [val])
(Const16 [val]) -> (MOVDconst [val])
(Const8 [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

(Slicemask <t> x) -> (SRAconst (NEG <t> x) [63])
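// Slicemask yields 0 for x == 0 and all ones otherwise: slice lengths
// are non-negative, so for x > 0 the sign bit of (NEG x) is set and
// the arithmetic shift smears it across all 64 bits, giving -1.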

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Zero-/Sign-extensions
(ZeroExt8to16 x) -> (MOVBUreg x)
(ZeroExt8to32 x) -> (MOVBUreg x)
(ZeroExt16to32 x) -> (MOVHUreg x)
(ZeroExt8to64 x) -> (MOVBUreg x)
(ZeroExt16to64 x) -> (MOVHUreg x)
(ZeroExt32to64 x) -> (MOVWUreg x)

(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt8to64 x) -> (MOVBreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

// float <-> int conversion
(Cvt32to32F x) -> (SCVTFWS x)
(Cvt32to64F x) -> (SCVTFWD x)
(Cvt64to32F x) -> (SCVTFS x)
(Cvt64to64F x) -> (SCVTFD x)
(Cvt32Uto32F x) -> (UCVTFWS x)
(Cvt32Uto64F x) -> (UCVTFWD x)
(Cvt64Uto32F x) -> (UCVTFS x)
(Cvt64Uto64F x) -> (UCVTFD x)
(Cvt32Fto32 x) -> (FCVTZSSW x)
(Cvt64Fto32 x) -> (FCVTZSDW x)
(Cvt32Fto64 x) -> (FCVTZSS x)
(Cvt64Fto64 x) -> (FCVTZSD x)
(Cvt32Fto32U x) -> (FCVTZUSW x)
(Cvt64Fto32U x) -> (FCVTZUDW x)
(Cvt32Fto64U x) -> (FCVTZUS x)
(Cvt64Fto64U x) -> (FCVTZUD x)
(Cvt32Fto64F x) -> (FCVTSD x)
(Cvt64Fto32F x) -> (FCVTDS x)

(Round32F x) -> x
(Round64F x) -> x

// comparisons
(Eq8 x y)  -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(EqPtr x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPS x y))
(Eq64F x y) -> (Equal (FCMPD x y))

(Neq8 x y)  -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(NeqPtr x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPS x y))
(Neq64F x y) -> (NotEqual (FCMPD x y))

(Less8 x y)  -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (GreaterThan (FCMPS y x)) // reverse operands to work around NaN
(Less64F x y) -> (GreaterThan (FCMPD y x)) // reverse operands to work around NaN

(Less8U x y)  -> (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThanU (CMPW x y))
(Less64U x y) -> (LessThanU (CMP x y))

(Leq8 x y)  -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (GreaterEqual (FCMPS y x)) // reverse operands to work around NaN
(Leq64F x y) -> (GreaterEqual (FCMPD y x)) // reverse operands to work around NaN

(Leq8U x y)  -> (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqualU (CMPW x y))
(Leq64U x y) -> (LessEqualU (CMP x y))

(Greater8 x y)  -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater32F x y) -> (GreaterThan (FCMPS x y))
(Greater64F x y) -> (GreaterThan (FCMPD x y))

(Greater8U x y)  -> (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThanU (CMPW x y))
(Greater64U x y) -> (GreaterThanU (CMP x y))

(Geq8 x y)  -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq32F x y) -> (GreaterEqual (FCMPS x y))
(Geq64F x y) -> (GreaterEqual (FCMPD x y))

(Geq8U x y)  -> (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqualU (CMPW x y))
(Geq64U x y) -> (GreaterEqualU (CMP x y))

(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)

(Addr {sym} base) -> (MOVDaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) -> (MOVHstore ptr (MOVDconst [0]) mem)
(Zero [4] ptr mem) -> (MOVWstore ptr (MOVDconst [0]) mem)
(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst [0]) mem)

(Zero [3] ptr mem) ->
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [5] ptr mem) ->
	(MOVBstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [6] ptr mem) ->
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [7] ptr mem) ->
	(MOVBstore [6] ptr (MOVDconst [0])
		(MOVHstore [4] ptr (MOVDconst [0])
			(MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [12] ptr mem) ->
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [16] ptr mem) ->
	(MOVDstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] ptr mem) ->
	(MOVDstore [16] ptr (MOVDconst [0])
		(MOVDstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))

// strip off fractional word zeroing
(Zero [s] ptr mem) && s%8 != 0 && s > 8 ->
	(Zero [s%8]
		(OffPtr <ptr.Type> ptr [s-s%8])
		(Zero [s-s%8] ptr mem))
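// e.g. Zero [21] becomes a Zero [5] at offset 16 layered over a
// Zero [16], and both pieces are then lowered by the fixed-size rules
// above.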

// medium zeroing uses a duff device
// 4, 8, and 128 are magic constants, see runtime/mkduff.go
(Zero [s] ptr mem)
	&& s%8 == 0 && s > 24 && s <= 8*128
	&& !config.noDuffDevice ->
	(DUFFZERO [4 * (128 - int64(s/8))] ptr mem)
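// e.g. Zero [64] clears 8 words, so it enters the 128-iteration duff
// body at byte offset 4*(128-8) = 480; per the magic constants, each
// iteration is 4 bytes of code and zeroes 8 bytes of data.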

// large zeroing uses a loop
(Zero [s] ptr mem)
	&& s%8 == 0 && (s > 8*128 || config.noDuffDevice) ->
	(LoweredZero
		ptr
		(ADDconst <ptr.Type> [s-8] ptr)
		mem)

// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) -> (MOVHstore dst (MOVHUload src mem) mem)
(Move [4] dst src mem) -> (MOVWstore dst (MOVWUload src mem) mem)
(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem)

(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) ->
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) ->
	(MOVBstore [6] dst (MOVBUload [6] src mem)
		(MOVHstore [4] dst (MOVHUload [4] src mem)
			(MOVWstore dst (MOVWUload src mem) mem)))
(Move [12] dst src mem) ->
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [16] dst src mem) ->
	(MOVDstore [8] dst (MOVDload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [24] dst src mem) ->
	(MOVDstore [16] dst (MOVDload [16] src mem)
		(MOVDstore [8] dst (MOVDload [8] src mem)
			(MOVDstore dst (MOVDload src mem) mem)))

// strip off fractional word move
(Move [s] dst src mem) && s%8 != 0 && s > 8 ->
	(Move [s%8]
		(OffPtr <dst.Type> dst [s-s%8])
		(OffPtr <src.Type> src [s-s%8])
		(Move [s-s%8] dst src mem))

// medium move uses a duff device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Move [s] dst src mem)
	&& s%8 == 0 && s > 24 && s <= 8*128
	&& !config.noDuffDevice ->
	(DUFFCOPY [8 * (128 - int64(s/8))] dst src mem)
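// e.g. Move [64] copies 8 words and enters the duff body at byte
// offset 8*(128-8) = 960; here each iteration is 8 bytes of code (a
// load and a store) and moves 8 bytes of data.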

// large move uses a loop
(Move [s] dst src mem)
	&& s > 24 && s%8 == 0 ->
	(LoweredMove
		dst
		src
		(ADDconst <src.Type> src [s-8])
		mem)

// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThanU (CMP idx len))
(IsSliceInBounds idx len) -> (LessEqualU (CMP idx len))

// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Convert x mem) -> (MOVDconvert x mem)

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessThanU cc) yes no) -> (ULT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (LessEqualU cc) yes no) -> (ULE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterThanU cc) yes no) -> (UGT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (GreaterEqualU cc) yes no) -> (UGE cc yes no)

(If cond yes no) -> (NZ cond yes no)

// atomic intrinsics
// Note: these ops do not accept an offset.
(AtomicLoad32  ptr mem) -> (LDARW ptr mem)
(AtomicLoad64  ptr mem) -> (LDAR  ptr mem)
(AtomicLoadPtr ptr mem) -> (LDAR  ptr mem)

(AtomicStore32      ptr val mem) -> (STLRW ptr val mem)
(AtomicStore64      ptr val mem) -> (STLR  ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (STLR  ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)

(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem)
(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8  ptr val mem) -> (LoweredAtomicOr8  ptr val mem)

// Optimizations

// Absorb boolean tests into block
(NZ (Equal cc) yes no) -> (EQ cc yes no)
(NZ (NotEqual cc) yes no) -> (NE cc yes no)
(NZ (LessThan cc) yes no) -> (LT cc yes no)
(NZ (LessThanU cc) yes no) -> (ULT cc yes no)
(NZ (LessEqual cc) yes no) -> (LE cc yes no)
(NZ (LessEqualU cc) yes no) -> (ULE cc yes no)
(NZ (GreaterThan cc) yes no) -> (GT cc yes no)
(NZ (GreaterThanU cc) yes no) -> (UGT cc yes no)
(NZ (GreaterEqual cc) yes no) -> (GE cc yes no)
(NZ (GreaterEqualU cc) yes no) -> (UGE cc yes no)

(EQ (CMPconst [0] x) yes no) -> (Z x yes no)
(NE (CMPconst [0] x) yes no) -> (NZ x yes no)
(EQ (CMPWconst [0] x) yes no) -> (ZW x yes no)
(NE (CMPWconst [0] x) yes no) -> (NZW x yes no)

// fold offset into address
(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)

// fold address into load/store
(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
	(MOVBload [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
	(MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
	(MOVHload [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
	(MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
	(MOVWload [off1+off2] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
	(MOVWUload [off1+off2] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
	(MOVDload [off1+off2] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
	(FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
	(FMOVDload [off1+off2] {sym} ptr mem)

(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 1, sym) ->
	(MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 2, sym) ->
	(MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 4, sym) ->
	(MOVWstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 8, sym) ->
	(MOVDstore [off1+off2] {sym} ptr val mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 4, sym) ->
	(FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 8, sym) ->
	(FMOVDstore [off1+off2] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
	(MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
	(MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
	(MOVWstorezero [off1+off2] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
	(MOVDstorezero [off1+off2] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// store zero
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)

// replace load from same location as preceding store with copy
// these rules seem to interact badly with other rules, resulting in slower code
//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) -> (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) -> (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) -> (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) -> (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) -> (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) -> (MOVDreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVHreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVBreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVHUreg _)) -> (MOVDreg x)
(MOVWreg x:(MOVWreg _)) -> (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) -> (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) -> (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) -> (MOVDreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// if a register move has only 1 use, just use the same register without emitting an instruction
// MOVDnop doesn't emit an instruction; it exists only to carry the type.
(MOVDreg x) && x.Uses == 1 -> (MOVDnop x)

// fold constant into arithmetic ops
(ADD x (MOVDconst [c])) -> (ADDconst [c] x)
(SUB x (MOVDconst [c])) -> (SUBconst [c] x)
(AND x (MOVDconst [c])) -> (ANDconst [c] x)
(OR  x (MOVDconst [c])) -> (ORconst  [c] x)
(XOR x (MOVDconst [c])) -> (XORconst [c] x)
(BIC x (MOVDconst [c])) -> (BICconst [c] x)

(SLL x (MOVDconst [c])) -> (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64)
(SRL x (MOVDconst [c])) -> (SRLconst x [c&63])
(SRA x (MOVDconst [c])) -> (SRAconst x [c&63])

(CMP x (MOVDconst [c])) -> (CMPconst [c] x)
(CMP (MOVDconst [c]) x) -> (InvertFlags (CMPconst [c] x))
(CMPW x (MOVDconst [c])) -> (CMPWconst [int64(int32(c))] x)
(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst [int64(int32(c))] x))

// mul by constant
(MUL x (MOVDconst [-1])) -> (NEG x)
(MUL _ (MOVDconst [0])) -> (MOVDconst [0])
(MUL x (MOVDconst [1])) -> x
(MUL x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)])
(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
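// e.g. x*5 matches the c-1 rule and becomes (ADDshiftLL x x [2]),
// i.e. x + x<<2; x*24 = 3*8 matches the c%3 rule and becomes
// (SLLconst [3] (ADDshiftLL x x [1])), i.e. (x + x<<1) << 3.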

(MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x)
(MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 -> x
(MULW x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))

// div by constant
(UDIV x (MOVDconst [1])) -> x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
(UDIVW x (MOVDconst [c])) && uint32(c)==1 -> x
(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (SRLconst [log2(c)] x)
(UMOD _ (MOVDconst [1])) -> (MOVDconst [0])
(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) -> (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 -> (MOVDconst [0])
(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (ANDconst [c-1] x)
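// e.g. an unsigned divide by 8 becomes (SRLconst [3] x) and the
// matching remainder becomes (ANDconst [7] x).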

// generic simplifications
(ADD x (NEG y)) -> (SUB x y)
(SUB x x) -> (MOVDconst [0])
(AND x x) -> x
(OR  x x) -> x
(XOR x x) -> (MOVDconst [0])
(BIC x x) -> (MOVDconst [0])
(AND x (MVN y)) -> (BIC x y)
(CSELULT x (MOVDconst [0]) flag) -> (CSELULT0 x flag)
(SUB x (SUB y z)) -> (SUB (ADD <v.Type> x z) y)
(SUB (SUB x y) z) -> (SUB x (ADD <y.Type> y z))

// remove redundant *const ops
(ADDconst [0]  x) -> x
(SUBconst [0]  x) -> x
(ANDconst [0]  _) -> (MOVDconst [0])
(ANDconst [-1] x) -> x
(ORconst  [0]  x) -> x
(ORconst  [-1] _) -> (MOVDconst [-1])
(XORconst [0]  x) -> x
(XORconst [-1] x) -> (MVN x)
(BICconst [0]  x) -> x
(BICconst [-1] _) -> (MOVDconst [0])

// generic constant folding
(ADDconst [c] (MOVDconst [d]))  -> (MOVDconst [c+d])
(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [c-d] x)
(SUBconst [c] (MOVDconst [d]))  -> (MOVDconst [d-c])
(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [-c-d] x)
(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [-c+d] x)
(SLLconst [c] (MOVDconst [d]))  -> (MOVDconst [int64(d)<<uint64(c)])
(SRLconst [c] (MOVDconst [d]))  -> (MOVDconst [int64(uint64(d)>>uint64(c))])
(SRAconst [c] (MOVDconst [d]))  -> (MOVDconst [int64(d)>>uint64(c)])
(MUL   (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c*d])
(MULW  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))])
(DIV   (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)/int64(d)])
(UDIV  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))])
(DIVW  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))])
(UDIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)/uint32(d))])
(MOD   (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)%int64(d)])
(UMOD  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)%uint64(d))])
(MODW  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)%int32(d))])
(UMODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)%uint32(d))])
(ANDconst [c] (MOVDconst [d]))  -> (MOVDconst [c&d])
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst  [c] (MOVDconst [d]))  -> (MOVDconst [c|d])
(ORconst  [c] (ORconst [d] x))  -> (ORconst [c|d] x)
(XORconst [c] (MOVDconst [d]))  -> (MOVDconst [c^d])
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(BICconst [c] (MOVDconst [d]))  -> (MOVDconst [d&^c])
(MVN (MOVDconst [c])) -> (MOVDconst [^c])
(NEG (MOVDconst [c])) -> (MOVDconst [-c])
(MOVBreg  (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVBUreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVHreg  (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
(MOVHUreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVWreg  (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
(MOVWUreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
(MOVDreg  (MOVDconst [c])) -> (MOVDconst [c])

// constant comparisons
(CMPconst  (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
(CMPconst  (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)<uint64(y) -> (FlagLT_ULT)
(CMPconst  (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)>uint64(y) -> (FlagLT_UGT)
(CMPconst  (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)<uint64(y) -> (FlagGT_ULT)
(CMPconst  (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)>uint64(y) -> (FlagGT_UGT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)

// other known comparisons
(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT)
(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT_ULT)
(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c -> (FlagLT_ULT)
(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
(CMPWconst (MOVBUreg _) [c]) && 0xff < int32(c) -> (FlagLT_ULT)
(CMPWconst (MOVHUreg _) [c]) && 0xffff < int32(c) -> (FlagLT_ULT)
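// e.g. a MOVBUreg result is at most 0xff and never negative, so
// comparing it against any larger constant always produces the
// "less than" flags in both the signed and unsigned sense.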
   836  
   837  // absorb flag constants into branches
   838  (EQ (FlagEQ) yes no) -> (First nil yes no)
   839  (EQ (FlagLT_ULT) yes no) -> (First nil no yes)
   840  (EQ (FlagLT_UGT) yes no) -> (First nil no yes)
   841  (EQ (FlagGT_ULT) yes no) -> (First nil no yes)
   842  (EQ (FlagGT_UGT) yes no) -> (First nil no yes)
   843  
   844  (NE (FlagEQ) yes no) -> (First nil no yes)
   845  (NE (FlagLT_ULT) yes no) -> (First nil yes no)
   846  (NE (FlagLT_UGT) yes no) -> (First nil yes no)
   847  (NE (FlagGT_ULT) yes no) -> (First nil yes no)
   848  (NE (FlagGT_UGT) yes no) -> (First nil yes no)
   849  
   850  (LT (FlagEQ) yes no) -> (First nil no yes)
   851  (LT (FlagLT_ULT) yes no) -> (First nil yes no)
   852  (LT (FlagLT_UGT) yes no) -> (First nil yes no)
   853  (LT (FlagGT_ULT) yes no) -> (First nil no yes)
   854  (LT (FlagGT_UGT) yes no) -> (First nil no yes)
   855  
   856  (LE (FlagEQ) yes no) -> (First nil yes no)
   857  (LE (FlagLT_ULT) yes no) -> (First nil yes no)
   858  (LE (FlagLT_UGT) yes no) -> (First nil yes no)
   859  (LE (FlagGT_ULT) yes no) -> (First nil no yes)
   860  (LE (FlagGT_UGT) yes no) -> (First nil no yes)
   861  
   862  (GT (FlagEQ) yes no) -> (First nil no yes)
   863  (GT (FlagLT_ULT) yes no) -> (First nil no yes)
   864  (GT (FlagLT_UGT) yes no) -> (First nil no yes)
   865  (GT (FlagGT_ULT) yes no) -> (First nil yes no)
   866  (GT (FlagGT_UGT) yes no) -> (First nil yes no)
   867  
   868  (GE (FlagEQ) yes no) -> (First nil yes no)
   869  (GE (FlagLT_ULT) yes no) -> (First nil no yes)
   870  (GE (FlagLT_UGT) yes no) -> (First nil no yes)
   871  (GE (FlagGT_ULT) yes no) -> (First nil yes no)
   872  (GE (FlagGT_UGT) yes no) -> (First nil yes no)
   873  
   874  (ULT (FlagEQ) yes no) -> (First nil no yes)
   875  (ULT (FlagLT_ULT) yes no) -> (First nil yes no)
   876  (ULT (FlagLT_UGT) yes no) -> (First nil no yes)
   877  (ULT (FlagGT_ULT) yes no) -> (First nil yes no)
   878  (ULT (FlagGT_UGT) yes no) -> (First nil no yes)
   879  
   880  (ULE (FlagEQ) yes no) -> (First nil yes no)
   881  (ULE (FlagLT_ULT) yes no) -> (First nil yes no)
   882  (ULE (FlagLT_UGT) yes no) -> (First nil no yes)
   883  (ULE (FlagGT_ULT) yes no) -> (First nil yes no)
   884  (ULE (FlagGT_UGT) yes no) -> (First nil no yes)
   885  
   886  (UGT (FlagEQ) yes no) -> (First nil no yes)
   887  (UGT (FlagLT_ULT) yes no) -> (First nil no yes)
   888  (UGT (FlagLT_UGT) yes no) -> (First nil yes no)
   889  (UGT (FlagGT_ULT) yes no) -> (First nil no yes)
   890  (UGT (FlagGT_UGT) yes no) -> (First nil yes no)
   891  
   892  (UGE (FlagEQ) yes no) -> (First nil yes no)
   893  (UGE (FlagLT_ULT) yes no) -> (First nil no yes)
   894  (UGE (FlagLT_UGT) yes no) -> (First nil yes no)
   895  (UGE (FlagGT_ULT) yes no) -> (First nil no yes)
   896  (UGE (FlagGT_UGT) yes no) -> (First nil yes no)
   897  
   898  (Z (MOVDconst [0]) yes no) -> (First nil yes no)
   899  (Z (MOVDconst [c]) yes no) && c != 0 -> (First nil no yes)
   900  (NZ (MOVDconst [0]) yes no) -> (First nil no yes)
   901  (NZ (MOVDconst [c]) yes no) && c != 0 -> (First nil yes no)
   902  (ZW (MOVDconst [c]) yes no) && int32(c) == 0 -> (First nil yes no)
   903  (ZW (MOVDconst [c]) yes no) && int32(c) != 0 -> (First nil no yes)
   904  (NZW (MOVDconst [c]) yes no) && int32(c) == 0 -> (First nil no yes)
   905  (NZW (MOVDconst [c]) yes no) && int32(c) != 0 -> (First nil yes no)
   906  
   907  // absorb InvertFlags into branches
   908  (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
   909  (GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
   910  (LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
   911  (GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
   912  (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
   913  (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
   914  (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
   915  (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
   916  (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
   917  (NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
   918  
   919  // absorb flag constants into boolean values
   920  (Equal (FlagEQ)) -> (MOVDconst [1])
   921  (Equal (FlagLT_ULT)) -> (MOVDconst [0])
   922  (Equal (FlagLT_UGT)) -> (MOVDconst [0])
   923  (Equal (FlagGT_ULT)) -> (MOVDconst [0])
   924  (Equal (FlagGT_UGT)) -> (MOVDconst [0])
   925  
   926  (NotEqual (FlagEQ)) -> (MOVDconst [0])
   927  (NotEqual (FlagLT_ULT)) -> (MOVDconst [1])
   928  (NotEqual (FlagLT_UGT)) -> (MOVDconst [1])
   929  (NotEqual (FlagGT_ULT)) -> (MOVDconst [1])
   930  (NotEqual (FlagGT_UGT)) -> (MOVDconst [1])
   931  
   932  (LessThan (FlagEQ)) -> (MOVDconst [0])
   933  (LessThan (FlagLT_ULT)) -> (MOVDconst [1])
   934  (LessThan (FlagLT_UGT)) -> (MOVDconst [1])
   935  (LessThan (FlagGT_ULT)) -> (MOVDconst [0])
   936  (LessThan (FlagGT_UGT)) -> (MOVDconst [0])
   937  
   938  (LessThanU (FlagEQ)) -> (MOVDconst [0])
   939  (LessThanU (FlagLT_ULT)) -> (MOVDconst [1])
   940  (LessThanU (FlagLT_UGT)) -> (MOVDconst [0])
   941  (LessThanU (FlagGT_ULT)) -> (MOVDconst [1])
   942  (LessThanU (FlagGT_UGT)) -> (MOVDconst [0])
   943  
   944  (LessEqual (FlagEQ)) -> (MOVDconst [1])
   945  (LessEqual (FlagLT_ULT)) -> (MOVDconst [1])
   946  (LessEqual (FlagLT_UGT)) -> (MOVDconst [1])
   947  (LessEqual (FlagGT_ULT)) -> (MOVDconst [0])
   948  (LessEqual (FlagGT_UGT)) -> (MOVDconst [0])
   949  
   950  (LessEqualU (FlagEQ)) -> (MOVDconst [1])
   951  (LessEqualU (FlagLT_ULT)) -> (MOVDconst [1])
   952  (LessEqualU (FlagLT_UGT)) -> (MOVDconst [0])
   953  (LessEqualU (FlagGT_ULT)) -> (MOVDconst [1])
   954  (LessEqualU (FlagGT_UGT)) -> (MOVDconst [0])
   955  
   956  (GreaterThan (FlagEQ)) -> (MOVDconst [0])
   957  (GreaterThan (FlagLT_ULT)) -> (MOVDconst [0])
   958  (GreaterThan (FlagLT_UGT)) -> (MOVDconst [0])
   959  (GreaterThan (FlagGT_ULT)) -> (MOVDconst [1])
   960  (GreaterThan (FlagGT_UGT)) -> (MOVDconst [1])
   961  
   962  (GreaterThanU (FlagEQ)) -> (MOVDconst [0])
   963  (GreaterThanU (FlagLT_ULT)) -> (MOVDconst [0])
   964  (GreaterThanU (FlagLT_UGT)) -> (MOVDconst [1])
   965  (GreaterThanU (FlagGT_ULT)) -> (MOVDconst [0])
   966  (GreaterThanU (FlagGT_UGT)) -> (MOVDconst [1])
   967  
   968  (GreaterEqual (FlagEQ)) -> (MOVDconst [1])
   969  (GreaterEqual (FlagLT_ULT)) -> (MOVDconst [0])
   970  (GreaterEqual (FlagLT_UGT)) -> (MOVDconst [0])
   971  (GreaterEqual (FlagGT_ULT)) -> (MOVDconst [1])
   972  (GreaterEqual (FlagGT_UGT)) -> (MOVDconst [1])
   973  
   974  (GreaterEqualU (FlagEQ)) -> (MOVDconst [1])
   975  (GreaterEqualU (FlagLT_ULT)) -> (MOVDconst [0])
   976  (GreaterEqualU (FlagLT_UGT)) -> (MOVDconst [1])
   977  (GreaterEqualU (FlagGT_ULT)) -> (MOVDconst [0])
   978  (GreaterEqualU (FlagGT_UGT)) -> (MOVDconst [1])
   979  
   980  // absorb InvertFlags into boolean values
   981  (Equal (InvertFlags x)) -> (Equal x)
   982  (NotEqual (InvertFlags x)) -> (NotEqual x)
   983  (LessThan (InvertFlags x)) -> (GreaterThan x)
   984  (LessThanU (InvertFlags x)) -> (GreaterThanU x)
   985  (GreaterThan (InvertFlags x)) -> (LessThan x)
   986  (GreaterThanU (InvertFlags x)) -> (LessThanU x)
   987  (LessEqual (InvertFlags x)) -> (GreaterEqual x)
   988  (LessEqualU (InvertFlags x)) -> (GreaterEqualU x)
   989  (GreaterEqual (InvertFlags x)) -> (LessEqual x)
   990  (GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
   991  
   992  // absorb flag constants into conditional instructions
   993  (CSELULT _ y (FlagEQ)) -> y
   994  (CSELULT x _ (FlagLT_ULT)) -> x
   995  (CSELULT _ y (FlagLT_UGT)) -> y
   996  (CSELULT x _ (FlagGT_ULT)) -> x
   997  (CSELULT _ y (FlagGT_UGT)) -> y
   998  (CSELULT0 _ (FlagEQ)) -> (MOVDconst [0])
   999  (CSELULT0 x (FlagLT_ULT)) -> x
  1000  (CSELULT0 _ (FlagLT_UGT)) -> (MOVDconst [0])
  1001  (CSELULT0 x (FlagGT_ULT)) -> x
  1002  (CSELULT0 _ (FlagGT_UGT)) -> (MOVDconst [0])

// absorb shifts into ops
(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
(SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c])
(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
(OR  x (SLLconst [c] y)) -> (ORshiftLL  x y [c]) // useful for combined load
(OR  x (SRLconst [c] y)) -> (ORshiftRL  x y [c])
(OR  x (SRAconst [c] y)) -> (ORshiftRA  x y [c])
(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
(CMP x (SLLconst [c] y)) -> (CMPshiftLL x y [c])
(CMP (SLLconst [c] y) x) -> (InvertFlags (CMPshiftLL x y [c]))
(CMP x (SRLconst [c] y)) -> (CMPshiftRL x y [c])
(CMP (SRLconst [c] y) x) -> (InvertFlags (CMPshiftRL x y [c]))
(CMP x (SRAconst [c] y)) -> (CMPshiftRA x y [c])
(CMP (SRAconst [c] y) x) -> (InvertFlags (CMPshiftRA x y [c]))
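
// ARM64 data-processing instructions take an optionally shifted register
// as their second operand, so Go code like the following (illustrative)
// needs no separate shift instruction:
//
//	func addShifted(a, b int64) int64 {
//		return a + b<<3 // ADD absorbs the shift: (ADDshiftLL a b [3])
//	}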

// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
(ADDshiftRL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
(ADDshiftRA (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
(ANDshiftLL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
(ANDshiftRL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
(ANDshiftRA (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
(ORshiftLL  (MOVDconst [c]) x [d]) -> (ORconst  [c] (SLLconst <x.Type> x [d]))
(ORshiftRL  (MOVDconst [c]) x [d]) -> (ORconst  [c] (SRLconst <x.Type> x [d]))
(ORshiftRA  (MOVDconst [c]) x [d]) -> (ORconst  [c] (SRAconst <x.Type> x [d]))
(XORshiftLL (MOVDconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
(XORshiftRL (MOVDconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
(XORshiftRA (MOVDconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
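
// For example, (ADDshiftLL (MOVDconst [c]) x [d]) computes c + (x<<d);
// rewriting it to (ADDconst [c] (SLLconst <x.Type> x [d])) exposes c to
// the ADDconst folding rules.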

// constant folding in *shift ops
(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [int64(int64(c)>>uint64(d))])
(SUBshiftLL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)<<uint64(d))])
(SUBshiftRL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)>>uint64(d))])
(SUBshiftRA x (MOVDconst [c]) [d]) -> (SUBconst x [int64(int64(c)>>uint64(d))])
(ANDshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)<<uint64(d))])
(ANDshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)>>uint64(d))])
(ANDshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [int64(int64(c)>>uint64(d))])
(ORshiftLL  x (MOVDconst [c]) [d]) -> (ORconst  x [int64(uint64(c)<<uint64(d))])
(ORshiftRL  x (MOVDconst [c]) [d]) -> (ORconst  x [int64(uint64(c)>>uint64(d))])
(ORshiftRA  x (MOVDconst [c]) [d]) -> (ORconst  x [int64(int64(c)>>uint64(d))])
(XORshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)<<uint64(d))])
(XORshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)>>uint64(d))])
(XORshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [int64(int64(c)>>uint64(d))])
(BICshiftLL x (MOVDconst [c]) [d]) -> (BICconst x [int64(uint64(c)<<uint64(d))])
(BICshiftRL x (MOVDconst [c]) [d]) -> (BICconst x [int64(uint64(c)>>uint64(d))])
(BICshiftRA x (MOVDconst [c]) [d]) -> (BICconst x [int64(int64(c)>>uint64(d))])
(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [int64(int64(c)>>uint64(d))])
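
// Worked example: (ADDshiftLL x (MOVDconst [5]) [2]) is x + (5<<2) and
// folds to (ADDconst x [20]); the RA forms shift c arithmetically instead.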

// simplification with *shift ops
(SUBshiftLL (SLLconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(SUBshiftRL (SRLconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(SUBshiftRA (SRAconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(ANDshiftLL y:(SLLconst x [c]) x [d]) && c==d -> y
(ANDshiftRL y:(SRLconst x [c]) x [d]) && c==d -> y
(ANDshiftRA y:(SRAconst x [c]) x [d]) && c==d -> y
(ORshiftLL  y:(SLLconst x [c]) x [d]) && c==d -> y
(ORshiftRL  y:(SRLconst x [c]) x [d]) && c==d -> y
(ORshiftRA  y:(SRAconst x [c]) x [d]) && c==d -> y
(XORshiftLL (SLLconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(XORshiftRL (SRLconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(XORshiftRA (SRAconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(BICshiftLL (SLLconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(BICshiftRL (SRLconst x [c]) x [d]) && c==d -> (MOVDconst [0])
(BICshiftRA (SRAconst x [c]) x [d]) && c==d -> (MOVDconst [0])
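
// The shifted operand must be the first argument: an *shift op computes
// arg0 OP (arg1 shifted by auxInt), so these follow from the identities
// (x<<c) - (x<<c) = 0, (x<<c) & (x<<c) = x<<c, (x<<c) | (x<<c) = x<<c,
// (x<<c) ^ (x<<c) = 0, and (x<<c) &^ (x<<c) = 0 (likewise for >>).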

// Generate rotates
(ADDshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
( ORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
(XORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
(ADDshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [   c] x)
( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [   c] x)
(XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [   c] x)
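
// Illustrative Go: a rotate spelled with shifts compiles to a single ROR:
//
//	func ror7(x uint64) uint64 {
//		// (ORshiftLL [57] (SRLconst x [7]) x) -> (RORconst [7] x)
//		return x<<57 | x>>7
//	}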

(ADDshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
( ORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
(XORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [   c] x)
( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [   c] x)
(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [   c] x)

// The generic rules rewrite certain AND operations into a pair of shifts.
// On ARM64, however, such a bitmask can be encoded directly in the AND
// instruction, so rewrite the shift pair back to an AND.
(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
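
// Worked example: with c=8, (SRLconst [8] (SLLconst [8] x)) keeps the low
// 56 bits and becomes (ANDconst [1<<56-1] x); the SLL-of-SRL form clears
// the low 8 bits via (ANDconst [^(1<<8-1)] x).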

// do combined loads
// little endian loads
// b[0] | b[1]<<8 -> load 16-bit
(ORshiftLL <t> [8]
	y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
	&& i1 == i0+1
	&& x0.Uses == 1 && x1.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1
	&& mergePoint(b,x0,x1) != nil
	&& clobber(x0) && clobber(x1)
	&& clobber(y0) && clobber(y1)
	-> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
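
// Illustrative Go (assuming bounds checks have been eliminated): the
// little-endian read below matches the rule above and becomes one MOVHU:
//
//	func load16(b []byte) uint16 {
//		return uint16(b[0]) | uint16(b[1])<<8
//	}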

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
	            x0:(MOVHUload [i0] {s} p mem)
	y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
	&& i2 == i0+2
	&& i3 == i0+3
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
	&& y1.Uses == 1 && y2.Uses == 1
	&& o0.Uses == 1
	&& mergePoint(b,x0,x1,x2) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2)
	&& clobber(y1) && clobber(y2)
	&& clobber(o0)
	-> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
	            x0:(MOVWUload [i0] {s} p mem)
	y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
	y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem)))
	y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
	&& i4 == i0+4
	&& i5 == i0+5
	&& i6 == i0+6
	&& i7 == i0+7
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
	&& y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
	&& mergePoint(b,x0,x1,x2,x3,x4) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
	&& clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4)
	&& clobber(o0) && clobber(o1) && clobber(o2)
	-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
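
// The 32- and 64-bit rules chain on the narrower merged load, so a full
// little-endian 64-bit read (illustrative, bounds checks eliminated)
// collapses step by step into one MOVD load:
//
//	func load64(b []byte) uint64 {
//		return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 |
//			uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 |
//			uint64(b[6])<<48 | uint64(b[7])<<56
//	}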

// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 32-bit
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
	y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
	y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))
	y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
	&& i1 == i0+1
	&& i2 == i0+2
	&& i3 == i0+3
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
	&& mergePoint(b,x0,x1,x2,x3) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
	&& clobber(o0) && clobber(o1) && clobber(s0)
	-> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)

// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 64-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
	y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
	y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
	y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem)))
	y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem)))
	y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem)))
	y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))
	y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
	&& i1 == i0+1
	&& i2 == i0+2
	&& i3 == i0+3
	&& i4 == i0+4
	&& i5 == i0+5
	&& i6 == i0+6
	&& i7 == i0+7
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
	&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
	&& y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
	&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
	&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7)
	&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3)
	&& clobber(o4) && clobber(o5) && clobber(s0)
	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// big endian loads
// b[1] | b[0]<<8 -> load 16-bit, reverse
(ORshiftLL <t> [8]
	y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
	y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
	&& i1 == i0+1
	&& fitsARM64Offset(i0, 2, s)
	&& x0.Uses == 1 && x1.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1
	&& mergePoint(b,x0,x1) != nil
	&& clobber(x0) && clobber(x1)
	&& clobber(y0) && clobber(y1)
	-> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
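
// Illustrative Go for the big-endian form, which pairs the wide load with
// a byte-reverse instruction:
//
//	func load16be(b []byte) uint16 {
//		return uint16(b[1]) | uint16(b[0])<<8 // MOVHU load + REV16W
//	}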

// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit, reverse
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
	y0:(REV16W  x0:(MOVHUload [i2] {s} p mem))
	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
	&& i1 == i0+1
	&& i2 == i0+2
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
	&& o0.Uses == 1
	&& mergePoint(b,x0,x1,x2) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2)
	&& clobber(y0) && clobber(y1) && clobber(y2)
	&& clobber(o0)
	-> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit, reverse
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
	y0:(REVW    x0:(MOVWUload [i4] {s} p mem))
	y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
	y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem)))
	y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
	&& i1 == i0+1
	&& i2 == i0+2
	&& i3 == i0+3
	&& i4 == i0+4
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
	&& mergePoint(b,x0,x1,x2,x3,x4) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4)
	&& clobber(o0) && clobber(o1) && clobber(o2)
	-> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
	y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
	y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
	&& i1 == i0+1
	&& i2 == i0+2
	&& i3 == i0+3
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
	&& mergePoint(b,x0,x1,x2,x3) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
	&& clobber(o0) && clobber(o1) && clobber(s0)
	-> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))

// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
	y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
	y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
	y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
	y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem)))
	y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem)))
	y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))
	y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
	&& i1 == i0+1
	&& i2 == i0+2
	&& i3 == i0+3
	&& i4 == i0+4
	&& i5 == i0+5
	&& i6 == i0+6
	&& i7 == i0+7
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
	&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
	&& y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
	&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3)
	&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7)
	&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3)
	&& clobber(o4) && clobber(o5) && clobber(s0)
	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))