github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/cmd/compile/internal/ssa/rewriteAMD64.go (about)

     1  // Code generated from gen/AMD64.rules; DO NOT EDIT.
     2  // generated with: cd gen; go run *.go
     3  
     4  package ssa
     5  
     6  import "math"
     7  import "cmd/internal/obj"
     8  import "cmd/internal/objabi"
     9  import "cmd/compile/internal/types"
    10  
    11  var _ = math.MinInt8  // in case not otherwise used
    12  var _ = obj.ANOP      // in case not otherwise used
    13  var _ = objabi.GOROOT // in case not otherwise used
    14  var _ = types.TypeMem // in case not otherwise used
    15  
    16  func rewriteValueAMD64(v *Value) bool {
    17  	switch v.Op {
    18  	case OpAMD64ADDL:
    19  		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
    20  	case OpAMD64ADDLconst:
    21  		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
    22  	case OpAMD64ADDLconstmem:
    23  		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
    24  	case OpAMD64ADDLmem:
    25  		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
    26  	case OpAMD64ADDQ:
    27  		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
    28  	case OpAMD64ADDQconst:
    29  		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
    30  	case OpAMD64ADDQconstmem:
    31  		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
    32  	case OpAMD64ADDQmem:
    33  		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
    34  	case OpAMD64ADDSD:
    35  		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
    36  	case OpAMD64ADDSDmem:
    37  		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
    38  	case OpAMD64ADDSS:
    39  		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
    40  	case OpAMD64ADDSSmem:
    41  		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
    42  	case OpAMD64ANDL:
    43  		return rewriteValueAMD64_OpAMD64ANDL_0(v)
    44  	case OpAMD64ANDLconst:
    45  		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
    46  	case OpAMD64ANDLmem:
    47  		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
    48  	case OpAMD64ANDQ:
    49  		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
    50  	case OpAMD64ANDQconst:
    51  		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
    52  	case OpAMD64ANDQmem:
    53  		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
    54  	case OpAMD64BSFQ:
    55  		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
    56  	case OpAMD64BTQconst:
    57  		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
    58  	case OpAMD64CMOVQEQ:
    59  		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
    60  	case OpAMD64CMPB:
    61  		return rewriteValueAMD64_OpAMD64CMPB_0(v)
    62  	case OpAMD64CMPBconst:
    63  		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
    64  	case OpAMD64CMPL:
    65  		return rewriteValueAMD64_OpAMD64CMPL_0(v)
    66  	case OpAMD64CMPLconst:
    67  		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
    68  	case OpAMD64CMPQ:
    69  		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
    70  	case OpAMD64CMPQconst:
    71  		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
    72  	case OpAMD64CMPW:
    73  		return rewriteValueAMD64_OpAMD64CMPW_0(v)
    74  	case OpAMD64CMPWconst:
    75  		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
    76  	case OpAMD64CMPXCHGLlock:
    77  		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
    78  	case OpAMD64CMPXCHGQlock:
    79  		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
    80  	case OpAMD64LEAL:
    81  		return rewriteValueAMD64_OpAMD64LEAL_0(v)
    82  	case OpAMD64LEAQ:
    83  		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
    84  	case OpAMD64LEAQ1:
    85  		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
    86  	case OpAMD64LEAQ2:
    87  		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
    88  	case OpAMD64LEAQ4:
    89  		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
    90  	case OpAMD64LEAQ8:
    91  		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
    92  	case OpAMD64MOVBQSX:
    93  		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
    94  	case OpAMD64MOVBQSXload:
    95  		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
    96  	case OpAMD64MOVBQZX:
    97  		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
    98  	case OpAMD64MOVBload:
    99  		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
   100  	case OpAMD64MOVBloadidx1:
   101  		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
   102  	case OpAMD64MOVBstore:
   103  		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
   104  	case OpAMD64MOVBstoreconst:
   105  		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
   106  	case OpAMD64MOVBstoreconstidx1:
   107  		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
   108  	case OpAMD64MOVBstoreidx1:
   109  		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
   110  	case OpAMD64MOVLQSX:
   111  		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
   112  	case OpAMD64MOVLQSXload:
   113  		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
   114  	case OpAMD64MOVLQZX:
   115  		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
   116  	case OpAMD64MOVLatomicload:
   117  		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
   118  	case OpAMD64MOVLf2i:
   119  		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
   120  	case OpAMD64MOVLi2f:
   121  		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
   122  	case OpAMD64MOVLload:
   123  		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
   124  	case OpAMD64MOVLloadidx1:
   125  		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
   126  	case OpAMD64MOVLloadidx4:
   127  		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
   128  	case OpAMD64MOVLloadidx8:
   129  		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
   130  	case OpAMD64MOVLstore:
   131  		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
   132  	case OpAMD64MOVLstoreconst:
   133  		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
   134  	case OpAMD64MOVLstoreconstidx1:
   135  		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
   136  	case OpAMD64MOVLstoreconstidx4:
   137  		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
   138  	case OpAMD64MOVLstoreidx1:
   139  		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
   140  	case OpAMD64MOVLstoreidx4:
   141  		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
   142  	case OpAMD64MOVLstoreidx8:
   143  		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
   144  	case OpAMD64MOVOload:
   145  		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
   146  	case OpAMD64MOVOstore:
   147  		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
   148  	case OpAMD64MOVQatomicload:
   149  		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
   150  	case OpAMD64MOVQf2i:
   151  		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
   152  	case OpAMD64MOVQi2f:
   153  		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
   154  	case OpAMD64MOVQload:
   155  		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
   156  	case OpAMD64MOVQloadidx1:
   157  		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
   158  	case OpAMD64MOVQloadidx8:
   159  		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
   160  	case OpAMD64MOVQstore:
   161  		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
   162  	case OpAMD64MOVQstoreconst:
   163  		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
   164  	case OpAMD64MOVQstoreconstidx1:
   165  		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
   166  	case OpAMD64MOVQstoreconstidx8:
   167  		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
   168  	case OpAMD64MOVQstoreidx1:
   169  		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
   170  	case OpAMD64MOVQstoreidx8:
   171  		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
   172  	case OpAMD64MOVSDload:
   173  		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
   174  	case OpAMD64MOVSDloadidx1:
   175  		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
   176  	case OpAMD64MOVSDloadidx8:
   177  		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
   178  	case OpAMD64MOVSDstore:
   179  		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
   180  	case OpAMD64MOVSDstoreidx1:
   181  		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
   182  	case OpAMD64MOVSDstoreidx8:
   183  		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
   184  	case OpAMD64MOVSSload:
   185  		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
   186  	case OpAMD64MOVSSloadidx1:
   187  		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
   188  	case OpAMD64MOVSSloadidx4:
   189  		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
   190  	case OpAMD64MOVSSstore:
   191  		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
   192  	case OpAMD64MOVSSstoreidx1:
   193  		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
   194  	case OpAMD64MOVSSstoreidx4:
   195  		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
   196  	case OpAMD64MOVWQSX:
   197  		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
   198  	case OpAMD64MOVWQSXload:
   199  		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
   200  	case OpAMD64MOVWQZX:
   201  		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
   202  	case OpAMD64MOVWload:
   203  		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
   204  	case OpAMD64MOVWloadidx1:
   205  		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
   206  	case OpAMD64MOVWloadidx2:
   207  		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
   208  	case OpAMD64MOVWstore:
   209  		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
   210  	case OpAMD64MOVWstoreconst:
   211  		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
   212  	case OpAMD64MOVWstoreconstidx1:
   213  		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
   214  	case OpAMD64MOVWstoreconstidx2:
   215  		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
   216  	case OpAMD64MOVWstoreidx1:
   217  		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
   218  	case OpAMD64MOVWstoreidx2:
   219  		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
   220  	case OpAMD64MULL:
   221  		return rewriteValueAMD64_OpAMD64MULL_0(v)
   222  	case OpAMD64MULLconst:
   223  		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
   224  	case OpAMD64MULQ:
   225  		return rewriteValueAMD64_OpAMD64MULQ_0(v)
   226  	case OpAMD64MULQconst:
   227  		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
   228  	case OpAMD64MULSD:
   229  		return rewriteValueAMD64_OpAMD64MULSD_0(v)
   230  	case OpAMD64MULSDmem:
   231  		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
   232  	case OpAMD64MULSS:
   233  		return rewriteValueAMD64_OpAMD64MULSS_0(v)
   234  	case OpAMD64MULSSmem:
   235  		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
   236  	case OpAMD64NEGL:
   237  		return rewriteValueAMD64_OpAMD64NEGL_0(v)
   238  	case OpAMD64NEGQ:
   239  		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
   240  	case OpAMD64NOTL:
   241  		return rewriteValueAMD64_OpAMD64NOTL_0(v)
   242  	case OpAMD64NOTQ:
   243  		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
   244  	case OpAMD64ORL:
   245  		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
   246  	case OpAMD64ORLconst:
   247  		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
   248  	case OpAMD64ORLmem:
   249  		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
   250  	case OpAMD64ORQ:
   251  		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
   252  	case OpAMD64ORQconst:
   253  		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
   254  	case OpAMD64ORQmem:
   255  		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
   256  	case OpAMD64ROLB:
   257  		return rewriteValueAMD64_OpAMD64ROLB_0(v)
   258  	case OpAMD64ROLBconst:
   259  		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
   260  	case OpAMD64ROLL:
   261  		return rewriteValueAMD64_OpAMD64ROLL_0(v)
   262  	case OpAMD64ROLLconst:
   263  		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
   264  	case OpAMD64ROLQ:
   265  		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
   266  	case OpAMD64ROLQconst:
   267  		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
   268  	case OpAMD64ROLW:
   269  		return rewriteValueAMD64_OpAMD64ROLW_0(v)
   270  	case OpAMD64ROLWconst:
   271  		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
   272  	case OpAMD64RORB:
   273  		return rewriteValueAMD64_OpAMD64RORB_0(v)
   274  	case OpAMD64RORL:
   275  		return rewriteValueAMD64_OpAMD64RORL_0(v)
   276  	case OpAMD64RORQ:
   277  		return rewriteValueAMD64_OpAMD64RORQ_0(v)
   278  	case OpAMD64RORW:
   279  		return rewriteValueAMD64_OpAMD64RORW_0(v)
   280  	case OpAMD64SARB:
   281  		return rewriteValueAMD64_OpAMD64SARB_0(v)
   282  	case OpAMD64SARBconst:
   283  		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
   284  	case OpAMD64SARL:
   285  		return rewriteValueAMD64_OpAMD64SARL_0(v)
   286  	case OpAMD64SARLconst:
   287  		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
   288  	case OpAMD64SARQ:
   289  		return rewriteValueAMD64_OpAMD64SARQ_0(v)
   290  	case OpAMD64SARQconst:
   291  		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
   292  	case OpAMD64SARW:
   293  		return rewriteValueAMD64_OpAMD64SARW_0(v)
   294  	case OpAMD64SARWconst:
   295  		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
   296  	case OpAMD64SBBLcarrymask:
   297  		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
   298  	case OpAMD64SBBQcarrymask:
   299  		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
   300  	case OpAMD64SETA:
   301  		return rewriteValueAMD64_OpAMD64SETA_0(v)
   302  	case OpAMD64SETAE:
   303  		return rewriteValueAMD64_OpAMD64SETAE_0(v)
   304  	case OpAMD64SETAEmem:
   305  		return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
   306  	case OpAMD64SETAmem:
   307  		return rewriteValueAMD64_OpAMD64SETAmem_0(v)
   308  	case OpAMD64SETB:
   309  		return rewriteValueAMD64_OpAMD64SETB_0(v)
   310  	case OpAMD64SETBE:
   311  		return rewriteValueAMD64_OpAMD64SETBE_0(v)
   312  	case OpAMD64SETBEmem:
   313  		return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
   314  	case OpAMD64SETBmem:
   315  		return rewriteValueAMD64_OpAMD64SETBmem_0(v)
   316  	case OpAMD64SETEQ:
   317  		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
   318  	case OpAMD64SETEQmem:
   319  		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
   320  	case OpAMD64SETG:
   321  		return rewriteValueAMD64_OpAMD64SETG_0(v)
   322  	case OpAMD64SETGE:
   323  		return rewriteValueAMD64_OpAMD64SETGE_0(v)
   324  	case OpAMD64SETGEmem:
   325  		return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
   326  	case OpAMD64SETGmem:
   327  		return rewriteValueAMD64_OpAMD64SETGmem_0(v)
   328  	case OpAMD64SETL:
   329  		return rewriteValueAMD64_OpAMD64SETL_0(v)
   330  	case OpAMD64SETLE:
   331  		return rewriteValueAMD64_OpAMD64SETLE_0(v)
   332  	case OpAMD64SETLEmem:
   333  		return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
   334  	case OpAMD64SETLmem:
   335  		return rewriteValueAMD64_OpAMD64SETLmem_0(v)
   336  	case OpAMD64SETNE:
   337  		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
   338  	case OpAMD64SETNEmem:
   339  		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
   340  	case OpAMD64SHLL:
   341  		return rewriteValueAMD64_OpAMD64SHLL_0(v)
   342  	case OpAMD64SHLLconst:
   343  		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
   344  	case OpAMD64SHLQ:
   345  		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
   346  	case OpAMD64SHLQconst:
   347  		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
   348  	case OpAMD64SHRB:
   349  		return rewriteValueAMD64_OpAMD64SHRB_0(v)
   350  	case OpAMD64SHRBconst:
   351  		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
   352  	case OpAMD64SHRL:
   353  		return rewriteValueAMD64_OpAMD64SHRL_0(v)
   354  	case OpAMD64SHRLconst:
   355  		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
   356  	case OpAMD64SHRQ:
   357  		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
   358  	case OpAMD64SHRQconst:
   359  		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
   360  	case OpAMD64SHRW:
   361  		return rewriteValueAMD64_OpAMD64SHRW_0(v)
   362  	case OpAMD64SHRWconst:
   363  		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
   364  	case OpAMD64SUBL:
   365  		return rewriteValueAMD64_OpAMD64SUBL_0(v)
   366  	case OpAMD64SUBLconst:
   367  		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
   368  	case OpAMD64SUBLmem:
   369  		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
   370  	case OpAMD64SUBQ:
   371  		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
   372  	case OpAMD64SUBQconst:
   373  		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
   374  	case OpAMD64SUBQmem:
   375  		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
   376  	case OpAMD64SUBSD:
   377  		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
   378  	case OpAMD64SUBSDmem:
   379  		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
   380  	case OpAMD64SUBSS:
   381  		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
   382  	case OpAMD64SUBSSmem:
   383  		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
   384  	case OpAMD64TESTB:
   385  		return rewriteValueAMD64_OpAMD64TESTB_0(v)
   386  	case OpAMD64TESTL:
   387  		return rewriteValueAMD64_OpAMD64TESTL_0(v)
   388  	case OpAMD64TESTQ:
   389  		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
   390  	case OpAMD64TESTW:
   391  		return rewriteValueAMD64_OpAMD64TESTW_0(v)
   392  	case OpAMD64XADDLlock:
   393  		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
   394  	case OpAMD64XADDQlock:
   395  		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
   396  	case OpAMD64XCHGL:
   397  		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
   398  	case OpAMD64XCHGQ:
   399  		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
   400  	case OpAMD64XORL:
   401  		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
   402  	case OpAMD64XORLconst:
   403  		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
   404  	case OpAMD64XORLmem:
   405  		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
   406  	case OpAMD64XORQ:
   407  		return rewriteValueAMD64_OpAMD64XORQ_0(v)
   408  	case OpAMD64XORQconst:
   409  		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
   410  	case OpAMD64XORQmem:
   411  		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
   412  	case OpAdd16:
   413  		return rewriteValueAMD64_OpAdd16_0(v)
   414  	case OpAdd32:
   415  		return rewriteValueAMD64_OpAdd32_0(v)
   416  	case OpAdd32F:
   417  		return rewriteValueAMD64_OpAdd32F_0(v)
   418  	case OpAdd64:
   419  		return rewriteValueAMD64_OpAdd64_0(v)
   420  	case OpAdd64F:
   421  		return rewriteValueAMD64_OpAdd64F_0(v)
   422  	case OpAdd8:
   423  		return rewriteValueAMD64_OpAdd8_0(v)
   424  	case OpAddPtr:
   425  		return rewriteValueAMD64_OpAddPtr_0(v)
   426  	case OpAddr:
   427  		return rewriteValueAMD64_OpAddr_0(v)
   428  	case OpAnd16:
   429  		return rewriteValueAMD64_OpAnd16_0(v)
   430  	case OpAnd32:
   431  		return rewriteValueAMD64_OpAnd32_0(v)
   432  	case OpAnd64:
   433  		return rewriteValueAMD64_OpAnd64_0(v)
   434  	case OpAnd8:
   435  		return rewriteValueAMD64_OpAnd8_0(v)
   436  	case OpAndB:
   437  		return rewriteValueAMD64_OpAndB_0(v)
   438  	case OpAtomicAdd32:
   439  		return rewriteValueAMD64_OpAtomicAdd32_0(v)
   440  	case OpAtomicAdd64:
   441  		return rewriteValueAMD64_OpAtomicAdd64_0(v)
   442  	case OpAtomicAnd8:
   443  		return rewriteValueAMD64_OpAtomicAnd8_0(v)
   444  	case OpAtomicCompareAndSwap32:
   445  		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
   446  	case OpAtomicCompareAndSwap64:
   447  		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
   448  	case OpAtomicExchange32:
   449  		return rewriteValueAMD64_OpAtomicExchange32_0(v)
   450  	case OpAtomicExchange64:
   451  		return rewriteValueAMD64_OpAtomicExchange64_0(v)
   452  	case OpAtomicLoad32:
   453  		return rewriteValueAMD64_OpAtomicLoad32_0(v)
   454  	case OpAtomicLoad64:
   455  		return rewriteValueAMD64_OpAtomicLoad64_0(v)
   456  	case OpAtomicLoadPtr:
   457  		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
   458  	case OpAtomicOr8:
   459  		return rewriteValueAMD64_OpAtomicOr8_0(v)
   460  	case OpAtomicStore32:
   461  		return rewriteValueAMD64_OpAtomicStore32_0(v)
   462  	case OpAtomicStore64:
   463  		return rewriteValueAMD64_OpAtomicStore64_0(v)
   464  	case OpAtomicStorePtrNoWB:
   465  		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
   466  	case OpAvg64u:
   467  		return rewriteValueAMD64_OpAvg64u_0(v)
   468  	case OpBitLen32:
   469  		return rewriteValueAMD64_OpBitLen32_0(v)
   470  	case OpBitLen64:
   471  		return rewriteValueAMD64_OpBitLen64_0(v)
   472  	case OpBswap32:
   473  		return rewriteValueAMD64_OpBswap32_0(v)
   474  	case OpBswap64:
   475  		return rewriteValueAMD64_OpBswap64_0(v)
   476  	case OpCeil:
   477  		return rewriteValueAMD64_OpCeil_0(v)
   478  	case OpClosureCall:
   479  		return rewriteValueAMD64_OpClosureCall_0(v)
   480  	case OpCom16:
   481  		return rewriteValueAMD64_OpCom16_0(v)
   482  	case OpCom32:
   483  		return rewriteValueAMD64_OpCom32_0(v)
   484  	case OpCom64:
   485  		return rewriteValueAMD64_OpCom64_0(v)
   486  	case OpCom8:
   487  		return rewriteValueAMD64_OpCom8_0(v)
   488  	case OpConst16:
   489  		return rewriteValueAMD64_OpConst16_0(v)
   490  	case OpConst32:
   491  		return rewriteValueAMD64_OpConst32_0(v)
   492  	case OpConst32F:
   493  		return rewriteValueAMD64_OpConst32F_0(v)
   494  	case OpConst64:
   495  		return rewriteValueAMD64_OpConst64_0(v)
   496  	case OpConst64F:
   497  		return rewriteValueAMD64_OpConst64F_0(v)
   498  	case OpConst8:
   499  		return rewriteValueAMD64_OpConst8_0(v)
   500  	case OpConstBool:
   501  		return rewriteValueAMD64_OpConstBool_0(v)
   502  	case OpConstNil:
   503  		return rewriteValueAMD64_OpConstNil_0(v)
   504  	case OpConvert:
   505  		return rewriteValueAMD64_OpConvert_0(v)
   506  	case OpCtz32:
   507  		return rewriteValueAMD64_OpCtz32_0(v)
   508  	case OpCtz64:
   509  		return rewriteValueAMD64_OpCtz64_0(v)
   510  	case OpCvt32Fto32:
   511  		return rewriteValueAMD64_OpCvt32Fto32_0(v)
   512  	case OpCvt32Fto64:
   513  		return rewriteValueAMD64_OpCvt32Fto64_0(v)
   514  	case OpCvt32Fto64F:
   515  		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
   516  	case OpCvt32to32F:
   517  		return rewriteValueAMD64_OpCvt32to32F_0(v)
   518  	case OpCvt32to64F:
   519  		return rewriteValueAMD64_OpCvt32to64F_0(v)
   520  	case OpCvt64Fto32:
   521  		return rewriteValueAMD64_OpCvt64Fto32_0(v)
   522  	case OpCvt64Fto32F:
   523  		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
   524  	case OpCvt64Fto64:
   525  		return rewriteValueAMD64_OpCvt64Fto64_0(v)
   526  	case OpCvt64to32F:
   527  		return rewriteValueAMD64_OpCvt64to32F_0(v)
   528  	case OpCvt64to64F:
   529  		return rewriteValueAMD64_OpCvt64to64F_0(v)
   530  	case OpDiv128u:
   531  		return rewriteValueAMD64_OpDiv128u_0(v)
   532  	case OpDiv16:
   533  		return rewriteValueAMD64_OpDiv16_0(v)
   534  	case OpDiv16u:
   535  		return rewriteValueAMD64_OpDiv16u_0(v)
   536  	case OpDiv32:
   537  		return rewriteValueAMD64_OpDiv32_0(v)
   538  	case OpDiv32F:
   539  		return rewriteValueAMD64_OpDiv32F_0(v)
   540  	case OpDiv32u:
   541  		return rewriteValueAMD64_OpDiv32u_0(v)
   542  	case OpDiv64:
   543  		return rewriteValueAMD64_OpDiv64_0(v)
   544  	case OpDiv64F:
   545  		return rewriteValueAMD64_OpDiv64F_0(v)
   546  	case OpDiv64u:
   547  		return rewriteValueAMD64_OpDiv64u_0(v)
   548  	case OpDiv8:
   549  		return rewriteValueAMD64_OpDiv8_0(v)
   550  	case OpDiv8u:
   551  		return rewriteValueAMD64_OpDiv8u_0(v)
   552  	case OpEq16:
   553  		return rewriteValueAMD64_OpEq16_0(v)
   554  	case OpEq32:
   555  		return rewriteValueAMD64_OpEq32_0(v)
   556  	case OpEq32F:
   557  		return rewriteValueAMD64_OpEq32F_0(v)
   558  	case OpEq64:
   559  		return rewriteValueAMD64_OpEq64_0(v)
   560  	case OpEq64F:
   561  		return rewriteValueAMD64_OpEq64F_0(v)
   562  	case OpEq8:
   563  		return rewriteValueAMD64_OpEq8_0(v)
   564  	case OpEqB:
   565  		return rewriteValueAMD64_OpEqB_0(v)
   566  	case OpEqPtr:
   567  		return rewriteValueAMD64_OpEqPtr_0(v)
   568  	case OpFloor:
   569  		return rewriteValueAMD64_OpFloor_0(v)
   570  	case OpGeq16:
   571  		return rewriteValueAMD64_OpGeq16_0(v)
   572  	case OpGeq16U:
   573  		return rewriteValueAMD64_OpGeq16U_0(v)
   574  	case OpGeq32:
   575  		return rewriteValueAMD64_OpGeq32_0(v)
   576  	case OpGeq32F:
   577  		return rewriteValueAMD64_OpGeq32F_0(v)
   578  	case OpGeq32U:
   579  		return rewriteValueAMD64_OpGeq32U_0(v)
   580  	case OpGeq64:
   581  		return rewriteValueAMD64_OpGeq64_0(v)
   582  	case OpGeq64F:
   583  		return rewriteValueAMD64_OpGeq64F_0(v)
   584  	case OpGeq64U:
   585  		return rewriteValueAMD64_OpGeq64U_0(v)
   586  	case OpGeq8:
   587  		return rewriteValueAMD64_OpGeq8_0(v)
   588  	case OpGeq8U:
   589  		return rewriteValueAMD64_OpGeq8U_0(v)
   590  	case OpGetCallerPC:
   591  		return rewriteValueAMD64_OpGetCallerPC_0(v)
   592  	case OpGetCallerSP:
   593  		return rewriteValueAMD64_OpGetCallerSP_0(v)
   594  	case OpGetClosurePtr:
   595  		return rewriteValueAMD64_OpGetClosurePtr_0(v)
   596  	case OpGetG:
   597  		return rewriteValueAMD64_OpGetG_0(v)
   598  	case OpGreater16:
   599  		return rewriteValueAMD64_OpGreater16_0(v)
   600  	case OpGreater16U:
   601  		return rewriteValueAMD64_OpGreater16U_0(v)
   602  	case OpGreater32:
   603  		return rewriteValueAMD64_OpGreater32_0(v)
   604  	case OpGreater32F:
   605  		return rewriteValueAMD64_OpGreater32F_0(v)
   606  	case OpGreater32U:
   607  		return rewriteValueAMD64_OpGreater32U_0(v)
   608  	case OpGreater64:
   609  		return rewriteValueAMD64_OpGreater64_0(v)
   610  	case OpGreater64F:
   611  		return rewriteValueAMD64_OpGreater64F_0(v)
   612  	case OpGreater64U:
   613  		return rewriteValueAMD64_OpGreater64U_0(v)
   614  	case OpGreater8:
   615  		return rewriteValueAMD64_OpGreater8_0(v)
   616  	case OpGreater8U:
   617  		return rewriteValueAMD64_OpGreater8U_0(v)
   618  	case OpHmul32:
   619  		return rewriteValueAMD64_OpHmul32_0(v)
   620  	case OpHmul32u:
   621  		return rewriteValueAMD64_OpHmul32u_0(v)
   622  	case OpHmul64:
   623  		return rewriteValueAMD64_OpHmul64_0(v)
   624  	case OpHmul64u:
   625  		return rewriteValueAMD64_OpHmul64u_0(v)
   626  	case OpInt64Hi:
   627  		return rewriteValueAMD64_OpInt64Hi_0(v)
   628  	case OpInterCall:
   629  		return rewriteValueAMD64_OpInterCall_0(v)
   630  	case OpIsInBounds:
   631  		return rewriteValueAMD64_OpIsInBounds_0(v)
   632  	case OpIsNonNil:
   633  		return rewriteValueAMD64_OpIsNonNil_0(v)
   634  	case OpIsSliceInBounds:
   635  		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
   636  	case OpLeq16:
   637  		return rewriteValueAMD64_OpLeq16_0(v)
   638  	case OpLeq16U:
   639  		return rewriteValueAMD64_OpLeq16U_0(v)
   640  	case OpLeq32:
   641  		return rewriteValueAMD64_OpLeq32_0(v)
   642  	case OpLeq32F:
   643  		return rewriteValueAMD64_OpLeq32F_0(v)
   644  	case OpLeq32U:
   645  		return rewriteValueAMD64_OpLeq32U_0(v)
   646  	case OpLeq64:
   647  		return rewriteValueAMD64_OpLeq64_0(v)
   648  	case OpLeq64F:
   649  		return rewriteValueAMD64_OpLeq64F_0(v)
   650  	case OpLeq64U:
   651  		return rewriteValueAMD64_OpLeq64U_0(v)
   652  	case OpLeq8:
   653  		return rewriteValueAMD64_OpLeq8_0(v)
   654  	case OpLeq8U:
   655  		return rewriteValueAMD64_OpLeq8U_0(v)
   656  	case OpLess16:
   657  		return rewriteValueAMD64_OpLess16_0(v)
   658  	case OpLess16U:
   659  		return rewriteValueAMD64_OpLess16U_0(v)
   660  	case OpLess32:
   661  		return rewriteValueAMD64_OpLess32_0(v)
   662  	case OpLess32F:
   663  		return rewriteValueAMD64_OpLess32F_0(v)
   664  	case OpLess32U:
   665  		return rewriteValueAMD64_OpLess32U_0(v)
   666  	case OpLess64:
   667  		return rewriteValueAMD64_OpLess64_0(v)
   668  	case OpLess64F:
   669  		return rewriteValueAMD64_OpLess64F_0(v)
   670  	case OpLess64U:
   671  		return rewriteValueAMD64_OpLess64U_0(v)
   672  	case OpLess8:
   673  		return rewriteValueAMD64_OpLess8_0(v)
   674  	case OpLess8U:
   675  		return rewriteValueAMD64_OpLess8U_0(v)
   676  	case OpLoad:
   677  		return rewriteValueAMD64_OpLoad_0(v)
   678  	case OpLsh16x16:
   679  		return rewriteValueAMD64_OpLsh16x16_0(v)
   680  	case OpLsh16x32:
   681  		return rewriteValueAMD64_OpLsh16x32_0(v)
   682  	case OpLsh16x64:
   683  		return rewriteValueAMD64_OpLsh16x64_0(v)
   684  	case OpLsh16x8:
   685  		return rewriteValueAMD64_OpLsh16x8_0(v)
   686  	case OpLsh32x16:
   687  		return rewriteValueAMD64_OpLsh32x16_0(v)
   688  	case OpLsh32x32:
   689  		return rewriteValueAMD64_OpLsh32x32_0(v)
   690  	case OpLsh32x64:
   691  		return rewriteValueAMD64_OpLsh32x64_0(v)
   692  	case OpLsh32x8:
   693  		return rewriteValueAMD64_OpLsh32x8_0(v)
   694  	case OpLsh64x16:
   695  		return rewriteValueAMD64_OpLsh64x16_0(v)
   696  	case OpLsh64x32:
   697  		return rewriteValueAMD64_OpLsh64x32_0(v)
   698  	case OpLsh64x64:
   699  		return rewriteValueAMD64_OpLsh64x64_0(v)
   700  	case OpLsh64x8:
   701  		return rewriteValueAMD64_OpLsh64x8_0(v)
   702  	case OpLsh8x16:
   703  		return rewriteValueAMD64_OpLsh8x16_0(v)
   704  	case OpLsh8x32:
   705  		return rewriteValueAMD64_OpLsh8x32_0(v)
   706  	case OpLsh8x64:
   707  		return rewriteValueAMD64_OpLsh8x64_0(v)
   708  	case OpLsh8x8:
   709  		return rewriteValueAMD64_OpLsh8x8_0(v)
   710  	case OpMod16:
   711  		return rewriteValueAMD64_OpMod16_0(v)
   712  	case OpMod16u:
   713  		return rewriteValueAMD64_OpMod16u_0(v)
   714  	case OpMod32:
   715  		return rewriteValueAMD64_OpMod32_0(v)
   716  	case OpMod32u:
   717  		return rewriteValueAMD64_OpMod32u_0(v)
   718  	case OpMod64:
   719  		return rewriteValueAMD64_OpMod64_0(v)
   720  	case OpMod64u:
   721  		return rewriteValueAMD64_OpMod64u_0(v)
   722  	case OpMod8:
   723  		return rewriteValueAMD64_OpMod8_0(v)
   724  	case OpMod8u:
   725  		return rewriteValueAMD64_OpMod8u_0(v)
   726  	case OpMove:
   727  		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
   728  	case OpMul16:
   729  		return rewriteValueAMD64_OpMul16_0(v)
   730  	case OpMul32:
   731  		return rewriteValueAMD64_OpMul32_0(v)
   732  	case OpMul32F:
   733  		return rewriteValueAMD64_OpMul32F_0(v)
   734  	case OpMul64:
   735  		return rewriteValueAMD64_OpMul64_0(v)
   736  	case OpMul64F:
   737  		return rewriteValueAMD64_OpMul64F_0(v)
   738  	case OpMul64uhilo:
   739  		return rewriteValueAMD64_OpMul64uhilo_0(v)
   740  	case OpMul8:
   741  		return rewriteValueAMD64_OpMul8_0(v)
   742  	case OpNeg16:
   743  		return rewriteValueAMD64_OpNeg16_0(v)
   744  	case OpNeg32:
   745  		return rewriteValueAMD64_OpNeg32_0(v)
   746  	case OpNeg32F:
   747  		return rewriteValueAMD64_OpNeg32F_0(v)
   748  	case OpNeg64:
   749  		return rewriteValueAMD64_OpNeg64_0(v)
   750  	case OpNeg64F:
   751  		return rewriteValueAMD64_OpNeg64F_0(v)
   752  	case OpNeg8:
   753  		return rewriteValueAMD64_OpNeg8_0(v)
   754  	case OpNeq16:
   755  		return rewriteValueAMD64_OpNeq16_0(v)
   756  	case OpNeq32:
   757  		return rewriteValueAMD64_OpNeq32_0(v)
   758  	case OpNeq32F:
   759  		return rewriteValueAMD64_OpNeq32F_0(v)
   760  	case OpNeq64:
   761  		return rewriteValueAMD64_OpNeq64_0(v)
   762  	case OpNeq64F:
   763  		return rewriteValueAMD64_OpNeq64F_0(v)
   764  	case OpNeq8:
   765  		return rewriteValueAMD64_OpNeq8_0(v)
   766  	case OpNeqB:
   767  		return rewriteValueAMD64_OpNeqB_0(v)
   768  	case OpNeqPtr:
   769  		return rewriteValueAMD64_OpNeqPtr_0(v)
   770  	case OpNilCheck:
   771  		return rewriteValueAMD64_OpNilCheck_0(v)
   772  	case OpNot:
   773  		return rewriteValueAMD64_OpNot_0(v)
   774  	case OpOffPtr:
   775  		return rewriteValueAMD64_OpOffPtr_0(v)
   776  	case OpOr16:
   777  		return rewriteValueAMD64_OpOr16_0(v)
   778  	case OpOr32:
   779  		return rewriteValueAMD64_OpOr32_0(v)
   780  	case OpOr64:
   781  		return rewriteValueAMD64_OpOr64_0(v)
   782  	case OpOr8:
   783  		return rewriteValueAMD64_OpOr8_0(v)
   784  	case OpOrB:
   785  		return rewriteValueAMD64_OpOrB_0(v)
   786  	case OpPopCount16:
   787  		return rewriteValueAMD64_OpPopCount16_0(v)
   788  	case OpPopCount32:
   789  		return rewriteValueAMD64_OpPopCount32_0(v)
   790  	case OpPopCount64:
   791  		return rewriteValueAMD64_OpPopCount64_0(v)
   792  	case OpPopCount8:
   793  		return rewriteValueAMD64_OpPopCount8_0(v)
   794  	case OpRound32F:
   795  		return rewriteValueAMD64_OpRound32F_0(v)
   796  	case OpRound64F:
   797  		return rewriteValueAMD64_OpRound64F_0(v)
   798  	case OpRoundToEven:
   799  		return rewriteValueAMD64_OpRoundToEven_0(v)
   800  	case OpRsh16Ux16:
   801  		return rewriteValueAMD64_OpRsh16Ux16_0(v)
   802  	case OpRsh16Ux32:
   803  		return rewriteValueAMD64_OpRsh16Ux32_0(v)
   804  	case OpRsh16Ux64:
   805  		return rewriteValueAMD64_OpRsh16Ux64_0(v)
   806  	case OpRsh16Ux8:
   807  		return rewriteValueAMD64_OpRsh16Ux8_0(v)
   808  	case OpRsh16x16:
   809  		return rewriteValueAMD64_OpRsh16x16_0(v)
   810  	case OpRsh16x32:
   811  		return rewriteValueAMD64_OpRsh16x32_0(v)
   812  	case OpRsh16x64:
   813  		return rewriteValueAMD64_OpRsh16x64_0(v)
   814  	case OpRsh16x8:
   815  		return rewriteValueAMD64_OpRsh16x8_0(v)
   816  	case OpRsh32Ux16:
   817  		return rewriteValueAMD64_OpRsh32Ux16_0(v)
   818  	case OpRsh32Ux32:
   819  		return rewriteValueAMD64_OpRsh32Ux32_0(v)
   820  	case OpRsh32Ux64:
   821  		return rewriteValueAMD64_OpRsh32Ux64_0(v)
   822  	case OpRsh32Ux8:
   823  		return rewriteValueAMD64_OpRsh32Ux8_0(v)
   824  	case OpRsh32x16:
   825  		return rewriteValueAMD64_OpRsh32x16_0(v)
   826  	case OpRsh32x32:
   827  		return rewriteValueAMD64_OpRsh32x32_0(v)
   828  	case OpRsh32x64:
   829  		return rewriteValueAMD64_OpRsh32x64_0(v)
   830  	case OpRsh32x8:
   831  		return rewriteValueAMD64_OpRsh32x8_0(v)
   832  	case OpRsh64Ux16:
   833  		return rewriteValueAMD64_OpRsh64Ux16_0(v)
   834  	case OpRsh64Ux32:
   835  		return rewriteValueAMD64_OpRsh64Ux32_0(v)
   836  	case OpRsh64Ux64:
   837  		return rewriteValueAMD64_OpRsh64Ux64_0(v)
   838  	case OpRsh64Ux8:
   839  		return rewriteValueAMD64_OpRsh64Ux8_0(v)
   840  	case OpRsh64x16:
   841  		return rewriteValueAMD64_OpRsh64x16_0(v)
   842  	case OpRsh64x32:
   843  		return rewriteValueAMD64_OpRsh64x32_0(v)
   844  	case OpRsh64x64:
   845  		return rewriteValueAMD64_OpRsh64x64_0(v)
   846  	case OpRsh64x8:
   847  		return rewriteValueAMD64_OpRsh64x8_0(v)
   848  	case OpRsh8Ux16:
   849  		return rewriteValueAMD64_OpRsh8Ux16_0(v)
   850  	case OpRsh8Ux32:
   851  		return rewriteValueAMD64_OpRsh8Ux32_0(v)
   852  	case OpRsh8Ux64:
   853  		return rewriteValueAMD64_OpRsh8Ux64_0(v)
   854  	case OpRsh8Ux8:
   855  		return rewriteValueAMD64_OpRsh8Ux8_0(v)
   856  	case OpRsh8x16:
   857  		return rewriteValueAMD64_OpRsh8x16_0(v)
   858  	case OpRsh8x32:
   859  		return rewriteValueAMD64_OpRsh8x32_0(v)
   860  	case OpRsh8x64:
   861  		return rewriteValueAMD64_OpRsh8x64_0(v)
   862  	case OpRsh8x8:
   863  		return rewriteValueAMD64_OpRsh8x8_0(v)
   864  	case OpSelect0:
   865  		return rewriteValueAMD64_OpSelect0_0(v)
   866  	case OpSelect1:
   867  		return rewriteValueAMD64_OpSelect1_0(v)
   868  	case OpSignExt16to32:
   869  		return rewriteValueAMD64_OpSignExt16to32_0(v)
   870  	case OpSignExt16to64:
   871  		return rewriteValueAMD64_OpSignExt16to64_0(v)
   872  	case OpSignExt32to64:
   873  		return rewriteValueAMD64_OpSignExt32to64_0(v)
   874  	case OpSignExt8to16:
   875  		return rewriteValueAMD64_OpSignExt8to16_0(v)
   876  	case OpSignExt8to32:
   877  		return rewriteValueAMD64_OpSignExt8to32_0(v)
   878  	case OpSignExt8to64:
   879  		return rewriteValueAMD64_OpSignExt8to64_0(v)
   880  	case OpSlicemask:
   881  		return rewriteValueAMD64_OpSlicemask_0(v)
   882  	case OpSqrt:
   883  		return rewriteValueAMD64_OpSqrt_0(v)
   884  	case OpStaticCall:
   885  		return rewriteValueAMD64_OpStaticCall_0(v)
   886  	case OpStore:
   887  		return rewriteValueAMD64_OpStore_0(v)
   888  	case OpSub16:
   889  		return rewriteValueAMD64_OpSub16_0(v)
   890  	case OpSub32:
   891  		return rewriteValueAMD64_OpSub32_0(v)
   892  	case OpSub32F:
   893  		return rewriteValueAMD64_OpSub32F_0(v)
   894  	case OpSub64:
   895  		return rewriteValueAMD64_OpSub64_0(v)
   896  	case OpSub64F:
   897  		return rewriteValueAMD64_OpSub64F_0(v)
   898  	case OpSub8:
   899  		return rewriteValueAMD64_OpSub8_0(v)
   900  	case OpSubPtr:
   901  		return rewriteValueAMD64_OpSubPtr_0(v)
   902  	case OpTrunc:
   903  		return rewriteValueAMD64_OpTrunc_0(v)
   904  	case OpTrunc16to8:
   905  		return rewriteValueAMD64_OpTrunc16to8_0(v)
   906  	case OpTrunc32to16:
   907  		return rewriteValueAMD64_OpTrunc32to16_0(v)
   908  	case OpTrunc32to8:
   909  		return rewriteValueAMD64_OpTrunc32to8_0(v)
   910  	case OpTrunc64to16:
   911  		return rewriteValueAMD64_OpTrunc64to16_0(v)
   912  	case OpTrunc64to32:
   913  		return rewriteValueAMD64_OpTrunc64to32_0(v)
   914  	case OpTrunc64to8:
   915  		return rewriteValueAMD64_OpTrunc64to8_0(v)
   916  	case OpWB:
   917  		return rewriteValueAMD64_OpWB_0(v)
   918  	case OpXor16:
   919  		return rewriteValueAMD64_OpXor16_0(v)
   920  	case OpXor32:
   921  		return rewriteValueAMD64_OpXor32_0(v)
   922  	case OpXor64:
   923  		return rewriteValueAMD64_OpXor64_0(v)
   924  	case OpXor8:
   925  		return rewriteValueAMD64_OpXor8_0(v)
   926  	case OpZero:
   927  		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
   928  	case OpZeroExt16to32:
   929  		return rewriteValueAMD64_OpZeroExt16to32_0(v)
   930  	case OpZeroExt16to64:
   931  		return rewriteValueAMD64_OpZeroExt16to64_0(v)
   932  	case OpZeroExt32to64:
   933  		return rewriteValueAMD64_OpZeroExt32to64_0(v)
   934  	case OpZeroExt8to16:
   935  		return rewriteValueAMD64_OpZeroExt8to16_0(v)
   936  	case OpZeroExt8to32:
   937  		return rewriteValueAMD64_OpZeroExt8to32_0(v)
   938  	case OpZeroExt8to64:
   939  		return rewriteValueAMD64_OpZeroExt8to64_0(v)
   940  	}
   941  	return false
   942  }
// rewriteValueAMD64_OpAMD64ADDL_0 applies the first group of generated
// rewrite rules for ADDL: folding a constant operand into ADDLconst,
// recognizing shift pairs that form rotates (ROLLconst, and ROLWconst/
// ROLBconst for narrow result types), and turning an add of a negation
// into SUBL. It reports whether a rewrite was performed.
//
// Each rule below is a one-iteration for-loop: "break" means the rule
// did not match and control falls through to the next rule.
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1] // generated bounds hint: v has two args
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		// (x<<c)+(x>>(32-c)) over 32 bits is a rotate left by c.
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type // 16-bit rotate only applies when the result type is 2 bytes
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type // 8-bit rotate only applies when the result type is 1 byte
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDL_10 applies the remaining generated rewrite
// rules for ADDL: merging a MOVLload operand into the add itself (ADDLmem)
// when the generated predicate canMergeLoad permits it, in either operand
// order. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1] // generated bounds hint: v has two args
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLconst_0 applies the generated rewrite rules
// for ADDLconst: eliding an add of zero, folding into a constant operand,
// combining consecutive constant adds, and folding the constant into a
// LEAL displacement. 32-bit wraparound is made explicit via int64(int32(...)).
// It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		// only the low 32 bits matter for an ADDL; adding 0 is a no-op
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d)) // fold with 32-bit wraparound
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		// combined displacement must still fit in a signed 32-bit field
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLconstmem_0 applies the generated rewrite rule
// for ADDLconstmem: when the memory operand is the location just written by
// a MOVSSstore of x at the same [off]{sym} address, read the stored bits
// back directly (MOVLf2i moves them from an XMM register to a GP register)
// instead of going through memory. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
	for {
		valOff := v.AuxInt // packed value+offset pair; unpacked via ValAndOff below
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLmem_0 applies the generated rewrite rule for
// ADDLmem: when the memory operand is the location just written by a
// MOVSSstore of y at the same [off]{sym} address, bypass memory and use the
// stored bits directly via MOVLf2i, reverting to a plain register ADDL.
// It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated bounds hint: v has three args (x, ptr, mem)
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQ_0 applies the first group of generated
// rewrite rules for ADDQ: folding a 32-bit-representable constant into
// ADDQconst, recognizing 64-bit rotate idioms (ROLQconst) from paired
// shifts, and converting x + (y<<k) for k in {1,2,3} into the matching
// scaled LEAQ form. It reports whether a rewrite was performed.
//
// Each rule below is a one-iteration for-loop: "break" means the rule
// did not match and control falls through to the next rule.
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1] // generated bounds hint: v has two args
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		// ADDQconst's immediate is sign-extended from 32 bits, so c must fit
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		// (x<<c)+(x>>(64-c)) over 64 bits is a rotate left by c.
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8) // x + y*8
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4) // x + y*4
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2) // x + y*2
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQ_10 applies the second group of generated
// rewrite rules for ADDQ: recognizing x + (y+y) and x + (x+y) shapes as
// LEAQ2, folding an ADDQconst operand into LEAQ1 [c], and combining an
// ADDQ with a LEAQ operand into LEAQ1 [c] {s} (only when neither operand
// is the SB pseudo-register). It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1] // generated bounds hint: v has two args
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2) // x + y*2
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2) // x + x + y = y + x*2
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1) // x + y + c in one instruction
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		// SB (static base) cannot be used as the index of a two-register LEA
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQ_20 applies the final group of generated
// rewrite rules for ADDQ: turning an add of a negation into SUBQ, and
// merging a MOVQload operand into the add itself (ADDQmem) when the
// generated predicate canMergeLoad permits it, in either operand order.
// It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1] // generated bounds hint: v has two args
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQconst_0 tries, in order, the generated
// rewrite rules for ADDQconst shown in the match/cond/result comments
// below (folding into LEAQ forms, eliminating adds of 0, and constant
// folding). The first rule that matches rewrites v in place and returns
// true; if none match, v is untouched and false is returned. Code
// generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1] // early bounds check for the two Args loads below
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// cond:
	// result: (LEAQ [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQconstmem_0 applies the single generated
// ADDQconstmem rule below: when the loaded memory operand comes straight
// from an SSE double store to the same address, reuse the stored value via
// a register-to-register move (MOVQf2i) instead of reloading it. Rewrites
// v in place and returns true on a match; returns false otherwise.
// Code generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1] // early bounds check for the Args loads below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQmem_0 applies the single generated ADDQmem
// rule below: when the memory operand was just stored by an SSE double
// store to the same [off]{sym}ptr address, un-fuse the load and use the
// stored value directly through MOVQf2i. Rewrites v in place and returns
// true on a match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the Args loads below
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSD_0 applies the generated ADDSD rules
// below: both rules fold a MOVSDload operand (in either argument
// position) into the add, producing ADDSDmem, when canMergeLoad permits.
// Rewrites v in place and returns true on the first match; returns false
// otherwise. Code generated from gen/AMD64.rules — edit the rules, not
// this function.
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1] // early bounds check for the two Args loads below
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSDmem_0 applies the single generated
// ADDSDmem rule below: when the memory operand was just written by an
// integer MOVQstore to the same address, use the stored value through a
// MOVQi2f register move instead of reloading. Rewrites v in place and
// returns true on a match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the Args loads below
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSS_0 applies the generated ADDSS rules
// below: both rules fold a MOVSSload operand (in either argument
// position) into the add, producing ADDSSmem, when canMergeLoad permits.
// Rewrites v in place and returns true on the first match; returns false
// otherwise. Code generated from gen/AMD64.rules — edit the rules, not
// this function.
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1] // early bounds check for the two Args loads below
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSSmem_0 applies the single generated
// ADDSSmem rule below: when the memory operand was just written by an
// integer MOVLstore to the same address, use the stored value through a
// MOVLi2f register move instead of reloading. Rewrites v in place and
// returns true on a match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the Args loads below
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDL_0 applies the generated ANDL rules below,
// in order: fold a constant operand (either side) into ANDLconst,
// simplify x&x to x, and fold a MOVLload operand (either side) into
// ANDLmem when canMergeLoad permits. Rewrites v in place and returns true
// on the first match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1] // early bounds check for the two Args loads below
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconst_0 applies the generated ANDLconst
// rules below, in order: merge nested constant masks, recognize the
// 0xFF/0xFFFF masks as zero-extensions, simplify masks that are all-zero
// or all-one in their low 32 bits, and fold a constant operand. Rewrites
// v in place and returns true on the first match; returns false
// otherwise. Code generated from gen/AMD64.rules — edit the rules, not
// this function.
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLmem_0 applies the single generated ANDLmem
// rule below: when the memory operand was just written by an SSE single
// store to the same address, use the stored value through a MOVLf2i
// register move instead of reloading. Rewrites v in place and returns
// true on a match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the Args loads below
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQ_0 applies the generated ANDQ rules below,
// in order: fold a 32-bit-representable constant operand (either side)
// into ANDQconst, simplify x&x to x, and fold a MOVQload operand (either
// side) into ANDQmem when canMergeLoad permits. Rewrites v in place and
// returns true on the first match; returns false otherwise. Code
// generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1] // early bounds check for the two Args loads below
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconst_0 applies the generated ANDQconst
// rules below, in order: merge nested constant masks, recognize the
// 0xFF/0xFFFF/0xFFFFFFFF masks as zero-extensions, simplify masks of 0
// and -1, and fold a constant operand. Rewrites v in place and returns
// true on the first match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQmem_0 applies the single generated ANDQmem
// rule below: when the memory operand was just written by an SSE double
// store to the same address, use the stored value through a MOVQf2i
// register move instead of reloading. Rewrites v in place and returns
// true on a match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the Args loads below
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSFQ_0 applies the generated BSFQ rules below:
// when the argument is an ORQconst sentinel-bit pattern (1<<8 over a
// byte zero-extension, or 1<<16 over a word zero-extension), drop the
// now-redundant zero-extension, since the ORed-in high bit already bounds
// the scan. Rewrites v in place and returns true on the first match;
// returns false otherwise. Code generated from gen/AMD64.rules — edit the
// rules, not this function.
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTQconst_0 applies the single generated
// BTQconst rule below: a quadword bit test on a bit index below 32 can
// use the shorter 32-bit BTLconst form. Rewrites v in place and returns
// true on a match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQEQ_0 applies the single generated CMOVQEQ
// rule below: when the flags come from BSFQ of an ORQconst with a nonzero
// constant, the scanned value cannot be zero, so the equal branch of the
// conditional move is dead and v reduces to its first operand. Rewrites v
// in place and returns true on a match; returns false otherwise. Code
// generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		_ = v.Args[2] // early bounds check for the Args loads below
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPB_0 applies the generated CMPB rules below:
// a constant in the second operand folds directly into CMPBconst; a
// constant in the first operand folds into CMPBconst with the operands
// swapped, wrapped in InvertFlags to preserve the comparison's sense.
// Rewrites v in place and returns true on the first match; returns false
// otherwise. Code generated from gen/AMD64.rules — edit the rules, not
// this function.
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1] // early bounds check for the two Args loads below
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBconst_0 applies the generated CMPBconst
// rules below, in order: fully evaluate comparisons of two constants into
// the appropriate Flag* pseudo-value (covering both signed and unsigned
// orderings of the 8-bit truncations), derive FlagLT_ULT from a small
// non-negative AND mask, and turn comparisons against 0 into the
// equivalent TESTB forms. Rewrites v in place and returns true on the
// first match; returns false otherwise. Code generated from
// gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1] // early bounds check for the two Args loads below
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPL_0 applies the generated CMPL rules below:
// a constant in the second operand folds directly into CMPLconst; a
// constant in the first operand folds into CMPLconst with the operands
// swapped, wrapped in InvertFlags to preserve the comparison's sense.
// Rewrites v in place and returns true on the first match; returns false
// otherwise. Code generated from gen/AMD64.rules — edit the rules, not
// this function.
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b // blank-assigned in case no rule body uses it
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1] // early bounds check for the two Args loads below
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconst_0 simplifies (CMPLconst x [y]):
//   - a compare of two constants folds to the exact flag value it produces;
//   - a compare whose outcome is implied by a shift or mask on the operand
//     folds to FlagLT_ULT;
//   - a compare against zero is rewritten into the equivalent TEST form.
// Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// Constant-vs-constant compares: fold to the precise flag result.
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// A value shifted right by c has at most 32-c significant bits, so it is
	// unsigned-below n as soon as 1<<(32-c) <= n.
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// A masked value x&m is at most m, so it is below n when 0 <= m < n.
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// Compares against zero become the TEST form.
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
  3286  func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
  3287  	b := v.Block
  3288  	_ = b
  3289  	// match: (CMPQ x (MOVQconst [c]))
  3290  	// cond: is32Bit(c)
  3291  	// result: (CMPQconst x [c])
  3292  	for {
  3293  		_ = v.Args[1]
  3294  		x := v.Args[0]
  3295  		v_1 := v.Args[1]
  3296  		if v_1.Op != OpAMD64MOVQconst {
  3297  			break
  3298  		}
  3299  		c := v_1.AuxInt
  3300  		if !(is32Bit(c)) {
  3301  			break
  3302  		}
  3303  		v.reset(OpAMD64CMPQconst)
  3304  		v.AuxInt = c
  3305  		v.AddArg(x)
  3306  		return true
  3307  	}
  3308  	// match: (CMPQ (MOVQconst [c]) x)
  3309  	// cond: is32Bit(c)
  3310  	// result: (InvertFlags (CMPQconst x [c]))
  3311  	for {
  3312  		_ = v.Args[1]
  3313  		v_0 := v.Args[0]
  3314  		if v_0.Op != OpAMD64MOVQconst {
  3315  			break
  3316  		}
  3317  		c := v_0.AuxInt
  3318  		x := v.Args[1]
  3319  		if !(is32Bit(c)) {
  3320  			break
  3321  		}
  3322  		v.reset(OpAMD64InvertFlags)
  3323  		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
  3324  		v0.AuxInt = c
  3325  		v0.AddArg(x)
  3326  		v.AddArg(v0)
  3327  		return true
  3328  	}
  3329  	return false
  3330  }
// rewriteValueAMD64_OpAMD64CMPQconst_0 simplifies (CMPQconst x [y]):
//   - compares whose operand is a masked-and-negated shift count fold to
//     FlagLT_ULT (the operand's range is provably below 32);
//   - a compare of two constants folds to the exact flag value it produces;
//   - a compare of a zero-extended narrow value against a larger constant
//     folds to FlagLT_ULT.
// Rules are tried in order; further CMPQconst rules continue in
// rewriteValueAMD64_OpAMD64CMPQconst_10.
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// (ANDQconst [15] _) is in [0,15]; adding -16 gives [-16,-1]; negating
	// gives [1,16], which is always unsigned-below 32.
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// Same reasoning with mask 7: the operand is in [1,8], below 32.
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// Constant-vs-constant compares: fold to the precise flag result.
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// A zero-extended byte is at most 0xFF, so any larger constant makes the
	// compare unsigned-below. The next two rules do the same for 16- and
	// 32-bit zero extensions.
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconst_10 continues the CMPQconst rules from
// rewriteValueAMD64_OpAMD64CMPQconst_0:
//   - compares whose outcome is implied by a shift or mask on the operand
//     fold to FlagLT_ULT;
//   - compares against zero are rewritten into the equivalent TEST form.
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	// A value shifted right by c has at most 64-c significant bits, so it is
	// unsigned-below n as soon as 1<<(64-c) <= n.
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// A masked value x&m is at most m, so it is below n when 0 <= m < n.
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// Compares against zero become the TEST form.
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
  3616  func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
  3617  	b := v.Block
  3618  	_ = b
  3619  	// match: (CMPW x (MOVLconst [c]))
  3620  	// cond:
  3621  	// result: (CMPWconst x [int64(int16(c))])
  3622  	for {
  3623  		_ = v.Args[1]
  3624  		x := v.Args[0]
  3625  		v_1 := v.Args[1]
  3626  		if v_1.Op != OpAMD64MOVLconst {
  3627  			break
  3628  		}
  3629  		c := v_1.AuxInt
  3630  		v.reset(OpAMD64CMPWconst)
  3631  		v.AuxInt = int64(int16(c))
  3632  		v.AddArg(x)
  3633  		return true
  3634  	}
  3635  	// match: (CMPW (MOVLconst [c]) x)
  3636  	// cond:
  3637  	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
  3638  	for {
  3639  		_ = v.Args[1]
  3640  		v_0 := v.Args[0]
  3641  		if v_0.Op != OpAMD64MOVLconst {
  3642  			break
  3643  		}
  3644  		c := v_0.AuxInt
  3645  		x := v.Args[1]
  3646  		v.reset(OpAMD64InvertFlags)
  3647  		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
  3648  		v0.AuxInt = int64(int16(c))
  3649  		v0.AddArg(x)
  3650  		v.AddArg(v0)
  3651  		return true
  3652  	}
  3653  	return false
  3654  }
// rewriteValueAMD64_OpAMD64CMPWconst_0 simplifies (CMPWconst x [y]):
//   - a compare of two constants folds to the exact flag value it produces
//     (all comparisons done at 16-bit width);
//   - a compare whose outcome is implied by a mask on the operand folds to
//     FlagLT_ULT;
//   - a compare against zero is rewritten into the equivalent TEST form.
// Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	// Constant-vs-constant compares: fold to the precise flag result.
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// A masked value x&m is at most m, so it is below n when 0 <= m < n
	// (compared as int16).
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// Compares against zero become the TEST form.
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
  3804  func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
  3805  	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
  3806  	// cond: is32Bit(off1+off2)
  3807  	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
  3808  	for {
  3809  		off1 := v.AuxInt
  3810  		sym := v.Aux
  3811  		_ = v.Args[3]
  3812  		v_0 := v.Args[0]
  3813  		if v_0.Op != OpAMD64ADDQconst {
  3814  			break
  3815  		}
  3816  		off2 := v_0.AuxInt
  3817  		ptr := v_0.Args[0]
  3818  		old := v.Args[1]
  3819  		new_ := v.Args[2]
  3820  		mem := v.Args[3]
  3821  		if !(is32Bit(off1 + off2)) {
  3822  			break
  3823  		}
  3824  		v.reset(OpAMD64CMPXCHGLlock)
  3825  		v.AuxInt = off1 + off2
  3826  		v.Aux = sym
  3827  		v.AddArg(ptr)
  3828  		v.AddArg(old)
  3829  		v.AddArg(new_)
  3830  		v.AddArg(mem)
  3831  		return true
  3832  	}
  3833  	return false
  3834  }
  3835  func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
  3836  	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
  3837  	// cond: is32Bit(off1+off2)
  3838  	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  3839  	for {
  3840  		off1 := v.AuxInt
  3841  		sym := v.Aux
  3842  		_ = v.Args[3]
  3843  		v_0 := v.Args[0]
  3844  		if v_0.Op != OpAMD64ADDQconst {
  3845  			break
  3846  		}
  3847  		off2 := v_0.AuxInt
  3848  		ptr := v_0.Args[0]
  3849  		old := v.Args[1]
  3850  		new_ := v.Args[2]
  3851  		mem := v.Args[3]
  3852  		if !(is32Bit(off1 + off2)) {
  3853  			break
  3854  		}
  3855  		v.reset(OpAMD64CMPXCHGQlock)
  3856  		v.AuxInt = off1 + off2
  3857  		v.Aux = sym
  3858  		v.AddArg(ptr)
  3859  		v.AddArg(old)
  3860  		v.AddArg(new_)
  3861  		v.AddArg(mem)
  3862  		return true
  3863  	}
  3864  	return false
  3865  }
  3866  func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
  3867  	// match: (LEAL [c] {s} (ADDLconst [d] x))
  3868  	// cond: is32Bit(c+d)
  3869  	// result: (LEAL [c+d] {s} x)
  3870  	for {
  3871  		c := v.AuxInt
  3872  		s := v.Aux
  3873  		v_0 := v.Args[0]
  3874  		if v_0.Op != OpAMD64ADDLconst {
  3875  			break
  3876  		}
  3877  		d := v_0.AuxInt
  3878  		x := v_0.Args[0]
  3879  		if !(is32Bit(c + d)) {
  3880  			break
  3881  		}
  3882  		v.reset(OpAMD64LEAL)
  3883  		v.AuxInt = c + d
  3884  		v.Aux = s
  3885  		v.AddArg(x)
  3886  		return true
  3887  	}
  3888  	return false
  3889  }
// rewriteValueAMD64_OpAMD64LEAQ_0 simplifies LEAQ address computations:
//   - folds an ADDQconst operand into the LEAQ's displacement;
//   - converts (LEAQ (ADDQ x y)) into the two-register LEAQ1 form when
//     neither operand is the static base pointer (SB cannot be an index);
//   - collapses a LEAQ of another LEAQ/LEAQ1/LEAQ2/LEAQ4/LEAQ8 into a single
//     instruction by summing offsets and merging symbols.
// All folds require the combined offset to fit in 32 bits.
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// Nested LEAQ forms: add the displacements and merge the symbols,
	// preserving the inner instruction's scale.
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ1_0 simplifies LEAQ1 (base+index) address
// computations. LEAQ1 is commutative, so each pattern appears twice, once
// per operand order:
//   - folds an ADDQconst operand into the displacement (the operand must not
//     be SB, and the combined offset must fit in 32 bits);
//   - upgrades a SHLQconst index of 1/2/3 bits into the scaled LEAQ2/LEAQ4/
//     LEAQ8 form;
//   - merges a nested LEAQ operand by summing offsets and merging symbols.
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// A shift-left by 1/2/3 is a multiply by 2/4/8, which LEAQ2/4/8 encode
	// directly as the index scale.
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// Merge a nested LEAQ operand: add offsets and merge symbols.
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
  4298  func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
  4299  	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
  4300  	// cond: is32Bit(c+d) && x.Op != OpSB
  4301  	// result: (LEAQ2 [c+d] {s} x y)
  4302  	for {
  4303  		c := v.AuxInt
  4304  		s := v.Aux
  4305  		_ = v.Args[1]
  4306  		v_0 := v.Args[0]
  4307  		if v_0.Op != OpAMD64ADDQconst {
  4308  			break
  4309  		}
  4310  		d := v_0.AuxInt
  4311  		x := v_0.Args[0]
  4312  		y := v.Args[1]
  4313  		if !(is32Bit(c+d) && x.Op != OpSB) {
  4314  			break
  4315  		}
  4316  		v.reset(OpAMD64LEAQ2)
  4317  		v.AuxInt = c + d
  4318  		v.Aux = s
  4319  		v.AddArg(x)
  4320  		v.AddArg(y)
  4321  		return true
  4322  	}
  4323  	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
  4324  	// cond: is32Bit(c+2*d) && y.Op != OpSB
  4325  	// result: (LEAQ2 [c+2*d] {s} x y)
  4326  	for {
  4327  		c := v.AuxInt
  4328  		s := v.Aux
  4329  		_ = v.Args[1]
  4330  		x := v.Args[0]
  4331  		v_1 := v.Args[1]
  4332  		if v_1.Op != OpAMD64ADDQconst {
  4333  			break
  4334  		}
  4335  		d := v_1.AuxInt
  4336  		y := v_1.Args[0]
  4337  		if !(is32Bit(c+2*d) && y.Op != OpSB) {
  4338  			break
  4339  		}
  4340  		v.reset(OpAMD64LEAQ2)
  4341  		v.AuxInt = c + 2*d
  4342  		v.Aux = s
  4343  		v.AddArg(x)
  4344  		v.AddArg(y)
  4345  		return true
  4346  	}
  4347  	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
  4348  	// cond:
  4349  	// result: (LEAQ4 [c] {s} x y)
  4350  	for {
  4351  		c := v.AuxInt
  4352  		s := v.Aux
  4353  		_ = v.Args[1]
  4354  		x := v.Args[0]
  4355  		v_1 := v.Args[1]
  4356  		if v_1.Op != OpAMD64SHLQconst {
  4357  			break
  4358  		}
  4359  		if v_1.AuxInt != 1 {
  4360  			break
  4361  		}
  4362  		y := v_1.Args[0]
  4363  		v.reset(OpAMD64LEAQ4)
  4364  		v.AuxInt = c
  4365  		v.Aux = s
  4366  		v.AddArg(x)
  4367  		v.AddArg(y)
  4368  		return true
  4369  	}
  4370  	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
  4371  	// cond:
  4372  	// result: (LEAQ8 [c] {s} x y)
  4373  	for {
  4374  		c := v.AuxInt
  4375  		s := v.Aux
  4376  		_ = v.Args[1]
  4377  		x := v.Args[0]
  4378  		v_1 := v.Args[1]
  4379  		if v_1.Op != OpAMD64SHLQconst {
  4380  			break
  4381  		}
  4382  		if v_1.AuxInt != 2 {
  4383  			break
  4384  		}
  4385  		y := v_1.Args[0]
  4386  		v.reset(OpAMD64LEAQ8)
  4387  		v.AuxInt = c
  4388  		v.Aux = s
  4389  		v.AddArg(x)
  4390  		v.AddArg(y)
  4391  		return true
  4392  	}
  4393  	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
  4394  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
  4395  	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4396  	for {
  4397  		off1 := v.AuxInt
  4398  		sym1 := v.Aux
  4399  		_ = v.Args[1]
  4400  		v_0 := v.Args[0]
  4401  		if v_0.Op != OpAMD64LEAQ {
  4402  			break
  4403  		}
  4404  		off2 := v_0.AuxInt
  4405  		sym2 := v_0.Aux
  4406  		x := v_0.Args[0]
  4407  		y := v.Args[1]
  4408  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
  4409  			break
  4410  		}
  4411  		v.reset(OpAMD64LEAQ2)
  4412  		v.AuxInt = off1 + off2
  4413  		v.Aux = mergeSym(sym1, sym2)
  4414  		v.AddArg(x)
  4415  		v.AddArg(y)
  4416  		return true
  4417  	}
  4418  	return false
  4419  }
// rewriteValueAMD64_OpAMD64LEAQ4_0 applies rewrite rules to a LEAQ4 value
// (index operand scaled by 4, as seen in the c+4*d arithmetic below). It
// folds ADDQconst into the AuxInt offset, upgrades LEAQ4 of (SHLQconst [1] y)
// to LEAQ8, and merges a LEAQ base into the offset/symbol. Reports whether
// any rule fired. Generated code: each `for { ... }` is a single pattern
// attempt, exited via `break` on mismatch.
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		// Index operand is scaled by 4, so the folded constant is 4*d.
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ8_0 applies rewrite rules to a LEAQ8 value
// (index operand scaled by 8, as seen in the c+8*d arithmetic below). It
// folds ADDQconst into the AuxInt offset and merges a LEAQ base into the
// offset/symbol. No scale upgrade exists here: 8 is the largest x86 LEA
// scale. Reports whether any rule fired. Generated code: each `for { ... }`
// is a single pattern attempt, exited via `break` on mismatch.
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		// Index operand is scaled by 8, so the folded constant is 8*d.
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQSX_0 applies rewrite rules to a MOVBQSX
// value (byte -> quad sign extension, consistent with the ANDLconst rule
// below treating bit 0x80 as the sign bit). When the operand is a
// single-use load, the extension is fused into a MOVBQSXload emitted in
// the load's own block (`@x.Block`); a masked operand with a clear sign
// bit degrades to a plain AND mask; nested sign extensions collapse.
// Reports whether any rule fired. Generated code; rules tried in order.
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		// x.Uses == 1 guarantees the original load is dead once fused;
		// clobber(x) marks it for removal.
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		// If the mask clears bit 7 (the byte sign bit), sign extension is
		// a no-op and the whole thing is just a low-7-bit mask.
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQSXload_0 applies rewrite rules to a
// MOVBQSXload value: store-to-load forwarding (a load reading the exact
// byte a preceding MOVBstore wrote becomes MOVBQSX of the stored value),
// and folding of a LEAQ address into the load's offset/symbol. Reports
// whether any rule fired. Generated code; rules tried in order.
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		// Same symbol, same offset, same pointer: the load observes
		// exactly the byte the store wrote, so forward the stored value.
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQZX_0 applies rewrite rules to a MOVBQZX
// value (byte -> quad zero extension, consistent with the ANDLconst [c&0xff]
// rule below). A single-use load operand is fused into a plain byte load
// (MOVBload / MOVBloadidx1) emitted in the load's block — a byte load
// already zero-fills the upper bits, so no separate extension op is needed.
// A masked operand degrades to ANDLconst [c & 0xff]; nested zero extensions
// collapse. Reports whether any rule fired. Generated code; rules in order.
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		// x.Uses == 1 guarantees the original load dies once fused;
		// clobber(x) marks it for removal.
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		// Zero-extending a masked value == masking with the low 8 bits.
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBload_0 applies rewrite rules to a MOVBload
// value: store-to-load forwarding (load of a just-stored byte becomes
// MOVBQZX of the stored value), folding of ADDQconst/ADDLconst pointer
// arithmetic into the AuxInt offset, merging LEAQ/LEAL address symbols,
// and conversion of two-register addressing (LEAQ1, ADDQ) into the
// indexed form MOVBloadidx1. Reports whether any rule fired.
// Generated code; rules are attempted strictly in order.
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		// Identical symbol/offset/pointer: the load reads exactly the
		// byte the store wrote, so forward the stored value.
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBloadidx1_0 applies rewrite rules to a
// MOVBloadidx1 value (indexed byte load with scale 1): an ADDQconst in
// either address operand is folded into the AuxInt offset. The four rules
// are the commuted variants — ADDQconst wrapping the first or second
// operand, with that operand treated as the pointer or the index; the
// canonical result always places ptr first and idx second.
// Reports whether any rule fired. Generated code; rules tried in order.
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstore_0 applies the first batch of rewrite
// rules to a MOVBstore value: when the stored byte is a single-use SETxx
// flag-materialization (SETL/SETLE/SETG/SETGE/SETEQ/SETNE/SETB/SETBE/
// SETA/SETAE), the pair is fused into the corresponding SETxxmem op that
// writes the condition result directly to memory. The y.Uses == 1 guard
// ensures the register-form SETxx has no other consumers. Reports whether
// any rule fired; further MOVBstore rules live in _10 and beyond.
// Generated code; rules tried in order.
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstore_10 applies the second batch of generated
// MOVBstore rewrite rules (the _0/_10/_20 suffixes split one rule set across
// functions; see the generated dispatch in rewriteValueAMD64). Each rule is
// documented by its match/cond/result comment from gen/AMD64.rules. It reports
// whether v was rewritten. Code generated — DO NOT EDIT by hand.
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	b := v.Block
	_ = b
	// A byte store only uses the low 8 bits, so sign/zero extensions of the
	// stored value are redundant and can be dropped (next two rules).
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// Fold address arithmetic (ADDQconst/LEAQ/LEAQ1/ADDQ) into the store's
	// addressing mode when the combined offset still fits in 32 bits.
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// The remaining rules combine chains of adjacent single-byte stores of
	// shifted copies of one value w into a single wider store (2, 4, or 8
	// bytes), inserting a byte swap (ROLW/BSWAP) where the bytes were being
	// written in big-endian order. Each clobbered store must have exactly
	// one use so it can be discarded.
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x0 := v.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst {
			break
		}
		if x0_1.AuxInt != 8 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x2 := v.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst {
			break
		}
		if x2_1.AuxInt != 8 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst {
			break
		}
		if x1_1.AuxInt != 16 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst {
			break
		}
		if x0_1.AuxInt != 24 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x6 := v.Args[2]
		if x6.Op != OpAMD64MOVBstore {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst {
			break
		}
		if x6_1.AuxInt != 8 {
			break
		}
		if w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst {
			break
		}
		if x5_1.AuxInt != 16 {
			break
		}
		if w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst {
			break
		}
		if x4_1.AuxInt != 24 {
			break
		}
		if w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstore_20 applies the third batch of generated
// MOVBstore rewrite rules: merging adjacent byte stores of shifted values or
// adjacent byte loads into word-sized operations, plus 32-bit (LEAL/ADDLconst)
// address folding. It reports whether v was rewritten.
// Code generated — DO NOT EDIT by hand.
func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVBstore {
			break
		}
		if mem2.AuxInt != i-1 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload {
			break
		}
		if x2.AuxInt != j-1 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = j - 1
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstoreconst_0 applies the generated rewrite
// rules for MOVBstoreconst (store of a constant byte): folding address
// arithmetic into the ValAndOff-encoded AuxInt, converting to indexed forms,
// and merging two adjacent constant byte stores into one MOVWstoreconst.
// It reports whether v was rewritten. Code generated — DO NOT EDIT by hand.
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// Two constant byte stores at consecutive offsets combine into a single
	// 16-bit constant store; the low byte comes from the lower-addressed
	// store (a), the high byte from this one (c).
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0 applies the generated rewrite
// rules for MOVBstoreconstidx1 (indexed constant byte store): folding an
// ADDQconst on either the base pointer or the index into the ValAndOff
// offset, and merging two adjacent indexed constant byte stores into a single
// MOVWstoreconstidx1. It reports whether v was rewritten.
// Code generated — DO NOT EDIT by hand.
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstoreidx1_0 applies the generated rewrite
// rules (from gen/AMD64.rules) for MOVBstoreidx1 (indexed byte store):
//   - folding an ADDQconst on either the pointer or the index operand into
//     the store's constant offset (when the sum still fits in 32 bits);
//   - combining chains of adjacent single-use byte stores of successive
//     right-shifts of the same value into one wider MOVW/MOVL/MOVQ indexed
//     store, inserting ROLWconst/BSWAPL/BSWAPQ to fix up byte order where
//     the bytes were written high-to-low.
// It reports whether v was rewritten. The match/cond/result comments above
// each loop are the authoritative rule text; clobber presumably marks the
// matched inner stores dead so they are removed — defined elsewhere in this
// package, confirm there.
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[3]
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[3]
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[3]
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[3]
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQSX_0 applies the generated rewrite rules
// (from gen/AMD64.rules) for MOVLQSX (sign-extend a 32-bit value to 64 bits):
//   - folding a single-use MOVLload/MOVQload into a MOVLQSXload, rebuilt in
//     the load's own block (the @x.Block result form);
//   - rewriting MOVLQSX of an ANDLconst whose sign bit is clear into a plain
//     ANDLconst (masking to 31 bits makes the extension a no-op);
//   - collapsing a nested MOVLQSX/MOVWQSX/MOVBQSX to the inner, narrower
//     extension.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// cond:
	// result: (MOVLQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQSXload_0 applies the generated rewrite rules
// (from gen/AMD64.rules) for MOVLQSXload (sign-extending 32-bit load):
//   - store-to-load forwarding: a load from the same sym/offset/pointer as a
//     preceding MOVLstore becomes a MOVLQSX of the stored value;
//   - folding a LEAQ operand's offset and symbol into the load itself.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQZX_0 applies the generated rewrite rules
// (from gen/AMD64.rules) for MOVLQZX (zero-extend a 32-bit value to 64 bits):
//   - folding single-use plain and indexed loads (MOVLload, MOVQload,
//     MOVLloadidx1, MOVLloadidx4) into a 32-bit load rebuilt in the load's
//     block (32-bit loads on amd64 already zero the upper half);
//   - dropping the extension entirely when zeroUpper32Bits reports the
//     operand's upper 32 bits are already known zero;
//   - passing through ANDLconst unchanged and collapsing nested
//     MOVLQZX/MOVWQZX/MOVBQZX to the inner, narrower extension.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// cond:
	// result: (MOVLQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLatomicload_0 applies the generated rewrite
// rules (from gen/AMD64.rules) for MOVLatomicload (32-bit atomic load):
// folding an ADDQconst or LEAQ address computation into the load's constant
// offset (and merged symbol for LEAQ) when the combined offset fits in 32
// bits. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLf2i_0 applies the generated rewrite rule
// (from gen/AMD64.rules) for MOVLf2i: a MOVLf2i of a function Arg is replaced
// by an equivalent Arg of the result type, placed in the function's entry
// block (the @b.Func.Entry result form) — the argument is simply reloaded
// with the new interpretation instead of converted. Reports whether v was
// rewritten.
func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLf2i <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLi2f_0 applies the generated rewrite rule
// (from gen/AMD64.rules) for MOVLi2f — the mirror of the MOVLf2i rule: a
// MOVLi2f of a function Arg becomes an equivalent Arg of the result type in
// the function's entry block, so no register-class conversion instruction is
// needed. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLi2f <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLload_0 applies the generated rewrite rules
// (from gen/AMD64.rules) for MOVLload (32-bit load):
//   - store-to-load forwarding from a matching MOVLstore (result becomes a
//     zero-extension of the stored value);
//   - folding ADDQconst/ADDLconst/LEAQ/LEAL address arithmetic into the
//     load's offset and merged symbol;
//   - converting LEAQ1/LEAQ4/LEAQ8 and plain ADDQ address operands into the
//     corresponding indexed load forms (MOVLloadidx1/4/8);
//   - forwarding a MOVSSstore at the same address as a MOVLf2i of the stored
//     float value (same bits, different register class).
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	return false
}
  7752  func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
  7753  	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
  7754  	// cond:
  7755  	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
  7756  	for {
  7757  		c := v.AuxInt
  7758  		sym := v.Aux
  7759  		_ = v.Args[2]
  7760  		ptr := v.Args[0]
  7761  		v_1 := v.Args[1]
  7762  		if v_1.Op != OpAMD64SHLQconst {
  7763  			break
  7764  		}
  7765  		if v_1.AuxInt != 2 {
  7766  			break
  7767  		}
  7768  		idx := v_1.Args[0]
  7769  		mem := v.Args[2]
  7770  		v.reset(OpAMD64MOVLloadidx4)
  7771  		v.AuxInt = c
  7772  		v.Aux = sym
  7773  		v.AddArg(ptr)
  7774  		v.AddArg(idx)
  7775  		v.AddArg(mem)
  7776  		return true
  7777  	}
  7778  	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
  7779  	// cond:
  7780  	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
  7781  	for {
  7782  		c := v.AuxInt
  7783  		sym := v.Aux
  7784  		_ = v.Args[2]
  7785  		v_0 := v.Args[0]
  7786  		if v_0.Op != OpAMD64SHLQconst {
  7787  			break
  7788  		}
  7789  		if v_0.AuxInt != 2 {
  7790  			break
  7791  		}
  7792  		idx := v_0.Args[0]
  7793  		ptr := v.Args[1]
  7794  		mem := v.Args[2]
  7795  		v.reset(OpAMD64MOVLloadidx4)
  7796  		v.AuxInt = c
  7797  		v.Aux = sym
  7798  		v.AddArg(ptr)
  7799  		v.AddArg(idx)
  7800  		v.AddArg(mem)
  7801  		return true
  7802  	}
  7803  	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
  7804  	// cond:
  7805  	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
  7806  	for {
  7807  		c := v.AuxInt
  7808  		sym := v.Aux
  7809  		_ = v.Args[2]
  7810  		ptr := v.Args[0]
  7811  		v_1 := v.Args[1]
  7812  		if v_1.Op != OpAMD64SHLQconst {
  7813  			break
  7814  		}
  7815  		if v_1.AuxInt != 3 {
  7816  			break
  7817  		}
  7818  		idx := v_1.Args[0]
  7819  		mem := v.Args[2]
  7820  		v.reset(OpAMD64MOVLloadidx8)
  7821  		v.AuxInt = c
  7822  		v.Aux = sym
  7823  		v.AddArg(ptr)
  7824  		v.AddArg(idx)
  7825  		v.AddArg(mem)
  7826  		return true
  7827  	}
  7828  	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
  7829  	// cond:
  7830  	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
  7831  	for {
  7832  		c := v.AuxInt
  7833  		sym := v.Aux
  7834  		_ = v.Args[2]
  7835  		v_0 := v.Args[0]
  7836  		if v_0.Op != OpAMD64SHLQconst {
  7837  			break
  7838  		}
  7839  		if v_0.AuxInt != 3 {
  7840  			break
  7841  		}
  7842  		idx := v_0.Args[0]
  7843  		ptr := v.Args[1]
  7844  		mem := v.Args[2]
  7845  		v.reset(OpAMD64MOVLloadidx8)
  7846  		v.AuxInt = c
  7847  		v.Aux = sym
  7848  		v.AddArg(ptr)
  7849  		v.AddArg(idx)
  7850  		v.AddArg(mem)
  7851  		return true
  7852  	}
  7853  	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
  7854  	// cond: is32Bit(c+d)
  7855  	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  7856  	for {
  7857  		c := v.AuxInt
  7858  		sym := v.Aux
  7859  		_ = v.Args[2]
  7860  		v_0 := v.Args[0]
  7861  		if v_0.Op != OpAMD64ADDQconst {
  7862  			break
  7863  		}
  7864  		d := v_0.AuxInt
  7865  		ptr := v_0.Args[0]
  7866  		idx := v.Args[1]
  7867  		mem := v.Args[2]
  7868  		if !(is32Bit(c + d)) {
  7869  			break
  7870  		}
  7871  		v.reset(OpAMD64MOVLloadidx1)
  7872  		v.AuxInt = c + d
  7873  		v.Aux = sym
  7874  		v.AddArg(ptr)
  7875  		v.AddArg(idx)
  7876  		v.AddArg(mem)
  7877  		return true
  7878  	}
  7879  	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
  7880  	// cond: is32Bit(c+d)
  7881  	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  7882  	for {
  7883  		c := v.AuxInt
  7884  		sym := v.Aux
  7885  		_ = v.Args[2]
  7886  		idx := v.Args[0]
  7887  		v_1 := v.Args[1]
  7888  		if v_1.Op != OpAMD64ADDQconst {
  7889  			break
  7890  		}
  7891  		d := v_1.AuxInt
  7892  		ptr := v_1.Args[0]
  7893  		mem := v.Args[2]
  7894  		if !(is32Bit(c + d)) {
  7895  			break
  7896  		}
  7897  		v.reset(OpAMD64MOVLloadidx1)
  7898  		v.AuxInt = c + d
  7899  		v.Aux = sym
  7900  		v.AddArg(ptr)
  7901  		v.AddArg(idx)
  7902  		v.AddArg(mem)
  7903  		return true
  7904  	}
  7905  	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
  7906  	// cond: is32Bit(c+d)
  7907  	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  7908  	for {
  7909  		c := v.AuxInt
  7910  		sym := v.Aux
  7911  		_ = v.Args[2]
  7912  		ptr := v.Args[0]
  7913  		v_1 := v.Args[1]
  7914  		if v_1.Op != OpAMD64ADDQconst {
  7915  			break
  7916  		}
  7917  		d := v_1.AuxInt
  7918  		idx := v_1.Args[0]
  7919  		mem := v.Args[2]
  7920  		if !(is32Bit(c + d)) {
  7921  			break
  7922  		}
  7923  		v.reset(OpAMD64MOVLloadidx1)
  7924  		v.AuxInt = c + d
  7925  		v.Aux = sym
  7926  		v.AddArg(ptr)
  7927  		v.AddArg(idx)
  7928  		v.AddArg(mem)
  7929  		return true
  7930  	}
  7931  	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
  7932  	// cond: is32Bit(c+d)
  7933  	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  7934  	for {
  7935  		c := v.AuxInt
  7936  		sym := v.Aux
  7937  		_ = v.Args[2]
  7938  		v_0 := v.Args[0]
  7939  		if v_0.Op != OpAMD64ADDQconst {
  7940  			break
  7941  		}
  7942  		d := v_0.AuxInt
  7943  		idx := v_0.Args[0]
  7944  		ptr := v.Args[1]
  7945  		mem := v.Args[2]
  7946  		if !(is32Bit(c + d)) {
  7947  			break
  7948  		}
  7949  		v.reset(OpAMD64MOVLloadidx1)
  7950  		v.AuxInt = c + d
  7951  		v.Aux = sym
  7952  		v.AddArg(ptr)
  7953  		v.AddArg(idx)
  7954  		v.AddArg(mem)
  7955  		return true
  7956  	}
  7957  	return false
  7958  }
  7959  func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
  7960  	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
  7961  	// cond: is32Bit(c+d)
  7962  	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
  7963  	for {
  7964  		c := v.AuxInt
  7965  		sym := v.Aux
  7966  		_ = v.Args[2]
  7967  		v_0 := v.Args[0]
  7968  		if v_0.Op != OpAMD64ADDQconst {
  7969  			break
  7970  		}
  7971  		d := v_0.AuxInt
  7972  		ptr := v_0.Args[0]
  7973  		idx := v.Args[1]
  7974  		mem := v.Args[2]
  7975  		if !(is32Bit(c + d)) {
  7976  			break
  7977  		}
  7978  		v.reset(OpAMD64MOVLloadidx4)
  7979  		v.AuxInt = c + d
  7980  		v.Aux = sym
  7981  		v.AddArg(ptr)
  7982  		v.AddArg(idx)
  7983  		v.AddArg(mem)
  7984  		return true
  7985  	}
  7986  	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
  7987  	// cond: is32Bit(c+4*d)
  7988  	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
  7989  	for {
  7990  		c := v.AuxInt
  7991  		sym := v.Aux
  7992  		_ = v.Args[2]
  7993  		ptr := v.Args[0]
  7994  		v_1 := v.Args[1]
  7995  		if v_1.Op != OpAMD64ADDQconst {
  7996  			break
  7997  		}
  7998  		d := v_1.AuxInt
  7999  		idx := v_1.Args[0]
  8000  		mem := v.Args[2]
  8001  		if !(is32Bit(c + 4*d)) {
  8002  			break
  8003  		}
  8004  		v.reset(OpAMD64MOVLloadidx4)
  8005  		v.AuxInt = c + 4*d
  8006  		v.Aux = sym
  8007  		v.AddArg(ptr)
  8008  		v.AddArg(idx)
  8009  		v.AddArg(mem)
  8010  		return true
  8011  	}
  8012  	return false
  8013  }
  8014  func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool {
  8015  	// match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
  8016  	// cond: is32Bit(c+d)
  8017  	// result: (MOVLloadidx8 [c+d] {sym} ptr idx mem)
  8018  	for {
  8019  		c := v.AuxInt
  8020  		sym := v.Aux
  8021  		_ = v.Args[2]
  8022  		v_0 := v.Args[0]
  8023  		if v_0.Op != OpAMD64ADDQconst {
  8024  			break
  8025  		}
  8026  		d := v_0.AuxInt
  8027  		ptr := v_0.Args[0]
  8028  		idx := v.Args[1]
  8029  		mem := v.Args[2]
  8030  		if !(is32Bit(c + d)) {
  8031  			break
  8032  		}
  8033  		v.reset(OpAMD64MOVLloadidx8)
  8034  		v.AuxInt = c + d
  8035  		v.Aux = sym
  8036  		v.AddArg(ptr)
  8037  		v.AddArg(idx)
  8038  		v.AddArg(mem)
  8039  		return true
  8040  	}
  8041  	// match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
  8042  	// cond: is32Bit(c+8*d)
  8043  	// result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
  8044  	for {
  8045  		c := v.AuxInt
  8046  		sym := v.Aux
  8047  		_ = v.Args[2]
  8048  		ptr := v.Args[0]
  8049  		v_1 := v.Args[1]
  8050  		if v_1.Op != OpAMD64ADDQconst {
  8051  			break
  8052  		}
  8053  		d := v_1.AuxInt
  8054  		idx := v_1.Args[0]
  8055  		mem := v.Args[2]
  8056  		if !(is32Bit(c + 8*d)) {
  8057  			break
  8058  		}
  8059  		v.reset(OpAMD64MOVLloadidx8)
  8060  		v.AuxInt = c + 8*d
  8061  		v.Aux = sym
  8062  		v.AddArg(ptr)
  8063  		v.AddArg(idx)
  8064  		v.AddArg(mem)
  8065  		return true
  8066  	}
  8067  	return false
  8068  }
  8069  func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
  8070  	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
  8071  	// cond:
  8072  	// result: (MOVLstore [off] {sym} ptr x mem)
  8073  	for {
  8074  		off := v.AuxInt
  8075  		sym := v.Aux
  8076  		_ = v.Args[2]
  8077  		ptr := v.Args[0]
  8078  		v_1 := v.Args[1]
  8079  		if v_1.Op != OpAMD64MOVLQSX {
  8080  			break
  8081  		}
  8082  		x := v_1.Args[0]
  8083  		mem := v.Args[2]
  8084  		v.reset(OpAMD64MOVLstore)
  8085  		v.AuxInt = off
  8086  		v.Aux = sym
  8087  		v.AddArg(ptr)
  8088  		v.AddArg(x)
  8089  		v.AddArg(mem)
  8090  		return true
  8091  	}
  8092  	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
  8093  	// cond:
  8094  	// result: (MOVLstore [off] {sym} ptr x mem)
  8095  	for {
  8096  		off := v.AuxInt
  8097  		sym := v.Aux
  8098  		_ = v.Args[2]
  8099  		ptr := v.Args[0]
  8100  		v_1 := v.Args[1]
  8101  		if v_1.Op != OpAMD64MOVLQZX {
  8102  			break
  8103  		}
  8104  		x := v_1.Args[0]
  8105  		mem := v.Args[2]
  8106  		v.reset(OpAMD64MOVLstore)
  8107  		v.AuxInt = off
  8108  		v.Aux = sym
  8109  		v.AddArg(ptr)
  8110  		v.AddArg(x)
  8111  		v.AddArg(mem)
  8112  		return true
  8113  	}
  8114  	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
  8115  	// cond: is32Bit(off1+off2)
  8116  	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
  8117  	for {
  8118  		off1 := v.AuxInt
  8119  		sym := v.Aux
  8120  		_ = v.Args[2]
  8121  		v_0 := v.Args[0]
  8122  		if v_0.Op != OpAMD64ADDQconst {
  8123  			break
  8124  		}
  8125  		off2 := v_0.AuxInt
  8126  		ptr := v_0.Args[0]
  8127  		val := v.Args[1]
  8128  		mem := v.Args[2]
  8129  		if !(is32Bit(off1 + off2)) {
  8130  			break
  8131  		}
  8132  		v.reset(OpAMD64MOVLstore)
  8133  		v.AuxInt = off1 + off2
  8134  		v.Aux = sym
  8135  		v.AddArg(ptr)
  8136  		v.AddArg(val)
  8137  		v.AddArg(mem)
  8138  		return true
  8139  	}
  8140  	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
  8141  	// cond: validOff(off)
  8142  	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
  8143  	for {
  8144  		off := v.AuxInt
  8145  		sym := v.Aux
  8146  		_ = v.Args[2]
  8147  		ptr := v.Args[0]
  8148  		v_1 := v.Args[1]
  8149  		if v_1.Op != OpAMD64MOVLconst {
  8150  			break
  8151  		}
  8152  		c := v_1.AuxInt
  8153  		mem := v.Args[2]
  8154  		if !(validOff(off)) {
  8155  			break
  8156  		}
  8157  		v.reset(OpAMD64MOVLstoreconst)
  8158  		v.AuxInt = makeValAndOff(int64(int32(c)), off)
  8159  		v.Aux = sym
  8160  		v.AddArg(ptr)
  8161  		v.AddArg(mem)
  8162  		return true
  8163  	}
  8164  	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  8165  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8166  	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  8167  	for {
  8168  		off1 := v.AuxInt
  8169  		sym1 := v.Aux
  8170  		_ = v.Args[2]
  8171  		v_0 := v.Args[0]
  8172  		if v_0.Op != OpAMD64LEAQ {
  8173  			break
  8174  		}
  8175  		off2 := v_0.AuxInt
  8176  		sym2 := v_0.Aux
  8177  		base := v_0.Args[0]
  8178  		val := v.Args[1]
  8179  		mem := v.Args[2]
  8180  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8181  			break
  8182  		}
  8183  		v.reset(OpAMD64MOVLstore)
  8184  		v.AuxInt = off1 + off2
  8185  		v.Aux = mergeSym(sym1, sym2)
  8186  		v.AddArg(base)
  8187  		v.AddArg(val)
  8188  		v.AddArg(mem)
  8189  		return true
  8190  	}
  8191  	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
  8192  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8193  	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  8194  	for {
  8195  		off1 := v.AuxInt
  8196  		sym1 := v.Aux
  8197  		_ = v.Args[2]
  8198  		v_0 := v.Args[0]
  8199  		if v_0.Op != OpAMD64LEAQ1 {
  8200  			break
  8201  		}
  8202  		off2 := v_0.AuxInt
  8203  		sym2 := v_0.Aux
  8204  		_ = v_0.Args[1]
  8205  		ptr := v_0.Args[0]
  8206  		idx := v_0.Args[1]
  8207  		val := v.Args[1]
  8208  		mem := v.Args[2]
  8209  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8210  			break
  8211  		}
  8212  		v.reset(OpAMD64MOVLstoreidx1)
  8213  		v.AuxInt = off1 + off2
  8214  		v.Aux = mergeSym(sym1, sym2)
  8215  		v.AddArg(ptr)
  8216  		v.AddArg(idx)
  8217  		v.AddArg(val)
  8218  		v.AddArg(mem)
  8219  		return true
  8220  	}
  8221  	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
  8222  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8223  	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  8224  	for {
  8225  		off1 := v.AuxInt
  8226  		sym1 := v.Aux
  8227  		_ = v.Args[2]
  8228  		v_0 := v.Args[0]
  8229  		if v_0.Op != OpAMD64LEAQ4 {
  8230  			break
  8231  		}
  8232  		off2 := v_0.AuxInt
  8233  		sym2 := v_0.Aux
  8234  		_ = v_0.Args[1]
  8235  		ptr := v_0.Args[0]
  8236  		idx := v_0.Args[1]
  8237  		val := v.Args[1]
  8238  		mem := v.Args[2]
  8239  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8240  			break
  8241  		}
  8242  		v.reset(OpAMD64MOVLstoreidx4)
  8243  		v.AuxInt = off1 + off2
  8244  		v.Aux = mergeSym(sym1, sym2)
  8245  		v.AddArg(ptr)
  8246  		v.AddArg(idx)
  8247  		v.AddArg(val)
  8248  		v.AddArg(mem)
  8249  		return true
  8250  	}
  8251  	// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
  8252  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8253  	// result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  8254  	for {
  8255  		off1 := v.AuxInt
  8256  		sym1 := v.Aux
  8257  		_ = v.Args[2]
  8258  		v_0 := v.Args[0]
  8259  		if v_0.Op != OpAMD64LEAQ8 {
  8260  			break
  8261  		}
  8262  		off2 := v_0.AuxInt
  8263  		sym2 := v_0.Aux
  8264  		_ = v_0.Args[1]
  8265  		ptr := v_0.Args[0]
  8266  		idx := v_0.Args[1]
  8267  		val := v.Args[1]
  8268  		mem := v.Args[2]
  8269  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8270  			break
  8271  		}
  8272  		v.reset(OpAMD64MOVLstoreidx8)
  8273  		v.AuxInt = off1 + off2
  8274  		v.Aux = mergeSym(sym1, sym2)
  8275  		v.AddArg(ptr)
  8276  		v.AddArg(idx)
  8277  		v.AddArg(val)
  8278  		v.AddArg(mem)
  8279  		return true
  8280  	}
  8281  	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
  8282  	// cond: ptr.Op != OpSB
  8283  	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
  8284  	for {
  8285  		off := v.AuxInt
  8286  		sym := v.Aux
  8287  		_ = v.Args[2]
  8288  		v_0 := v.Args[0]
  8289  		if v_0.Op != OpAMD64ADDQ {
  8290  			break
  8291  		}
  8292  		_ = v_0.Args[1]
  8293  		ptr := v_0.Args[0]
  8294  		idx := v_0.Args[1]
  8295  		val := v.Args[1]
  8296  		mem := v.Args[2]
  8297  		if !(ptr.Op != OpSB) {
  8298  			break
  8299  		}
  8300  		v.reset(OpAMD64MOVLstoreidx1)
  8301  		v.AuxInt = off
  8302  		v.Aux = sym
  8303  		v.AddArg(ptr)
  8304  		v.AddArg(idx)
  8305  		v.AddArg(val)
  8306  		v.AddArg(mem)
  8307  		return true
  8308  	}
  8309  	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  8310  	// cond: x.Uses == 1 && clobber(x)
  8311  	// result: (MOVQstore [i-4] {s} p w mem)
  8312  	for {
  8313  		i := v.AuxInt
  8314  		s := v.Aux
  8315  		_ = v.Args[2]
  8316  		p := v.Args[0]
  8317  		v_1 := v.Args[1]
  8318  		if v_1.Op != OpAMD64SHRQconst {
  8319  			break
  8320  		}
  8321  		if v_1.AuxInt != 32 {
  8322  			break
  8323  		}
  8324  		w := v_1.Args[0]
  8325  		x := v.Args[2]
  8326  		if x.Op != OpAMD64MOVLstore {
  8327  			break
  8328  		}
  8329  		if x.AuxInt != i-4 {
  8330  			break
  8331  		}
  8332  		if x.Aux != s {
  8333  			break
  8334  		}
  8335  		_ = x.Args[2]
  8336  		if p != x.Args[0] {
  8337  			break
  8338  		}
  8339  		if w != x.Args[1] {
  8340  			break
  8341  		}
  8342  		mem := x.Args[2]
  8343  		if !(x.Uses == 1 && clobber(x)) {
  8344  			break
  8345  		}
  8346  		v.reset(OpAMD64MOVQstore)
  8347  		v.AuxInt = i - 4
  8348  		v.Aux = s
  8349  		v.AddArg(p)
  8350  		v.AddArg(w)
  8351  		v.AddArg(mem)
  8352  		return true
  8353  	}
  8354  	return false
  8355  }
  8356  func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
  8357  	b := v.Block
  8358  	_ = b
  8359  	typ := &b.Func.Config.Types
  8360  	_ = typ
  8361  	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  8362  	// cond: x.Uses == 1 && clobber(x)
  8363  	// result: (MOVQstore [i-4] {s} p w0 mem)
  8364  	for {
  8365  		i := v.AuxInt
  8366  		s := v.Aux
  8367  		_ = v.Args[2]
  8368  		p := v.Args[0]
  8369  		v_1 := v.Args[1]
  8370  		if v_1.Op != OpAMD64SHRQconst {
  8371  			break
  8372  		}
  8373  		j := v_1.AuxInt
  8374  		w := v_1.Args[0]
  8375  		x := v.Args[2]
  8376  		if x.Op != OpAMD64MOVLstore {
  8377  			break
  8378  		}
  8379  		if x.AuxInt != i-4 {
  8380  			break
  8381  		}
  8382  		if x.Aux != s {
  8383  			break
  8384  		}
  8385  		_ = x.Args[2]
  8386  		if p != x.Args[0] {
  8387  			break
  8388  		}
  8389  		w0 := x.Args[1]
  8390  		if w0.Op != OpAMD64SHRQconst {
  8391  			break
  8392  		}
  8393  		if w0.AuxInt != j-32 {
  8394  			break
  8395  		}
  8396  		if w != w0.Args[0] {
  8397  			break
  8398  		}
  8399  		mem := x.Args[2]
  8400  		if !(x.Uses == 1 && clobber(x)) {
  8401  			break
  8402  		}
  8403  		v.reset(OpAMD64MOVQstore)
  8404  		v.AuxInt = i - 4
  8405  		v.Aux = s
  8406  		v.AddArg(p)
  8407  		v.AddArg(w0)
  8408  		v.AddArg(mem)
  8409  		return true
  8410  	}
  8411  	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
  8412  	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
  8413  	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
  8414  	for {
  8415  		i := v.AuxInt
  8416  		s := v.Aux
  8417  		_ = v.Args[2]
  8418  		p := v.Args[0]
  8419  		x1 := v.Args[1]
  8420  		if x1.Op != OpAMD64MOVLload {
  8421  			break
  8422  		}
  8423  		j := x1.AuxInt
  8424  		s2 := x1.Aux
  8425  		_ = x1.Args[1]
  8426  		p2 := x1.Args[0]
  8427  		mem := x1.Args[1]
  8428  		mem2 := v.Args[2]
  8429  		if mem2.Op != OpAMD64MOVLstore {
  8430  			break
  8431  		}
  8432  		if mem2.AuxInt != i-4 {
  8433  			break
  8434  		}
  8435  		if mem2.Aux != s {
  8436  			break
  8437  		}
  8438  		_ = mem2.Args[2]
  8439  		if p != mem2.Args[0] {
  8440  			break
  8441  		}
  8442  		x2 := mem2.Args[1]
  8443  		if x2.Op != OpAMD64MOVLload {
  8444  			break
  8445  		}
  8446  		if x2.AuxInt != j-4 {
  8447  			break
  8448  		}
  8449  		if x2.Aux != s2 {
  8450  			break
  8451  		}
  8452  		_ = x2.Args[1]
  8453  		if p2 != x2.Args[0] {
  8454  			break
  8455  		}
  8456  		if mem != x2.Args[1] {
  8457  			break
  8458  		}
  8459  		if mem != mem2.Args[2] {
  8460  			break
  8461  		}
  8462  		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
  8463  			break
  8464  		}
  8465  		v.reset(OpAMD64MOVQstore)
  8466  		v.AuxInt = i - 4
  8467  		v.Aux = s
  8468  		v.AddArg(p)
  8469  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
  8470  		v0.AuxInt = j - 4
  8471  		v0.Aux = s2
  8472  		v0.AddArg(p2)
  8473  		v0.AddArg(mem)
  8474  		v.AddArg(v0)
  8475  		v.AddArg(mem)
  8476  		return true
  8477  	}
  8478  	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
  8479  	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
  8480  	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  8481  	for {
  8482  		off1 := v.AuxInt
  8483  		sym1 := v.Aux
  8484  		_ = v.Args[2]
  8485  		v_0 := v.Args[0]
  8486  		if v_0.Op != OpAMD64LEAL {
  8487  			break
  8488  		}
  8489  		off2 := v_0.AuxInt
  8490  		sym2 := v_0.Aux
  8491  		base := v_0.Args[0]
  8492  		val := v.Args[1]
  8493  		mem := v.Args[2]
  8494  		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
  8495  			break
  8496  		}
  8497  		v.reset(OpAMD64MOVLstore)
  8498  		v.AuxInt = off1 + off2
  8499  		v.Aux = mergeSym(sym1, sym2)
  8500  		v.AddArg(base)
  8501  		v.AddArg(val)
  8502  		v.AddArg(mem)
  8503  		return true
  8504  	}
  8505  	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
  8506  	// cond: is32Bit(off1+off2)
  8507  	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
  8508  	for {
  8509  		off1 := v.AuxInt
  8510  		sym := v.Aux
  8511  		_ = v.Args[2]
  8512  		v_0 := v.Args[0]
  8513  		if v_0.Op != OpAMD64ADDLconst {
  8514  			break
  8515  		}
  8516  		off2 := v_0.AuxInt
  8517  		ptr := v_0.Args[0]
  8518  		val := v.Args[1]
  8519  		mem := v.Args[2]
  8520  		if !(is32Bit(off1 + off2)) {
  8521  			break
  8522  		}
  8523  		v.reset(OpAMD64MOVLstore)
  8524  		v.AuxInt = off1 + off2
  8525  		v.Aux = sym
  8526  		v.AddArg(ptr)
  8527  		v.AddArg(val)
  8528  		v.AddArg(mem)
  8529  		return true
  8530  	}
  8531  	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  8532  	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
  8533  	// result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
  8534  	for {
  8535  		off := v.AuxInt
  8536  		sym := v.Aux
  8537  		_ = v.Args[2]
  8538  		ptr := v.Args[0]
  8539  		a := v.Args[1]
  8540  		if a.Op != OpAMD64ADDLconst {
  8541  			break
  8542  		}
  8543  		c := a.AuxInt
  8544  		l := a.Args[0]
  8545  		if l.Op != OpAMD64MOVLload {
  8546  			break
  8547  		}
  8548  		if l.AuxInt != off {
  8549  			break
  8550  		}
  8551  		if l.Aux != sym {
  8552  			break
  8553  		}
  8554  		_ = l.Args[1]
  8555  		ptr2 := l.Args[0]
  8556  		mem := l.Args[1]
  8557  		if mem != v.Args[2] {
  8558  			break
  8559  		}
  8560  		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
  8561  			break
  8562  		}
  8563  		v.reset(OpAMD64ADDLconstmem)
  8564  		v.AuxInt = makeValAndOff(c, off)
  8565  		v.Aux = sym
  8566  		v.AddArg(ptr)
  8567  		v.AddArg(mem)
  8568  		return true
  8569  	}
  8570  	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
  8571  	// cond:
  8572  	// result: (MOVSSstore [off] {sym} ptr val mem)
  8573  	for {
  8574  		off := v.AuxInt
  8575  		sym := v.Aux
  8576  		_ = v.Args[2]
  8577  		ptr := v.Args[0]
  8578  		v_1 := v.Args[1]
  8579  		if v_1.Op != OpAMD64MOVLf2i {
  8580  			break
  8581  		}
  8582  		val := v_1.Args[0]
  8583  		mem := v.Args[2]
  8584  		v.reset(OpAMD64MOVSSstore)
  8585  		v.AuxInt = off
  8586  		v.Aux = sym
  8587  		v.AddArg(ptr)
  8588  		v.AddArg(val)
  8589  		v.AddArg(mem)
  8590  		return true
  8591  	}
  8592  	return false
  8593  }
  8594  func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
  8595  	b := v.Block
  8596  	_ = b
  8597  	typ := &b.Func.Config.Types
  8598  	_ = typ
  8599  	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
  8600  	// cond: ValAndOff(sc).canAdd(off)
  8601  	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  8602  	for {
  8603  		sc := v.AuxInt
  8604  		s := v.Aux
  8605  		_ = v.Args[1]
  8606  		v_0 := v.Args[0]
  8607  		if v_0.Op != OpAMD64ADDQconst {
  8608  			break
  8609  		}
  8610  		off := v_0.AuxInt
  8611  		ptr := v_0.Args[0]
  8612  		mem := v.Args[1]
  8613  		if !(ValAndOff(sc).canAdd(off)) {
  8614  			break
  8615  		}
  8616  		v.reset(OpAMD64MOVLstoreconst)
  8617  		v.AuxInt = ValAndOff(sc).add(off)
  8618  		v.Aux = s
  8619  		v.AddArg(ptr)
  8620  		v.AddArg(mem)
  8621  		return true
  8622  	}
  8623  	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
  8624  	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
  8625  	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  8626  	for {
  8627  		sc := v.AuxInt
  8628  		sym1 := v.Aux
  8629  		_ = v.Args[1]
  8630  		v_0 := v.Args[0]
  8631  		if v_0.Op != OpAMD64LEAQ {
  8632  			break
  8633  		}
  8634  		off := v_0.AuxInt
  8635  		sym2 := v_0.Aux
  8636  		ptr := v_0.Args[0]
  8637  		mem := v.Args[1]
  8638  		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
  8639  			break
  8640  		}
  8641  		v.reset(OpAMD64MOVLstoreconst)
  8642  		v.AuxInt = ValAndOff(sc).add(off)
  8643  		v.Aux = mergeSym(sym1, sym2)
  8644  		v.AddArg(ptr)
  8645  		v.AddArg(mem)
  8646  		return true
  8647  	}
  8648  	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
  8649  	// cond: canMergeSym(sym1, sym2)
  8650  	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  8651  	for {
  8652  		x := v.AuxInt
  8653  		sym1 := v.Aux
  8654  		_ = v.Args[1]
  8655  		v_0 := v.Args[0]
  8656  		if v_0.Op != OpAMD64LEAQ1 {
  8657  			break
  8658  		}
  8659  		off := v_0.AuxInt
  8660  		sym2 := v_0.Aux
  8661  		_ = v_0.Args[1]
  8662  		ptr := v_0.Args[0]
  8663  		idx := v_0.Args[1]
  8664  		mem := v.Args[1]
  8665  		if !(canMergeSym(sym1, sym2)) {
  8666  			break
  8667  		}
  8668  		v.reset(OpAMD64MOVLstoreconstidx1)
  8669  		v.AuxInt = ValAndOff(x).add(off)
  8670  		v.Aux = mergeSym(sym1, sym2)
  8671  		v.AddArg(ptr)
  8672  		v.AddArg(idx)
  8673  		v.AddArg(mem)
  8674  		return true
  8675  	}
  8676  	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
  8677  	// cond: canMergeSym(sym1, sym2)
  8678  	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  8679  	for {
  8680  		x := v.AuxInt
  8681  		sym1 := v.Aux
  8682  		_ = v.Args[1]
  8683  		v_0 := v.Args[0]
  8684  		if v_0.Op != OpAMD64LEAQ4 {
  8685  			break
  8686  		}
  8687  		off := v_0.AuxInt
  8688  		sym2 := v_0.Aux
  8689  		_ = v_0.Args[1]
  8690  		ptr := v_0.Args[0]
  8691  		idx := v_0.Args[1]
  8692  		mem := v.Args[1]
  8693  		if !(canMergeSym(sym1, sym2)) {
  8694  			break
  8695  		}
  8696  		v.reset(OpAMD64MOVLstoreconstidx4)
  8697  		v.AuxInt = ValAndOff(x).add(off)
  8698  		v.Aux = mergeSym(sym1, sym2)
  8699  		v.AddArg(ptr)
  8700  		v.AddArg(idx)
  8701  		v.AddArg(mem)
  8702  		return true
  8703  	}
  8704  	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
  8705  	// cond:
  8706  	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
  8707  	for {
  8708  		x := v.AuxInt
  8709  		sym := v.Aux
  8710  		_ = v.Args[1]
  8711  		v_0 := v.Args[0]
  8712  		if v_0.Op != OpAMD64ADDQ {
  8713  			break
  8714  		}
  8715  		_ = v_0.Args[1]
  8716  		ptr := v_0.Args[0]
  8717  		idx := v_0.Args[1]
  8718  		mem := v.Args[1]
  8719  		v.reset(OpAMD64MOVLstoreconstidx1)
  8720  		v.AuxInt = x
  8721  		v.Aux = sym
  8722  		v.AddArg(ptr)
  8723  		v.AddArg(idx)
  8724  		v.AddArg(mem)
  8725  		return true
  8726  	}
  8727  	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  8728  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
  8729  	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  8730  	for {
  8731  		c := v.AuxInt
  8732  		s := v.Aux
  8733  		_ = v.Args[1]
  8734  		p := v.Args[0]
  8735  		x := v.Args[1]
  8736  		if x.Op != OpAMD64MOVLstoreconst {
  8737  			break
  8738  		}
  8739  		a := x.AuxInt
  8740  		if x.Aux != s {
  8741  			break
  8742  		}
  8743  		_ = x.Args[1]
  8744  		if p != x.Args[0] {
  8745  			break
  8746  		}
  8747  		mem := x.Args[1]
  8748  		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
  8749  			break
  8750  		}
  8751  		v.reset(OpAMD64MOVQstore)
  8752  		v.AuxInt = ValAndOff(a).Off()
  8753  		v.Aux = s
  8754  		v.AddArg(p)
  8755  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
  8756  		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
  8757  		v.AddArg(v0)
  8758  		v.AddArg(mem)
  8759  		return true
  8760  	}
  8761  	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
  8762  	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
  8763  	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  8764  	for {
  8765  		sc := v.AuxInt
  8766  		sym1 := v.Aux
  8767  		_ = v.Args[1]
  8768  		v_0 := v.Args[0]
  8769  		if v_0.Op != OpAMD64LEAL {
  8770  			break
  8771  		}
  8772  		off := v_0.AuxInt
  8773  		sym2 := v_0.Aux
  8774  		ptr := v_0.Args[0]
  8775  		mem := v.Args[1]
  8776  		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
  8777  			break
  8778  		}
  8779  		v.reset(OpAMD64MOVLstoreconst)
  8780  		v.AuxInt = ValAndOff(sc).add(off)
  8781  		v.Aux = mergeSym(sym1, sym2)
  8782  		v.AddArg(ptr)
  8783  		v.AddArg(mem)
  8784  		return true
  8785  	}
  8786  	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
  8787  	// cond: ValAndOff(sc).canAdd(off)
  8788  	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  8789  	for {
  8790  		sc := v.AuxInt
  8791  		s := v.Aux
  8792  		_ = v.Args[1]
  8793  		v_0 := v.Args[0]
  8794  		if v_0.Op != OpAMD64ADDLconst {
  8795  			break
  8796  		}
  8797  		off := v_0.AuxInt
  8798  		ptr := v_0.Args[0]
  8799  		mem := v.Args[1]
  8800  		if !(ValAndOff(sc).canAdd(off)) {
  8801  			break
  8802  		}
  8803  		v.reset(OpAMD64MOVLstoreconst)
  8804  		v.AuxInt = ValAndOff(sc).add(off)
  8805  		v.Aux = s
  8806  		v.AddArg(ptr)
  8807  		v.AddArg(mem)
  8808  		return true
  8809  	}
  8810  	return false
  8811  }
// rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0 applies rewrite rules for
// MOVLstoreconstidx1 (store of a 32-bit constant at ptr+1*idx).  Rules are
// tried in order:
//   - strength-reduce a (SHLQconst [2] idx) index into the 4-scaled
//     MOVLstoreconstidx4 addressing form;
//   - fold an ADDQconst on either the pointer or the index into the packed
//     value+offset AuxInt (ValAndOff.canAdd guards against offset overflow);
//   - merge two adjacent 4-byte constant stores into one 8-byte
//     MOVQstoreidx1 of a combined 64-bit constant (the inner store must
//     have exactly one use; clobber marks it for removal).
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		// Only merge when the two stores are exactly 4 bytes apart and the
		// lower store is otherwise dead.
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		// Combine the two 32-bit constants into one 64-bit constant:
		// low half from the lower store, high half from this store.
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0 applies rewrite rules for
// MOVLstoreconstidx4 (store of a 32-bit constant at ptr+4*idx).  Rules are
// tried in order:
//   - fold an ADDQconst on the pointer (adds c) or on the index (adds 4*c,
//     since the index is scaled by 4) into the packed value+offset AuxInt,
//     guarded by ValAndOff.canAdd;
//   - merge two adjacent 4-byte constant stores into one 8-byte
//     MOVQstoreidx1; the 4-scaled index is re-expressed as
//     (SHLQconst [2] i) because MOVQstoreidx1 is unscaled.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(4*c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(4 * c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		// Only merge when the stores are 4 bytes apart and the lower store
		// has no other uses.
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		// MOVQstoreidx1 has no *4 scaling, so scale the index explicitly.
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLstoreidx1_0 applies rewrite rules for
// MOVLstoreidx1 (32-bit store at ptr+1*idx).  Rules are tried in order:
//   - strength-reduce a shifted index (SHLQconst [2] or [3]) into the
//     4- or 8-scaled indexed store form;
//   - fold an ADDQconst on the base pointer or on the index into the
//     displacement (is32Bit guards the combined immediate);
//   - combine a store of the high 32 bits (SHRQconst [32] w, or
//     SHRQconst [j] w paired with a lower store of SHRQconst [j-32] w)
//     with the adjacent store of the low half into a single 8-byte
//     MOVQstoreidx1 (the inner store must have exactly one use;
//     clobber marks it for removal).
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// The low half at i-4 and high half at i together form one
		// 64-bit store of w at offset i-4.
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLstoreidx4_0 applies rewrite rules for
// MOVLstoreidx4 (32-bit store at ptr+4*idx).  Rules are tried in order:
//   - fold an ADDQconst on the base pointer (adds d) or on the index
//     (adds 4*d, since the index is scaled by 4) into the displacement;
//   - combine adjacent stores of the high and low halves of the same
//     value into one 8-byte MOVQstoreidx1; because MOVQstoreidx1 is
//     unscaled, the index is re-expressed as (SHLQconst [2] idx).
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		// Scale the index explicitly since MOVQstoreidx1 has no *4 scale.
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
  9426  func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool {
  9427  	// match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  9428  	// cond: is32Bit(c+d)
  9429  	// result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
  9430  	for {
  9431  		c := v.AuxInt
  9432  		sym := v.Aux
  9433  		_ = v.Args[3]
  9434  		v_0 := v.Args[0]
  9435  		if v_0.Op != OpAMD64ADDQconst {
  9436  			break
  9437  		}
  9438  		d := v_0.AuxInt
  9439  		ptr := v_0.Args[0]
  9440  		idx := v.Args[1]
  9441  		val := v.Args[2]
  9442  		mem := v.Args[3]
  9443  		if !(is32Bit(c + d)) {
  9444  			break
  9445  		}
  9446  		v.reset(OpAMD64MOVLstoreidx8)
  9447  		v.AuxInt = c + d
  9448  		v.Aux = sym
  9449  		v.AddArg(ptr)
  9450  		v.AddArg(idx)
  9451  		v.AddArg(val)
  9452  		v.AddArg(mem)
  9453  		return true
  9454  	}
  9455  	// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  9456  	// cond: is32Bit(c+8*d)
  9457  	// result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
  9458  	for {
  9459  		c := v.AuxInt
  9460  		sym := v.Aux
  9461  		_ = v.Args[3]
  9462  		ptr := v.Args[0]
  9463  		v_1 := v.Args[1]
  9464  		if v_1.Op != OpAMD64ADDQconst {
  9465  			break
  9466  		}
  9467  		d := v_1.AuxInt
  9468  		idx := v_1.Args[0]
  9469  		val := v.Args[2]
  9470  		mem := v.Args[3]
  9471  		if !(is32Bit(c + 8*d)) {
  9472  			break
  9473  		}
  9474  		v.reset(OpAMD64MOVLstoreidx8)
  9475  		v.AuxInt = c + 8*d
  9476  		v.Aux = sym
  9477  		v.AddArg(ptr)
  9478  		v.AddArg(idx)
  9479  		v.AddArg(val)
  9480  		v.AddArg(mem)
  9481  		return true
  9482  	}
  9483  	return false
  9484  }
  9485  func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
  9486  	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
  9487  	// cond: is32Bit(off1+off2)
  9488  	// result: (MOVOload [off1+off2] {sym} ptr mem)
  9489  	for {
  9490  		off1 := v.AuxInt
  9491  		sym := v.Aux
  9492  		_ = v.Args[1]
  9493  		v_0 := v.Args[0]
  9494  		if v_0.Op != OpAMD64ADDQconst {
  9495  			break
  9496  		}
  9497  		off2 := v_0.AuxInt
  9498  		ptr := v_0.Args[0]
  9499  		mem := v.Args[1]
  9500  		if !(is32Bit(off1 + off2)) {
  9501  			break
  9502  		}
  9503  		v.reset(OpAMD64MOVOload)
  9504  		v.AuxInt = off1 + off2
  9505  		v.Aux = sym
  9506  		v.AddArg(ptr)
  9507  		v.AddArg(mem)
  9508  		return true
  9509  	}
  9510  	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  9511  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9512  	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  9513  	for {
  9514  		off1 := v.AuxInt
  9515  		sym1 := v.Aux
  9516  		_ = v.Args[1]
  9517  		v_0 := v.Args[0]
  9518  		if v_0.Op != OpAMD64LEAQ {
  9519  			break
  9520  		}
  9521  		off2 := v_0.AuxInt
  9522  		sym2 := v_0.Aux
  9523  		base := v_0.Args[0]
  9524  		mem := v.Args[1]
  9525  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9526  			break
  9527  		}
  9528  		v.reset(OpAMD64MOVOload)
  9529  		v.AuxInt = off1 + off2
  9530  		v.Aux = mergeSym(sym1, sym2)
  9531  		v.AddArg(base)
  9532  		v.AddArg(mem)
  9533  		return true
  9534  	}
  9535  	return false
  9536  }
  9537  func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
  9538  	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
  9539  	// cond: is32Bit(off1+off2)
  9540  	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
  9541  	for {
  9542  		off1 := v.AuxInt
  9543  		sym := v.Aux
  9544  		_ = v.Args[2]
  9545  		v_0 := v.Args[0]
  9546  		if v_0.Op != OpAMD64ADDQconst {
  9547  			break
  9548  		}
  9549  		off2 := v_0.AuxInt
  9550  		ptr := v_0.Args[0]
  9551  		val := v.Args[1]
  9552  		mem := v.Args[2]
  9553  		if !(is32Bit(off1 + off2)) {
  9554  			break
  9555  		}
  9556  		v.reset(OpAMD64MOVOstore)
  9557  		v.AuxInt = off1 + off2
  9558  		v.Aux = sym
  9559  		v.AddArg(ptr)
  9560  		v.AddArg(val)
  9561  		v.AddArg(mem)
  9562  		return true
  9563  	}
  9564  	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  9565  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9566  	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  9567  	for {
  9568  		off1 := v.AuxInt
  9569  		sym1 := v.Aux
  9570  		_ = v.Args[2]
  9571  		v_0 := v.Args[0]
  9572  		if v_0.Op != OpAMD64LEAQ {
  9573  			break
  9574  		}
  9575  		off2 := v_0.AuxInt
  9576  		sym2 := v_0.Aux
  9577  		base := v_0.Args[0]
  9578  		val := v.Args[1]
  9579  		mem := v.Args[2]
  9580  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9581  			break
  9582  		}
  9583  		v.reset(OpAMD64MOVOstore)
  9584  		v.AuxInt = off1 + off2
  9585  		v.Aux = mergeSym(sym1, sym2)
  9586  		v.AddArg(base)
  9587  		v.AddArg(val)
  9588  		v.AddArg(mem)
  9589  		return true
  9590  	}
  9591  	return false
  9592  }
  9593  func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
  9594  	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
  9595  	// cond: is32Bit(off1+off2)
  9596  	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
  9597  	for {
  9598  		off1 := v.AuxInt
  9599  		sym := v.Aux
  9600  		_ = v.Args[1]
  9601  		v_0 := v.Args[0]
  9602  		if v_0.Op != OpAMD64ADDQconst {
  9603  			break
  9604  		}
  9605  		off2 := v_0.AuxInt
  9606  		ptr := v_0.Args[0]
  9607  		mem := v.Args[1]
  9608  		if !(is32Bit(off1 + off2)) {
  9609  			break
  9610  		}
  9611  		v.reset(OpAMD64MOVQatomicload)
  9612  		v.AuxInt = off1 + off2
  9613  		v.Aux = sym
  9614  		v.AddArg(ptr)
  9615  		v.AddArg(mem)
  9616  		return true
  9617  	}
  9618  	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
  9619  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9620  	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
  9621  	for {
  9622  		off1 := v.AuxInt
  9623  		sym1 := v.Aux
  9624  		_ = v.Args[1]
  9625  		v_0 := v.Args[0]
  9626  		if v_0.Op != OpAMD64LEAQ {
  9627  			break
  9628  		}
  9629  		off2 := v_0.AuxInt
  9630  		sym2 := v_0.Aux
  9631  		ptr := v_0.Args[0]
  9632  		mem := v.Args[1]
  9633  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9634  			break
  9635  		}
  9636  		v.reset(OpAMD64MOVQatomicload)
  9637  		v.AuxInt = off1 + off2
  9638  		v.Aux = mergeSym(sym1, sym2)
  9639  		v.AddArg(ptr)
  9640  		v.AddArg(mem)
  9641  		return true
  9642  	}
  9643  	return false
  9644  }
  9645  func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool {
  9646  	b := v.Block
  9647  	_ = b
  9648  	// match: (MOVQf2i <t> (Arg [off] {sym}))
  9649  	// cond:
  9650  	// result: @b.Func.Entry (Arg <t> [off] {sym})
  9651  	for {
  9652  		t := v.Type
  9653  		v_0 := v.Args[0]
  9654  		if v_0.Op != OpArg {
  9655  			break
  9656  		}
  9657  		off := v_0.AuxInt
  9658  		sym := v_0.Aux
  9659  		b = b.Func.Entry
  9660  		v0 := b.NewValue0(v.Pos, OpArg, t)
  9661  		v.reset(OpCopy)
  9662  		v.AddArg(v0)
  9663  		v0.AuxInt = off
  9664  		v0.Aux = sym
  9665  		return true
  9666  	}
  9667  	return false
  9668  }
  9669  func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
  9670  	b := v.Block
  9671  	_ = b
  9672  	// match: (MOVQi2f <t> (Arg [off] {sym}))
  9673  	// cond:
  9674  	// result: @b.Func.Entry (Arg <t> [off] {sym})
  9675  	for {
  9676  		t := v.Type
  9677  		v_0 := v.Args[0]
  9678  		if v_0.Op != OpArg {
  9679  			break
  9680  		}
  9681  		off := v_0.AuxInt
  9682  		sym := v_0.Aux
  9683  		b = b.Func.Entry
  9684  		v0 := b.NewValue0(v.Pos, OpArg, t)
  9685  		v.reset(OpCopy)
  9686  		v.AddArg(v0)
  9687  		v0.AuxInt = off
  9688  		v0.Aux = sym
  9689  		return true
  9690  	}
  9691  	return false
  9692  }
// rewriteValueAMD64_OpAMD64MOVQload_0 applies rewrite rules for MOVQload.
// Rules are tried in order:
//   - store-to-load forwarding: a load of exactly the slot just written by
//     a MOVQstore is replaced by the stored value;
//   - fold ADDQconst/ADDLconst and LEAQ/LEAL address arithmetic into the
//     load's displacement and symbol (is32Bit/canMergeSym guard validity);
//   - convert LEAQ1/LEAQ8/ADDQ address forms into the corresponding
//     indexed loads (ADDQ only when the base is not SB);
//   - forward a MOVSDstore through MOVQf2i, turning a float store + int
//     load of the same slot into a register bit-move.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		// SB (static base) cannot be used as the base of an indexed
		// addressing mode.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQloadidx1_0 applies the generated rewrite rules
// for MOVQloadidx1 (quad-word indexed load, index scale 1). It mutates v in
// place and reports whether a rule fired. The rules either strength-reduce to
// the scale-8 form when the index is a SHLQconst [3] (checking both argument
// orders, since the addends are commutative), or fold an ADDQconst on either
// operand into the AuxInt displacement when the sum still fits in 32 bits.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generator-emitted arity pin; presumably hoists bounds checks for the Args reads below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQloadidx8_0 applies the generated rewrite rules
// for MOVQloadidx8 (quad-word indexed load, index scale 8). It folds an
// ADDQconst on the pointer (offset added as-is) or on the index (offset
// scaled by 8) into the AuxInt displacement, guarded by is32Bit so the merged
// offset remains encodable. Mutates v in place; reports whether a rule fired.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generator-emitted arity pin for the Args reads below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		// The index is scaled by 8 at address-generation time, so the
		// folded constant must be scaled the same way.
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstore_0 applies the generated rewrite rules
// for MOVQstore (quad-word store). Mutates v in place; reports whether a rule
// fired. The rules, in order: fold ADDQconst/ADDLconst offsets into AuxInt;
// turn a stored MOVQconst into MOVQstoreconst; merge LEAQ/LEAL symbol+offset
// into the store; convert LEAQ1/LEAQ8/ADDQ addressing into the indexed store
// forms; fuse load-add-store into ADDQconstmem; and rewrite a store of a
// float-to-int move (MOVQf2i) into a direct MOVSDstore.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generator-emitted arity pin for the Args reads below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		// Guard: SB (static base) cannot be used as the base register of an
		// indexed addressing mode.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
	// result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		// The load must observe the same memory state the store extends;
		// otherwise fusing into a read-modify-write op is unsound.
		if mem != v.Args[2] {
			break
		}
		// Uses == 1 checks ensure the intermediate add/load have no other
		// consumers, so they can be absorbed into the fused op.
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64ADDQconstmem)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// cond:
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstoreconst_0 applies the generated rewrite
// rules for MOVQstoreconst (store of a constant quad-word; AuxInt packs both
// value and offset as a ValAndOff). Mutates v in place; reports whether a
// rule fired. The rules fold ADDQconst/ADDLconst and LEAQ/LEAL address
// arithmetic into the packed offset, convert LEAQ1/LEAQ8/ADDQ addressing to
// the indexed constant-store forms, and combine two adjacent zeroing stores
// into one 16-byte MOVOstore when SSE is available.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1] // generator-emitted arity pin for the Args reads below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		// Both stores must write zero, be exactly 8 bytes apart, and the
		// inner store must have no other users (x.Uses == 1, then clobbered)
		// before the pair can be replaced by one 16-byte SSE zero store.
		if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = ValAndOff(c2).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0 applies the generated rewrite
// rules for MOVQstoreconstidx1 (indexed constant quad-word store, scale 1).
// Mutates v in place; reports whether a rule fired. Rules: strength-reduce to
// the scale-8 form when the index is a SHLQconst [3], and fold an ADDQconst
// on the pointer or index into the packed ValAndOff offset when canAdd holds.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generator-emitted arity pin for the Args reads below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0 applies the generated rewrite
// rules for MOVQstoreconstidx8 (indexed constant quad-word store, scale 8).
// Mutates v in place; reports whether a rule fired. An ADDQconst on the
// pointer folds its offset as-is; an ADDQconst on the index folds 8*offset,
// matching the hardware scaling of the index register.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generator-emitted arity pin for the Args reads below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(8*c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		// Index constant is scaled by 8 because the addressing mode
		// multiplies the index register by 8.
		if !(ValAndOff(x).canAdd(8 * c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstoreidx1_0 applies the generated rewrite
// rules for MOVQstoreidx1 (indexed quad-word store, scale 1). Mutates v in
// place; reports whether a rule fired. Rules: strength-reduce to
// MOVQstoreidx8 when the index is a SHLQconst [3], and fold an ADDQconst on
// the pointer or index into the AuxInt displacement when the sum fits in 32
// bits.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3] // generator-emitted arity pin for the Args reads below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstoreidx8_0 applies the generated rewrite
// rules for MOVQstoreidx8 (indexed quad-word store, scale 8). Mutates v in
// place; reports whether a rule fired. An ADDQconst on the pointer folds its
// offset as-is; one on the index folds 8*offset, matching the hardware
// scaling. Both are guarded by is32Bit so the displacement stays encodable.
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3] // generator-emitted arity pin for the Args reads below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		// Index constant is scaled by 8 because the addressing mode
		// multiplies the index register by 8.
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDload_0 applies the generated rewrite rules
// for MOVSDload (scalar double-precision load). Mutates v in place; reports
// whether a rule fired. The rules fold ADDQconst offsets and LEAQ
// symbol+offset arithmetic into the load, convert LEAQ1/LEAQ8/ADDQ
// addressing into the indexed load forms, and forward a same-address MOVQstore
// directly to the loaded value via MOVQi2f (store-to-load forwarding across
// the int/float register files).
// NOTE: generated from gen/AMD64.rules — do not hand-edit the code.
func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1] // generator-emitted arity pin for the Args reads below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		// Guard: SB (static base) cannot be the base of an indexed
		// addressing mode.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		// Same sym/off/ptr: the load reads exactly what the store wrote,
		// so forward the stored value through an int->float register move.
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDloadidx1_0 applies, in order, the generated
// rewrite rules for MOVSDloadidx1 (float64 load with an unscaled index):
// strength-reduce a (SHLQconst [3] idx) index into the 8-byte-scaled form, and
// fold an ADDQconst on either the pointer or the index into the displacement.
// v is rewritten in place via v.reset; the function reports whether any rule
// fired. Generated from gen/AMD64.rules — change the rules file, not this code.
func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDloadidx8_0 applies the generated rewrite rules
// for MOVSDloadidx8 (float64 load, index scaled by 8): fold an ADDQconst on the
// pointer into the displacement as-is, and fold one on the index scaled by 8
// (guarded by is32Bit so the folded displacement still fits). Reports whether
// v was rewritten in place. Generated from gen/AMD64.rules; do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDstore_0 applies, in order, the generated
// address-folding rules for MOVSDstore (float64 store): merge an ADDQconst or
// LEAQ base into the offset/symbol, turn LEAQ1/LEAQ8 bases into the indexed
// store forms, turn an ADDQ base into the unscaled indexed form (unless the
// pointer is SB, which cannot be an index base), and rewrite a store of
// (MOVQi2f val) into a plain MOVQstore of val, eliminating the int->float
// register move. Reports whether v was rewritten in place. Generated from
// gen/AMD64.rules; do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// cond:
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0 applies, in order, the generated
// rewrite rules for MOVSDstoreidx1 (float64 store with an unscaled index):
// strength-reduce a (SHLQconst [3] idx) index into the 8-byte-scaled form, and
// fold an ADDQconst on either the pointer or the index into the displacement.
// Reports whether v was rewritten in place. Generated from gen/AMD64.rules;
// do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0 applies the generated rewrite
// rules for MOVSDstoreidx8 (float64 store, index scaled by 8): fold an
// ADDQconst on the pointer into the displacement as-is, and fold one on the
// index scaled by 8 (guarded by is32Bit so the folded displacement still
// fits). Reports whether v was rewritten in place. Generated from
// gen/AMD64.rules; do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSload_0 applies, in order, the generated
// address-folding rules for MOVSSload (float32 load): merge an ADDQconst or
// LEAQ base into the offset/symbol, turn LEAQ1/LEAQ4 bases into the indexed
// load forms, turn an ADDQ base into the unscaled indexed form (unless the
// pointer is SB, which cannot be an index base), and forward a same-address
// MOVLstore's stored value through (MOVLi2f val), eliminating the round trip
// through memory. Reports whether v was rewritten in place. Generated from
// gen/AMD64.rules; do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSloadidx1_0 applies, in order, the generated
// rewrite rules for MOVSSloadidx1 (float32 load with an unscaled index):
// strength-reduce a (SHLQconst [2] idx) index into the 4-byte-scaled form, and
// fold an ADDQconst on either the pointer or the index into the displacement.
// Reports whether v was rewritten in place. Generated from gen/AMD64.rules;
// do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSloadidx4_0 applies the generated rewrite rules
// for MOVSSloadidx4 (float32 load, index scaled by 4): fold an ADDQconst on
// the pointer into the displacement as-is, and fold one on the index scaled by
// 4 (guarded by is32Bit so the folded displacement still fits). Reports
// whether v was rewritten in place. Generated from gen/AMD64.rules; do not
// hand-edit.
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSstore_0 applies, in order, the generated
// address-folding rules for MOVSSstore (float32 store): merge an ADDQconst or
// LEAQ base into the offset/symbol, turn LEAQ1/LEAQ4 bases into the indexed
// store forms, turn an ADDQ base into the unscaled indexed form (unless the
// pointer is SB, which cannot be an index base), and rewrite a store of
// (MOVLi2f val) into a plain MOVLstore of val, eliminating the int->float
// register move. Reports whether v was rewritten in place. Generated from
// gen/AMD64.rules; do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0 applies, in order, the generated
// rewrite rules for MOVSSstoreidx1 (float32 store with an unscaled index):
// strength-reduce a (SHLQconst [2] idx) index into the 4-byte-scaled form, and
// fold an ADDQconst on either the pointer or the index into the displacement.
// Reports whether v was rewritten in place. Generated from gen/AMD64.rules;
// do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0 applies the generated rewrite
// rules for MOVSSstoreidx4 (float32 store, index scaled by 4): fold an
// ADDQconst on the pointer into the displacement as-is, and fold one on the
// index scaled by 4 (guarded by is32Bit so the folded displacement still
// fits). Reports whether v was rewritten in place. Generated from
// gen/AMD64.rules; do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3] // generated hint; presumably hoists one bounds check for the Args reads below — confirm against the generator
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSX_0 applies the generated rewrite rules for
// MOVWQSX (sign-extend word to quadword). Each rule is tried in order inside
// a one-shot `for { ... break }` block; the first match rewrites v in place
// and returns true. The load-folding rules fold the extension into a
// sign-extending load when the load has exactly one use and can be clobbered;
// the remaining rules drop extensions that are redundant after a mask or a
// prior extension. Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
	b := v.Block
	_ = b // silences "declared but not used" when no rule below fires
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1] // early bounds check for the argument accesses below
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// The replacement load is created in the original load's block so
		// memory ordering is preserved; v becomes a Copy of it.
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		// If the mask clears the sign bit of the word, the value is already
		// non-negative and the sign extension is a no-op.
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSXload_0 applies the generated rewrite rules
// for MOVWQSXload. It forwards a just-stored word directly to the consumer
// (store-to-load forwarding) and folds LEAQ address arithmetic into the
// load's offset/symbol. Reports whether v was rewritten.
// Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1] // early bounds check for the argument accesses below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQZX_0 applies the generated rewrite rules for
// MOVWQZX (zero-extend word to quadword). Single-use loads feeding the
// extension are replaced by plain word loads (a 16-bit load on amd64 already
// zero-extends in the destination register), indexed loads likewise; the
// remaining rules drop extensions made redundant by a mask or a prior
// zero-extension. Reports whether v was rewritten.
// Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
	b := v.Block
	_ = b // silences "declared but not used" when no rule below fires
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1] // early bounds check for the argument accesses below
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Re-create the load in its original block so memory ordering is
		// preserved; v becomes a Copy of the new load.
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx2 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		// Masking to 16 bits subsumes the zero extension.
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xffff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWload_0 applies the generated rewrite rules for
// MOVWload (16-bit load). It performs store-to-load forwarding, folds
// ADDQconst/ADDLconst/LEAQ/LEAL address arithmetic into the load's
// offset/symbol, and converts loads whose address is an LEAQ1/LEAQ2/ADDQ into
// the corresponding indexed load forms. Reports whether v was rewritten.
// Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1] // early bounds check for the argument accesses below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		// SB (static base) cannot be used as the base of an indexed
		// addressing mode, hence the guard.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWloadidx1_0 applies the generated rewrite rules
// for MOVWloadidx1 (16-bit load with byte-scaled index). A shift-by-1 index
// is upgraded to the scale-2 form, and ADDQconst on either the pointer or the
// index operand is folded into the displacement. Because ptr+idx is
// commutative, each fold appears twice with the operands swapped.
// Reports whether v was rewritten.
// Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the argument accesses below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		// idx<<1 is absorbed into the *2 scale of the idx2 form.
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWloadidx2_0 applies the generated rewrite rules
// for MOVWloadidx2 (16-bit load with 2-scaled index). ADDQconst on the
// pointer folds into the displacement directly; ADDQconst on the index folds
// as 2*d because the index is scaled by the element size. Reports whether v
// was rewritten. Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the argument accesses below
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 2*d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstore_0 applies the first batch of generated
// rewrite rules for MOVWstore (16-bit store). It strips redundant sign/zero
// extensions of the stored value (only the low 16 bits are written anyway),
// folds address arithmetic (ADDQconst/LEAQ/LEAQ1/LEAQ2/ADDQ) into the
// offset/symbol or indexed-store forms, turns constant stores into
// MOVWstoreconst, and merges adjacent word stores of shifted halves of the
// same value into a single 32-bit store. Reports whether v was rewritten;
// remaining rules live in rewriteValueAMD64_OpAMD64MOVWstore_10.
// Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // early bounds check for the argument accesses below
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		// Pack the truncated 16-bit value and the offset into one AuxInt.
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		// SB (static base) cannot serve as the base register of an
		// indexed addressing mode, hence the guard.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Two adjacent word stores of w and w>>16 become one long store.
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstore_10 applies the remaining generated
// rewrite rules for MOVWstore (continuation of _0): merging two adjacent
// word-sized copies (load+store pairs) into a single 32-bit copy, and folding
// LEAL/ADDLconst address arithmetic into the store offset. Reports whether v
// was rewritten. Generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
	b := v.Block
	_ = b // silences "declared but not used" when no rule below fires
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2] // early bounds check for the argument accesses below
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVWstore {
			break
		}
		if mem2.AuxInt != i-2 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVWload {
			break
		}
		if x2.AuxInt != j-2 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		// Replace the two 16-bit copies with one 32-bit load feeding one
		// 32-bit store at the lower address.
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = j - 2
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconst_0 applies the generated rewrite
// rules for MOVWstoreconst values (from gen/AMD64.rules). Rules are tried in
// source order; each for-loop is a single attempt that matches v's operand
// tree, checks the rule's condition, and on success mutates v in place
// (v.reset + AddArg) and returns true. A break abandons the attempt and
// falls through to the next rule. Returns false when no rule applies.
func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		// NOTE(review): generated idiom — touch the last arg before indexed
		// accesses below; presumably a bounds-check hint. Confirm in gen/rulegen.go.
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		// Fold the constant offset of the ADDQconst into the store's ValAndOff.
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		// Absorb the LEAQ1 address computation into an indexed store.
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		// Merge two adjacent 16-bit constant stores into one 32-bit store:
		// the lower store's value occupies the low 16 bits, the upper the high 16.
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0 applies the generated rewrite
// rules for MOVWstoreconstidx1 values (from gen/AMD64.rules). Rules are tried
// in source order; each for-loop is one attempt that matches v's operand tree,
// checks the rule's condition, and on success mutates v in place and returns
// true. Returns false when no rule applies.
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		// NOTE(review): generated idiom — touch the last arg before indexed
		// accesses below; presumably a bounds-check hint.
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		// idx<<1 becomes the scale-2 addressing mode of the idx2 form.
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		// Merge two adjacent 16-bit indexed constant stores into one 32-bit store.
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0 applies the generated rewrite
// rules for MOVWstoreconstidx2 values (from gen/AMD64.rules). Rules are tried
// in source order; each for-loop is one attempt that matches v's operand tree,
// checks the rule's condition, and on success mutates v in place and returns
// true. Returns false when no rule applies.
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(2*c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(2 * c)) {
			break
		}
		// The index is scaled by 2 in idx2 addressing, so the folded
		// constant is 2*c rather than c.
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		// Merge two adjacent 16-bit stores into a 32-bit scale-1 indexed
		// store; the scale-2 index is rewritten as an explicit i<<1.
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreidx1_0 applies the generated rewrite
// rules for MOVWstoreidx1 values (from gen/AMD64.rules). Rules are tried in
// source order; each for-loop is one attempt that matches v's operand tree,
// checks the rule's condition, and on success mutates v in place and returns
// true. Returns false when no rule applies.
func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		// NOTE(review): generated idiom — touch the last arg before indexed
		// accesses below; presumably a bounds-check hint.
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		// idx<<1 becomes the scale-2 addressing mode of the idx2 form.
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Two adjacent 16-bit stores of w and w>>16 collapse into one
		// 32-bit store of w at the lower offset.
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Generalization of the rule above: stores of w>>j and w>>(j-16)
		// merge into one 32-bit store of w>>(j-16).
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreidx2_0 applies the generated rewrite
// rules for MOVWstoreidx2 values (from gen/AMD64.rules). Rules are tried in
// source order; each for-loop is one attempt that matches v's operand tree,
// checks the rule's condition, and on success mutates v in place and returns
// true. Returns false when no rule applies.
func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 2*d)) {
			break
		}
		// The index is scaled by 2, so the folded displacement is 2*d.
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Merge adjacent 16-bit stores of w and w>>16 into one 32-bit
		// scale-1 store; the scale-2 index becomes an explicit idx<<1.
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Generalization of the rule above for arbitrary shift amounts j.
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULL_0 applies the generated rewrite rules for
// MULL values (from gen/AMD64.rules): a multiply with a constant operand on
// either side is strength-reduced to MULLconst. Mutates v in place and
// returns true on a match; returns false otherwise.
func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULLconst_0 applies the generated rewrite rules
// for MULLconst values (from gen/AMD64.rules): nested constant multiplies are
// combined, and a multiply of a constant is constant-folded. Products are
// truncated to 32 bits via int64(int32(...)) to match MULL semantics.
// Mutates v in place and returns true on a match; returns false otherwise.
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULQ_0 applies the generated rewrite rules for
// MULQ values (from gen/AMD64.rules): a multiply with a constant operand on
// either side becomes MULQconst, but only when the constant fits in 32 bits
// (is32Bit), since the immediate form takes a 32-bit operand. Mutates v in
// place and returns true on a match; returns false otherwise.
func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool {
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULQconst_0 applies the first batch of generated
// rewrite rules for MULQconst values (from gen/AMD64.rules): nested constant
// multiplies are combined, and small constant multipliers are strength-reduced
// to NEGQ/LEAQn/SHLQconst address arithmetic. Rules are tried in source order;
// a match mutates v in place and returns true. Returns false so the caller
// can try rewriteValueAMD64_OpAMD64MULQconst_10 when nothing here matches.
func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(c*d)
	// result: (MULQconst [c * d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c * d)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c * d
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-1] x)
	// cond:
	// result: (NEGQ x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULQconst [1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [3] x)
	// cond:
	// result: (LEAQ2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		// x*3 = x + 2*x via scaled LEA.
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [5] x)
	// cond:
	// result: (LEAQ4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [7] x)
	// cond:
	// result: (LEAQ8 (NEGQ <v.Type> x) x)
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		// x*7 = -x + 8*x.
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [9] x)
	// cond:
	// result: (LEAQ8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [11] x)
	// cond:
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		// x*11 = x + 2*(x + 4*x), built from two chained LEAs.
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [13] x)
	// cond:
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULQconst_10 is the second batch of MULQconst
// rewrite rules (generated from gen/AMD64.rules). It strength-reduces
// multiplication by selected constants into LEA/shift/add sequences:
// the fixed constants 21, 25, 37, 41, 73 (each expressible as two nested
// LEAs), and constants of the forms 2^n-1, 2^n+1, 2^n+2, 2^n+4 and 2^n+8.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [21] x)
	// cond:
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	// 21*x = x + 4*(x + 4*x)
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [25] x)
	// cond:
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	// 25*x = x + 8*(x + 2*x)
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [37] x)
	// cond:
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	// 37*x = x + 4*(x + 8*x)
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [41] x)
	// cond:
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	// 41*x = x + 8*(x + 4*x)
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [73] x)
	// cond:
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	// 73*x = x + 8*(x + 8*x)
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	// c = 2^n - 1: c*x = (x << n) - x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	// c = 2^n + 1: c*x = (x << n) + x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	// c = 2^n + 2: c*x = (x << n) + 2*x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	// c = 2^n + 4: c*x = (x << n) + 4*x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	// c = 2^n + 8: c*x = (x << n) + 8*x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULQconst_20 is the final batch of MULQconst
// rewrite rules (generated from gen/AMD64.rules). It strength-reduces
// multiplication by 3*2^n, 5*2^n and 9*2^n into an LEA followed by a
// shift, and constant-folds multiplication of a MOVQconst. It reports
// whether v was rewritten.
func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	// c = 3*2^n: c*x = (3*x) << n
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	// c = 5*2^n: c*x = (5*x) << n
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	// c = 9*2^n: c*x = (9*x) << n
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	// Constant-fold; c*d wraps in two's complement, matching hardware.
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
 14627  func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
 14628  	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
 14629  	// cond: canMergeLoad(v, l, x) && clobber(l)
 14630  	// result: (MULSDmem x [off] {sym} ptr mem)
 14631  	for {
 14632  		_ = v.Args[1]
 14633  		x := v.Args[0]
 14634  		l := v.Args[1]
 14635  		if l.Op != OpAMD64MOVSDload {
 14636  			break
 14637  		}
 14638  		off := l.AuxInt
 14639  		sym := l.Aux
 14640  		_ = l.Args[1]
 14641  		ptr := l.Args[0]
 14642  		mem := l.Args[1]
 14643  		if !(canMergeLoad(v, l, x) && clobber(l)) {
 14644  			break
 14645  		}
 14646  		v.reset(OpAMD64MULSDmem)
 14647  		v.AuxInt = off
 14648  		v.Aux = sym
 14649  		v.AddArg(x)
 14650  		v.AddArg(ptr)
 14651  		v.AddArg(mem)
 14652  		return true
 14653  	}
 14654  	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
 14655  	// cond: canMergeLoad(v, l, x) && clobber(l)
 14656  	// result: (MULSDmem x [off] {sym} ptr mem)
 14657  	for {
 14658  		_ = v.Args[1]
 14659  		l := v.Args[0]
 14660  		if l.Op != OpAMD64MOVSDload {
 14661  			break
 14662  		}
 14663  		off := l.AuxInt
 14664  		sym := l.Aux
 14665  		_ = l.Args[1]
 14666  		ptr := l.Args[0]
 14667  		mem := l.Args[1]
 14668  		x := v.Args[1]
 14669  		if !(canMergeLoad(v, l, x) && clobber(l)) {
 14670  			break
 14671  		}
 14672  		v.reset(OpAMD64MULSDmem)
 14673  		v.AuxInt = off
 14674  		v.Aux = sym
 14675  		v.AddArg(x)
 14676  		v.AddArg(ptr)
 14677  		v.AddArg(mem)
 14678  		return true
 14679  	}
 14680  	return false
 14681  }
 14682  func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool {
 14683  	b := v.Block
 14684  	_ = b
 14685  	typ := &b.Func.Config.Types
 14686  	_ = typ
 14687  	// match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
 14688  	// cond:
 14689  	// result: (MULSD x (MOVQi2f y))
 14690  	for {
 14691  		off := v.AuxInt
 14692  		sym := v.Aux
 14693  		_ = v.Args[2]
 14694  		x := v.Args[0]
 14695  		ptr := v.Args[1]
 14696  		v_2 := v.Args[2]
 14697  		if v_2.Op != OpAMD64MOVQstore {
 14698  			break
 14699  		}
 14700  		if v_2.AuxInt != off {
 14701  			break
 14702  		}
 14703  		if v_2.Aux != sym {
 14704  			break
 14705  		}
 14706  		_ = v_2.Args[2]
 14707  		if ptr != v_2.Args[0] {
 14708  			break
 14709  		}
 14710  		y := v_2.Args[1]
 14711  		v.reset(OpAMD64MULSD)
 14712  		v.AddArg(x)
 14713  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
 14714  		v0.AddArg(y)
 14715  		v.AddArg(v0)
 14716  		return true
 14717  	}
 14718  	return false
 14719  }
 14720  func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
 14721  	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
 14722  	// cond: canMergeLoad(v, l, x) && clobber(l)
 14723  	// result: (MULSSmem x [off] {sym} ptr mem)
 14724  	for {
 14725  		_ = v.Args[1]
 14726  		x := v.Args[0]
 14727  		l := v.Args[1]
 14728  		if l.Op != OpAMD64MOVSSload {
 14729  			break
 14730  		}
 14731  		off := l.AuxInt
 14732  		sym := l.Aux
 14733  		_ = l.Args[1]
 14734  		ptr := l.Args[0]
 14735  		mem := l.Args[1]
 14736  		if !(canMergeLoad(v, l, x) && clobber(l)) {
 14737  			break
 14738  		}
 14739  		v.reset(OpAMD64MULSSmem)
 14740  		v.AuxInt = off
 14741  		v.Aux = sym
 14742  		v.AddArg(x)
 14743  		v.AddArg(ptr)
 14744  		v.AddArg(mem)
 14745  		return true
 14746  	}
 14747  	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
 14748  	// cond: canMergeLoad(v, l, x) && clobber(l)
 14749  	// result: (MULSSmem x [off] {sym} ptr mem)
 14750  	for {
 14751  		_ = v.Args[1]
 14752  		l := v.Args[0]
 14753  		if l.Op != OpAMD64MOVSSload {
 14754  			break
 14755  		}
 14756  		off := l.AuxInt
 14757  		sym := l.Aux
 14758  		_ = l.Args[1]
 14759  		ptr := l.Args[0]
 14760  		mem := l.Args[1]
 14761  		x := v.Args[1]
 14762  		if !(canMergeLoad(v, l, x) && clobber(l)) {
 14763  			break
 14764  		}
 14765  		v.reset(OpAMD64MULSSmem)
 14766  		v.AuxInt = off
 14767  		v.Aux = sym
 14768  		v.AddArg(x)
 14769  		v.AddArg(ptr)
 14770  		v.AddArg(mem)
 14771  		return true
 14772  	}
 14773  	return false
 14774  }
 14775  func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool {
 14776  	b := v.Block
 14777  	_ = b
 14778  	typ := &b.Func.Config.Types
 14779  	_ = typ
 14780  	// match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
 14781  	// cond:
 14782  	// result: (MULSS x (MOVLi2f y))
 14783  	for {
 14784  		off := v.AuxInt
 14785  		sym := v.Aux
 14786  		_ = v.Args[2]
 14787  		x := v.Args[0]
 14788  		ptr := v.Args[1]
 14789  		v_2 := v.Args[2]
 14790  		if v_2.Op != OpAMD64MOVLstore {
 14791  			break
 14792  		}
 14793  		if v_2.AuxInt != off {
 14794  			break
 14795  		}
 14796  		if v_2.Aux != sym {
 14797  			break
 14798  		}
 14799  		_ = v_2.Args[2]
 14800  		if ptr != v_2.Args[0] {
 14801  			break
 14802  		}
 14803  		y := v_2.Args[1]
 14804  		v.reset(OpAMD64MULSS)
 14805  		v.AddArg(x)
 14806  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
 14807  		v0.AddArg(y)
 14808  		v.AddArg(v0)
 14809  		return true
 14810  	}
 14811  	return false
 14812  }
 14813  func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
 14814  	// match: (NEGL (MOVLconst [c]))
 14815  	// cond:
 14816  	// result: (MOVLconst [int64(int32(-c))])
 14817  	for {
 14818  		v_0 := v.Args[0]
 14819  		if v_0.Op != OpAMD64MOVLconst {
 14820  			break
 14821  		}
 14822  		c := v_0.AuxInt
 14823  		v.reset(OpAMD64MOVLconst)
 14824  		v.AuxInt = int64(int32(-c))
 14825  		return true
 14826  	}
 14827  	return false
 14828  }
 14829  func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
 14830  	// match: (NEGQ (MOVQconst [c]))
 14831  	// cond:
 14832  	// result: (MOVQconst [-c])
 14833  	for {
 14834  		v_0 := v.Args[0]
 14835  		if v_0.Op != OpAMD64MOVQconst {
 14836  			break
 14837  		}
 14838  		c := v_0.AuxInt
 14839  		v.reset(OpAMD64MOVQconst)
 14840  		v.AuxInt = -c
 14841  		return true
 14842  	}
 14843  	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
 14844  	// cond: c != -(1<<31)
 14845  	// result: (ADDQconst [-c] x)
 14846  	for {
 14847  		v_0 := v.Args[0]
 14848  		if v_0.Op != OpAMD64ADDQconst {
 14849  			break
 14850  		}
 14851  		c := v_0.AuxInt
 14852  		v_0_0 := v_0.Args[0]
 14853  		if v_0_0.Op != OpAMD64NEGQ {
 14854  			break
 14855  		}
 14856  		x := v_0_0.Args[0]
 14857  		if !(c != -(1 << 31)) {
 14858  			break
 14859  		}
 14860  		v.reset(OpAMD64ADDQconst)
 14861  		v.AuxInt = -c
 14862  		v.AddArg(x)
 14863  		return true
 14864  	}
 14865  	return false
 14866  }
 14867  func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
 14868  	// match: (NOTL (MOVLconst [c]))
 14869  	// cond:
 14870  	// result: (MOVLconst [^c])
 14871  	for {
 14872  		v_0 := v.Args[0]
 14873  		if v_0.Op != OpAMD64MOVLconst {
 14874  			break
 14875  		}
 14876  		c := v_0.AuxInt
 14877  		v.reset(OpAMD64MOVLconst)
 14878  		v.AuxInt = ^c
 14879  		return true
 14880  	}
 14881  	return false
 14882  }
 14883  func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
 14884  	// match: (NOTQ (MOVQconst [c]))
 14885  	// cond:
 14886  	// result: (MOVQconst [^c])
 14887  	for {
 14888  		v_0 := v.Args[0]
 14889  		if v_0.Op != OpAMD64MOVQconst {
 14890  			break
 14891  		}
 14892  		c := v_0.AuxInt
 14893  		v.reset(OpAMD64MOVQconst)
 14894  		v.AuxInt = ^c
 14895  		return true
 14896  	}
 14897  	return false
 14898  }
// rewriteValueAMD64_OpAMD64ORL_0 is the first batch of ORL rewrite rules
// (generated from gen/AMD64.rules). It folds a constant operand into
// ORLconst, recognizes constant-shift pairs (x<<c | x>>(width-c)) as
// rotate-by-constant ops (ROLLconst/ROLWconst/ROLBconst), and recognizes
// the generic masked variable-rotate expansion as ROLL. Each commutative
// rule appears twice, once per operand order. It reports whether v was
// rewritten.
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	// 32-bit rotate: (x<<c) | (x>>(32-c)).
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	// 16-bit rotate; only valid when the result is used as a 16-bit value.
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	// 8-bit rotate; only valid when the result is used as an 8-bit value.
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	// Recognizes the generic expansion of a 32-bit variable rotate
	// (shift left by y, OR with the masked right shift by 32-y) and
	// collapses it to a single ROLL instruction.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	// Same rotate pattern with the ANDL operands in the other order.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
 15238  func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
 15239  	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
 15240  	// cond:
 15241  	// result: (ROLL x y)
 15242  	for {
 15243  		_ = v.Args[1]
 15244  		v_0 := v.Args[0]
 15245  		if v_0.Op != OpAMD64ANDL {
 15246  			break
 15247  		}
 15248  		_ = v_0.Args[1]
 15249  		v_0_0 := v_0.Args[0]
 15250  		if v_0_0.Op != OpAMD64SHRL {
 15251  			break
 15252  		}
 15253  		_ = v_0_0.Args[1]
 15254  		x := v_0_0.Args[0]
 15255  		v_0_0_1 := v_0_0.Args[1]
 15256  		if v_0_0_1.Op != OpAMD64NEGQ {
 15257  			break
 15258  		}
 15259  		y := v_0_0_1.Args[0]
 15260  		v_0_1 := v_0.Args[1]
 15261  		if v_0_1.Op != OpAMD64SBBLcarrymask {
 15262  			break
 15263  		}
 15264  		v_0_1_0 := v_0_1.Args[0]
 15265  		if v_0_1_0.Op != OpAMD64CMPQconst {
 15266  			break
 15267  		}
 15268  		if v_0_1_0.AuxInt != 32 {
 15269  			break
 15270  		}
 15271  		v_0_1_0_0 := v_0_1_0.Args[0]
 15272  		if v_0_1_0_0.Op != OpAMD64NEGQ {
 15273  			break
 15274  		}
 15275  		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
 15276  		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
 15277  			break
 15278  		}
 15279  		if v_0_1_0_0_0.AuxInt != -32 {
 15280  			break
 15281  		}
 15282  		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
 15283  		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
 15284  			break
 15285  		}
 15286  		if v_0_1_0_0_0_0.AuxInt != 31 {
 15287  			break
 15288  		}
 15289  		if y != v_0_1_0_0_0_0.Args[0] {
 15290  			break
 15291  		}
 15292  		v_1 := v.Args[1]
 15293  		if v_1.Op != OpAMD64SHLL {
 15294  			break
 15295  		}
 15296  		_ = v_1.Args[1]
 15297  		if x != v_1.Args[0] {
 15298  			break
 15299  		}
 15300  		if y != v_1.Args[1] {
 15301  			break
 15302  		}
 15303  		v.reset(OpAMD64ROLL)
 15304  		v.AddArg(x)
 15305  		v.AddArg(y)
 15306  		return true
 15307  	}
 15308  	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
 15309  	// cond:
 15310  	// result: (ROLL x y)
 15311  	for {
 15312  		_ = v.Args[1]
 15313  		v_0 := v.Args[0]
 15314  		if v_0.Op != OpAMD64ANDL {
 15315  			break
 15316  		}
 15317  		_ = v_0.Args[1]
 15318  		v_0_0 := v_0.Args[0]
 15319  		if v_0_0.Op != OpAMD64SBBLcarrymask {
 15320  			break
 15321  		}
 15322  		v_0_0_0 := v_0_0.Args[0]
 15323  		if v_0_0_0.Op != OpAMD64CMPQconst {
 15324  			break
 15325  		}
 15326  		if v_0_0_0.AuxInt != 32 {
 15327  			break
 15328  		}
 15329  		v_0_0_0_0 := v_0_0_0.Args[0]
 15330  		if v_0_0_0_0.Op != OpAMD64NEGQ {
 15331  			break
 15332  		}
 15333  		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
 15334  		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
 15335  			break
 15336  		}
 15337  		if v_0_0_0_0_0.AuxInt != -32 {
 15338  			break
 15339  		}
 15340  		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
 15341  		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
 15342  			break
 15343  		}
 15344  		if v_0_0_0_0_0_0.AuxInt != 31 {
 15345  			break
 15346  		}
 15347  		y := v_0_0_0_0_0_0.Args[0]
 15348  		v_0_1 := v_0.Args[1]
 15349  		if v_0_1.Op != OpAMD64SHRL {
 15350  			break
 15351  		}
 15352  		_ = v_0_1.Args[1]
 15353  		x := v_0_1.Args[0]
 15354  		v_0_1_1 := v_0_1.Args[1]
 15355  		if v_0_1_1.Op != OpAMD64NEGQ {
 15356  			break
 15357  		}
 15358  		if y != v_0_1_1.Args[0] {
 15359  			break
 15360  		}
 15361  		v_1 := v.Args[1]
 15362  		if v_1.Op != OpAMD64SHLL {
 15363  			break
 15364  		}
 15365  		_ = v_1.Args[1]
 15366  		if x != v_1.Args[0] {
 15367  			break
 15368  		}
 15369  		if y != v_1.Args[1] {
 15370  			break
 15371  		}
 15372  		v.reset(OpAMD64ROLL)
 15373  		v.AddArg(x)
 15374  		v.AddArg(y)
 15375  		return true
 15376  	}
 15377  	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
 15378  	// cond:
 15379  	// result: (ROLL x y)
 15380  	for {
 15381  		_ = v.Args[1]
 15382  		v_0 := v.Args[0]
 15383  		if v_0.Op != OpAMD64SHLL {
 15384  			break
 15385  		}
 15386  		_ = v_0.Args[1]
 15387  		x := v_0.Args[0]
 15388  		y := v_0.Args[1]
 15389  		v_1 := v.Args[1]
 15390  		if v_1.Op != OpAMD64ANDL {
 15391  			break
 15392  		}
 15393  		_ = v_1.Args[1]
 15394  		v_1_0 := v_1.Args[0]
 15395  		if v_1_0.Op != OpAMD64SHRL {
 15396  			break
 15397  		}
 15398  		_ = v_1_0.Args[1]
 15399  		if x != v_1_0.Args[0] {
 15400  			break
 15401  		}
 15402  		v_1_0_1 := v_1_0.Args[1]
 15403  		if v_1_0_1.Op != OpAMD64NEGL {
 15404  			break
 15405  		}
 15406  		if y != v_1_0_1.Args[0] {
 15407  			break
 15408  		}
 15409  		v_1_1 := v_1.Args[1]
 15410  		if v_1_1.Op != OpAMD64SBBLcarrymask {
 15411  			break
 15412  		}
 15413  		v_1_1_0 := v_1_1.Args[0]
 15414  		if v_1_1_0.Op != OpAMD64CMPLconst {
 15415  			break
 15416  		}
 15417  		if v_1_1_0.AuxInt != 32 {
 15418  			break
 15419  		}
 15420  		v_1_1_0_0 := v_1_1_0.Args[0]
 15421  		if v_1_1_0_0.Op != OpAMD64NEGL {
 15422  			break
 15423  		}
 15424  		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
 15425  		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
 15426  			break
 15427  		}
 15428  		if v_1_1_0_0_0.AuxInt != -32 {
 15429  			break
 15430  		}
 15431  		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
 15432  		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
 15433  			break
 15434  		}
 15435  		if v_1_1_0_0_0_0.AuxInt != 31 {
 15436  			break
 15437  		}
 15438  		if y != v_1_1_0_0_0_0.Args[0] {
 15439  			break
 15440  		}
 15441  		v.reset(OpAMD64ROLL)
 15442  		v.AddArg(x)
 15443  		v.AddArg(y)
 15444  		return true
 15445  	}
 15446  	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
 15447  	// cond:
 15448  	// result: (ROLL x y)
 15449  	for {
 15450  		_ = v.Args[1]
 15451  		v_0 := v.Args[0]
 15452  		if v_0.Op != OpAMD64SHLL {
 15453  			break
 15454  		}
 15455  		_ = v_0.Args[1]
 15456  		x := v_0.Args[0]
 15457  		y := v_0.Args[1]
 15458  		v_1 := v.Args[1]
 15459  		if v_1.Op != OpAMD64ANDL {
 15460  			break
 15461  		}
 15462  		_ = v_1.Args[1]
 15463  		v_1_0 := v_1.Args[0]
 15464  		if v_1_0.Op != OpAMD64SBBLcarrymask {
 15465  			break
 15466  		}
 15467  		v_1_0_0 := v_1_0.Args[0]
 15468  		if v_1_0_0.Op != OpAMD64CMPLconst {
 15469  			break
 15470  		}
 15471  		if v_1_0_0.AuxInt != 32 {
 15472  			break
 15473  		}
 15474  		v_1_0_0_0 := v_1_0_0.Args[0]
 15475  		if v_1_0_0_0.Op != OpAMD64NEGL {
 15476  			break
 15477  		}
 15478  		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
 15479  		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
 15480  			break
 15481  		}
 15482  		if v_1_0_0_0_0.AuxInt != -32 {
 15483  			break
 15484  		}
 15485  		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
 15486  		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
 15487  			break
 15488  		}
 15489  		if v_1_0_0_0_0_0.AuxInt != 31 {
 15490  			break
 15491  		}
 15492  		if y != v_1_0_0_0_0_0.Args[0] {
 15493  			break
 15494  		}
 15495  		v_1_1 := v_1.Args[1]
 15496  		if v_1_1.Op != OpAMD64SHRL {
 15497  			break
 15498  		}
 15499  		_ = v_1_1.Args[1]
 15500  		if x != v_1_1.Args[0] {
 15501  			break
 15502  		}
 15503  		v_1_1_1 := v_1_1.Args[1]
 15504  		if v_1_1_1.Op != OpAMD64NEGL {
 15505  			break
 15506  		}
 15507  		if y != v_1_1_1.Args[0] {
 15508  			break
 15509  		}
 15510  		v.reset(OpAMD64ROLL)
 15511  		v.AddArg(x)
 15512  		v.AddArg(y)
 15513  		return true
 15514  	}
 15515  	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
 15516  	// cond:
 15517  	// result: (ROLL x y)
 15518  	for {
 15519  		_ = v.Args[1]
 15520  		v_0 := v.Args[0]
 15521  		if v_0.Op != OpAMD64ANDL {
 15522  			break
 15523  		}
 15524  		_ = v_0.Args[1]
 15525  		v_0_0 := v_0.Args[0]
 15526  		if v_0_0.Op != OpAMD64SHRL {
 15527  			break
 15528  		}
 15529  		_ = v_0_0.Args[1]
 15530  		x := v_0_0.Args[0]
 15531  		v_0_0_1 := v_0_0.Args[1]
 15532  		if v_0_0_1.Op != OpAMD64NEGL {
 15533  			break
 15534  		}
 15535  		y := v_0_0_1.Args[0]
 15536  		v_0_1 := v_0.Args[1]
 15537  		if v_0_1.Op != OpAMD64SBBLcarrymask {
 15538  			break
 15539  		}
 15540  		v_0_1_0 := v_0_1.Args[0]
 15541  		if v_0_1_0.Op != OpAMD64CMPLconst {
 15542  			break
 15543  		}
 15544  		if v_0_1_0.AuxInt != 32 {
 15545  			break
 15546  		}
 15547  		v_0_1_0_0 := v_0_1_0.Args[0]
 15548  		if v_0_1_0_0.Op != OpAMD64NEGL {
 15549  			break
 15550  		}
 15551  		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
 15552  		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
 15553  			break
 15554  		}
 15555  		if v_0_1_0_0_0.AuxInt != -32 {
 15556  			break
 15557  		}
 15558  		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
 15559  		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
 15560  			break
 15561  		}
 15562  		if v_0_1_0_0_0_0.AuxInt != 31 {
 15563  			break
 15564  		}
 15565  		if y != v_0_1_0_0_0_0.Args[0] {
 15566  			break
 15567  		}
 15568  		v_1 := v.Args[1]
 15569  		if v_1.Op != OpAMD64SHLL {
 15570  			break
 15571  		}
 15572  		_ = v_1.Args[1]
 15573  		if x != v_1.Args[0] {
 15574  			break
 15575  		}
 15576  		if y != v_1.Args[1] {
 15577  			break
 15578  		}
 15579  		v.reset(OpAMD64ROLL)
 15580  		v.AddArg(x)
 15581  		v.AddArg(y)
 15582  		return true
 15583  	}
 15584  	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
 15585  	// cond:
 15586  	// result: (ROLL x y)
 15587  	for {
 15588  		_ = v.Args[1]
 15589  		v_0 := v.Args[0]
 15590  		if v_0.Op != OpAMD64ANDL {
 15591  			break
 15592  		}
 15593  		_ = v_0.Args[1]
 15594  		v_0_0 := v_0.Args[0]
 15595  		if v_0_0.Op != OpAMD64SBBLcarrymask {
 15596  			break
 15597  		}
 15598  		v_0_0_0 := v_0_0.Args[0]
 15599  		if v_0_0_0.Op != OpAMD64CMPLconst {
 15600  			break
 15601  		}
 15602  		if v_0_0_0.AuxInt != 32 {
 15603  			break
 15604  		}
 15605  		v_0_0_0_0 := v_0_0_0.Args[0]
 15606  		if v_0_0_0_0.Op != OpAMD64NEGL {
 15607  			break
 15608  		}
 15609  		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
 15610  		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
 15611  			break
 15612  		}
 15613  		if v_0_0_0_0_0.AuxInt != -32 {
 15614  			break
 15615  		}
 15616  		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
 15617  		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
 15618  			break
 15619  		}
 15620  		if v_0_0_0_0_0_0.AuxInt != 31 {
 15621  			break
 15622  		}
 15623  		y := v_0_0_0_0_0_0.Args[0]
 15624  		v_0_1 := v_0.Args[1]
 15625  		if v_0_1.Op != OpAMD64SHRL {
 15626  			break
 15627  		}
 15628  		_ = v_0_1.Args[1]
 15629  		x := v_0_1.Args[0]
 15630  		v_0_1_1 := v_0_1.Args[1]
 15631  		if v_0_1_1.Op != OpAMD64NEGL {
 15632  			break
 15633  		}
 15634  		if y != v_0_1_1.Args[0] {
 15635  			break
 15636  		}
 15637  		v_1 := v.Args[1]
 15638  		if v_1.Op != OpAMD64SHLL {
 15639  			break
 15640  		}
 15641  		_ = v_1.Args[1]
 15642  		if x != v_1.Args[0] {
 15643  			break
 15644  		}
 15645  		if y != v_1.Args[1] {
 15646  			break
 15647  		}
 15648  		v.reset(OpAMD64ROLL)
 15649  		v.AddArg(x)
 15650  		v.AddArg(y)
 15651  		return true
 15652  	}
 15653  	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
 15654  	// cond:
 15655  	// result: (RORL x y)
 15656  	for {
 15657  		_ = v.Args[1]
 15658  		v_0 := v.Args[0]
 15659  		if v_0.Op != OpAMD64SHRL {
 15660  			break
 15661  		}
 15662  		_ = v_0.Args[1]
 15663  		x := v_0.Args[0]
 15664  		y := v_0.Args[1]
 15665  		v_1 := v.Args[1]
 15666  		if v_1.Op != OpAMD64ANDL {
 15667  			break
 15668  		}
 15669  		_ = v_1.Args[1]
 15670  		v_1_0 := v_1.Args[0]
 15671  		if v_1_0.Op != OpAMD64SHLL {
 15672  			break
 15673  		}
 15674  		_ = v_1_0.Args[1]
 15675  		if x != v_1_0.Args[0] {
 15676  			break
 15677  		}
 15678  		v_1_0_1 := v_1_0.Args[1]
 15679  		if v_1_0_1.Op != OpAMD64NEGQ {
 15680  			break
 15681  		}
 15682  		if y != v_1_0_1.Args[0] {
 15683  			break
 15684  		}
 15685  		v_1_1 := v_1.Args[1]
 15686  		if v_1_1.Op != OpAMD64SBBLcarrymask {
 15687  			break
 15688  		}
 15689  		v_1_1_0 := v_1_1.Args[0]
 15690  		if v_1_1_0.Op != OpAMD64CMPQconst {
 15691  			break
 15692  		}
 15693  		if v_1_1_0.AuxInt != 32 {
 15694  			break
 15695  		}
 15696  		v_1_1_0_0 := v_1_1_0.Args[0]
 15697  		if v_1_1_0_0.Op != OpAMD64NEGQ {
 15698  			break
 15699  		}
 15700  		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
 15701  		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
 15702  			break
 15703  		}
 15704  		if v_1_1_0_0_0.AuxInt != -32 {
 15705  			break
 15706  		}
 15707  		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
 15708  		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
 15709  			break
 15710  		}
 15711  		if v_1_1_0_0_0_0.AuxInt != 31 {
 15712  			break
 15713  		}
 15714  		if y != v_1_1_0_0_0_0.Args[0] {
 15715  			break
 15716  		}
 15717  		v.reset(OpAMD64RORL)
 15718  		v.AddArg(x)
 15719  		v.AddArg(y)
 15720  		return true
 15721  	}
 15722  	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
 15723  	// cond:
 15724  	// result: (RORL x y)
 15725  	for {
 15726  		_ = v.Args[1]
 15727  		v_0 := v.Args[0]
 15728  		if v_0.Op != OpAMD64SHRL {
 15729  			break
 15730  		}
 15731  		_ = v_0.Args[1]
 15732  		x := v_0.Args[0]
 15733  		y := v_0.Args[1]
 15734  		v_1 := v.Args[1]
 15735  		if v_1.Op != OpAMD64ANDL {
 15736  			break
 15737  		}
 15738  		_ = v_1.Args[1]
 15739  		v_1_0 := v_1.Args[0]
 15740  		if v_1_0.Op != OpAMD64SBBLcarrymask {
 15741  			break
 15742  		}
 15743  		v_1_0_0 := v_1_0.Args[0]
 15744  		if v_1_0_0.Op != OpAMD64CMPQconst {
 15745  			break
 15746  		}
 15747  		if v_1_0_0.AuxInt != 32 {
 15748  			break
 15749  		}
 15750  		v_1_0_0_0 := v_1_0_0.Args[0]
 15751  		if v_1_0_0_0.Op != OpAMD64NEGQ {
 15752  			break
 15753  		}
 15754  		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
 15755  		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
 15756  			break
 15757  		}
 15758  		if v_1_0_0_0_0.AuxInt != -32 {
 15759  			break
 15760  		}
 15761  		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
 15762  		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
 15763  			break
 15764  		}
 15765  		if v_1_0_0_0_0_0.AuxInt != 31 {
 15766  			break
 15767  		}
 15768  		if y != v_1_0_0_0_0_0.Args[0] {
 15769  			break
 15770  		}
 15771  		v_1_1 := v_1.Args[1]
 15772  		if v_1_1.Op != OpAMD64SHLL {
 15773  			break
 15774  		}
 15775  		_ = v_1_1.Args[1]
 15776  		if x != v_1_1.Args[0] {
 15777  			break
 15778  		}
 15779  		v_1_1_1 := v_1_1.Args[1]
 15780  		if v_1_1_1.Op != OpAMD64NEGQ {
 15781  			break
 15782  		}
 15783  		if y != v_1_1_1.Args[0] {
 15784  			break
 15785  		}
 15786  		v.reset(OpAMD64RORL)
 15787  		v.AddArg(x)
 15788  		v.AddArg(y)
 15789  		return true
 15790  	}
 15791  	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
 15792  	// cond:
 15793  	// result: (RORL x y)
 15794  	for {
 15795  		_ = v.Args[1]
 15796  		v_0 := v.Args[0]
 15797  		if v_0.Op != OpAMD64ANDL {
 15798  			break
 15799  		}
 15800  		_ = v_0.Args[1]
 15801  		v_0_0 := v_0.Args[0]
 15802  		if v_0_0.Op != OpAMD64SHLL {
 15803  			break
 15804  		}
 15805  		_ = v_0_0.Args[1]
 15806  		x := v_0_0.Args[0]
 15807  		v_0_0_1 := v_0_0.Args[1]
 15808  		if v_0_0_1.Op != OpAMD64NEGQ {
 15809  			break
 15810  		}
 15811  		y := v_0_0_1.Args[0]
 15812  		v_0_1 := v_0.Args[1]
 15813  		if v_0_1.Op != OpAMD64SBBLcarrymask {
 15814  			break
 15815  		}
 15816  		v_0_1_0 := v_0_1.Args[0]
 15817  		if v_0_1_0.Op != OpAMD64CMPQconst {
 15818  			break
 15819  		}
 15820  		if v_0_1_0.AuxInt != 32 {
 15821  			break
 15822  		}
 15823  		v_0_1_0_0 := v_0_1_0.Args[0]
 15824  		if v_0_1_0_0.Op != OpAMD64NEGQ {
 15825  			break
 15826  		}
 15827  		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
 15828  		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
 15829  			break
 15830  		}
 15831  		if v_0_1_0_0_0.AuxInt != -32 {
 15832  			break
 15833  		}
 15834  		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
 15835  		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
 15836  			break
 15837  		}
 15838  		if v_0_1_0_0_0_0.AuxInt != 31 {
 15839  			break
 15840  		}
 15841  		if y != v_0_1_0_0_0_0.Args[0] {
 15842  			break
 15843  		}
 15844  		v_1 := v.Args[1]
 15845  		if v_1.Op != OpAMD64SHRL {
 15846  			break
 15847  		}
 15848  		_ = v_1.Args[1]
 15849  		if x != v_1.Args[0] {
 15850  			break
 15851  		}
 15852  		if y != v_1.Args[1] {
 15853  			break
 15854  		}
 15855  		v.reset(OpAMD64RORL)
 15856  		v.AddArg(x)
 15857  		v.AddArg(y)
 15858  		return true
 15859  	}
 15860  	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
 15861  	// cond:
 15862  	// result: (RORL x y)
 15863  	for {
 15864  		_ = v.Args[1]
 15865  		v_0 := v.Args[0]
 15866  		if v_0.Op != OpAMD64ANDL {
 15867  			break
 15868  		}
 15869  		_ = v_0.Args[1]
 15870  		v_0_0 := v_0.Args[0]
 15871  		if v_0_0.Op != OpAMD64SBBLcarrymask {
 15872  			break
 15873  		}
 15874  		v_0_0_0 := v_0_0.Args[0]
 15875  		if v_0_0_0.Op != OpAMD64CMPQconst {
 15876  			break
 15877  		}
 15878  		if v_0_0_0.AuxInt != 32 {
 15879  			break
 15880  		}
 15881  		v_0_0_0_0 := v_0_0_0.Args[0]
 15882  		if v_0_0_0_0.Op != OpAMD64NEGQ {
 15883  			break
 15884  		}
 15885  		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
 15886  		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
 15887  			break
 15888  		}
 15889  		if v_0_0_0_0_0.AuxInt != -32 {
 15890  			break
 15891  		}
 15892  		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
 15893  		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
 15894  			break
 15895  		}
 15896  		if v_0_0_0_0_0_0.AuxInt != 31 {
 15897  			break
 15898  		}
 15899  		y := v_0_0_0_0_0_0.Args[0]
 15900  		v_0_1 := v_0.Args[1]
 15901  		if v_0_1.Op != OpAMD64SHLL {
 15902  			break
 15903  		}
 15904  		_ = v_0_1.Args[1]
 15905  		x := v_0_1.Args[0]
 15906  		v_0_1_1 := v_0_1.Args[1]
 15907  		if v_0_1_1.Op != OpAMD64NEGQ {
 15908  			break
 15909  		}
 15910  		if y != v_0_1_1.Args[0] {
 15911  			break
 15912  		}
 15913  		v_1 := v.Args[1]
 15914  		if v_1.Op != OpAMD64SHRL {
 15915  			break
 15916  		}
 15917  		_ = v_1.Args[1]
 15918  		if x != v_1.Args[0] {
 15919  			break
 15920  		}
 15921  		if y != v_1.Args[1] {
 15922  			break
 15923  		}
 15924  		v.reset(OpAMD64RORL)
 15925  		v.AddArg(x)
 15926  		v.AddArg(y)
 15927  		return true
 15928  	}
 15929  	return false
 15930  }
// rewriteValueAMD64_OpAMD64ORL_20 applies the next batch of generated rewrite
// rules for OpAMD64ORL (continuing from rewriteValueAMD64_OpAMD64ORL_0/_10).
// Each `for { ... }` below is a single-shot pattern match: it walks one
// candidate expression tree, `break`s out on the first mismatch, and on a full
// match rewrites v in place and returns true. The rules here recognize the
// shift/negate/carry-mask idioms that open-code 32-bit rotates (rewritten to
// RORL) and 16-bit rotates (rewritten to ROLW, guarded by v.Type.Size() == 2).
// Returns false if no rule fired.
//
// NOTE(review): this file is generated from gen/AMD64.rules ("DO NOT EDIT");
// lasting comment changes belong in the generator, not here.
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		// The blank assignments (`_ = v.Args[1]` etc.) are emitted by the rule
		// generator; presumably they hoist the Args bounds check — confirm in
		// gen/rulegen.go if this matters.
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		// 16-bit rotate: only valid when the value really is 2 bytes wide.
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// No rule in this batch matched; the dispatcher falls through to _30.
	return false
}
// rewriteValueAMD64_OpAMD64ORL_30 tries the next batch of generated rewrite
// rules for OpAMD64ORL: recognition of OR-of-shift trees that implement a
// rotate by a variable amount, collapsing them to a single rotate op
// (ROLW/RORW when v is 2 bytes wide, ROLB when 1 byte wide). Rules are
// attempted in source order; the first match rewrites v in place (via
// v.reset + AddArg) and returns true. Returns false when no rule applies so
// the dispatcher can fall through to the next rule block.
//
// NOTE(review): this file is generated from gen/AMD64.rules — fix the rules
// there and regenerate rather than editing this function by hand.
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// Rules 1-2: 16-bit rotate left by variable amount, two operand orders.
	// The generic lowering is (x << (y&15)) | ((x >> (16-(y&15))) & mask),
	// where the SBBLcarrymask term zeroes the right-shift contribution when
	// its count would be out of range (y&15 == 0); the whole tree is ROLW x y.
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// Same ROLW pattern with the ANDL operands commuted (mask first, shift second).
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// Rules 3-6: 16-bit rotate right. These trees carry no SBBLcarrymask
	// term and collapse directly to RORW; the four variants differ only in
	// operand order and in whether the count masking uses 64-bit (Q) or
	// 32-bit (L) ops.
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// Rules 7-10: the analogous 8-bit rotate-left patterns (count mask 7,
	// width 8, carry mask zeroing the SHRB term) in four operand orders,
	// all collapsing to ROLB.
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// No rule in this batch matched; let the dispatcher try the next batch.
	return false
}
 17553  func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
 17554  	b := v.Block
 17555  	_ = b
 17556  	typ := &b.Func.Config.Types
 17557  	_ = typ
 17558  	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
 17559  	// cond: v.Type.Size() == 1
 17560  	// result: (ROLB x y)
 17561  	for {
 17562  		_ = v.Args[1]
 17563  		v_0 := v.Args[0]
 17564  		if v_0.Op != OpAMD64SHLL {
 17565  			break
 17566  		}
 17567  		_ = v_0.Args[1]
 17568  		x := v_0.Args[0]
 17569  		v_0_1 := v_0.Args[1]
 17570  		if v_0_1.Op != OpAMD64ANDLconst {
 17571  			break
 17572  		}
 17573  		if v_0_1.AuxInt != 7 {
 17574  			break
 17575  		}
 17576  		y := v_0_1.Args[0]
 17577  		v_1 := v.Args[1]
 17578  		if v_1.Op != OpAMD64ANDL {
 17579  			break
 17580  		}
 17581  		_ = v_1.Args[1]
 17582  		v_1_0 := v_1.Args[0]
 17583  		if v_1_0.Op != OpAMD64SHRB {
 17584  			break
 17585  		}
 17586  		_ = v_1_0.Args[1]
 17587  		if x != v_1_0.Args[0] {
 17588  			break
 17589  		}
 17590  		v_1_0_1 := v_1_0.Args[1]
 17591  		if v_1_0_1.Op != OpAMD64NEGL {
 17592  			break
 17593  		}
 17594  		v_1_0_1_0 := v_1_0_1.Args[0]
 17595  		if v_1_0_1_0.Op != OpAMD64ADDLconst {
 17596  			break
 17597  		}
 17598  		if v_1_0_1_0.AuxInt != -8 {
 17599  			break
 17600  		}
 17601  		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
 17602  		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
 17603  			break
 17604  		}
 17605  		if v_1_0_1_0_0.AuxInt != 7 {
 17606  			break
 17607  		}
 17608  		if y != v_1_0_1_0_0.Args[0] {
 17609  			break
 17610  		}
 17611  		v_1_1 := v_1.Args[1]
 17612  		if v_1_1.Op != OpAMD64SBBLcarrymask {
 17613  			break
 17614  		}
 17615  		v_1_1_0 := v_1_1.Args[0]
 17616  		if v_1_1_0.Op != OpAMD64CMPLconst {
 17617  			break
 17618  		}
 17619  		if v_1_1_0.AuxInt != 8 {
 17620  			break
 17621  		}
 17622  		v_1_1_0_0 := v_1_1_0.Args[0]
 17623  		if v_1_1_0_0.Op != OpAMD64NEGL {
 17624  			break
 17625  		}
 17626  		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
 17627  		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
 17628  			break
 17629  		}
 17630  		if v_1_1_0_0_0.AuxInt != -8 {
 17631  			break
 17632  		}
 17633  		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
 17634  		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
 17635  			break
 17636  		}
 17637  		if v_1_1_0_0_0_0.AuxInt != 7 {
 17638  			break
 17639  		}
 17640  		if y != v_1_1_0_0_0_0.Args[0] {
 17641  			break
 17642  		}
 17643  		if !(v.Type.Size() == 1) {
 17644  			break
 17645  		}
 17646  		v.reset(OpAMD64ROLB)
 17647  		v.AddArg(x)
 17648  		v.AddArg(y)
 17649  		return true
 17650  	}
 17651  	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
 17652  	// cond: v.Type.Size() == 1
 17653  	// result: (ROLB x y)
 17654  	for {
 17655  		_ = v.Args[1]
 17656  		v_0 := v.Args[0]
 17657  		if v_0.Op != OpAMD64SHLL {
 17658  			break
 17659  		}
 17660  		_ = v_0.Args[1]
 17661  		x := v_0.Args[0]
 17662  		v_0_1 := v_0.Args[1]
 17663  		if v_0_1.Op != OpAMD64ANDLconst {
 17664  			break
 17665  		}
 17666  		if v_0_1.AuxInt != 7 {
 17667  			break
 17668  		}
 17669  		y := v_0_1.Args[0]
 17670  		v_1 := v.Args[1]
 17671  		if v_1.Op != OpAMD64ANDL {
 17672  			break
 17673  		}
 17674  		_ = v_1.Args[1]
 17675  		v_1_0 := v_1.Args[0]
 17676  		if v_1_0.Op != OpAMD64SBBLcarrymask {
 17677  			break
 17678  		}
 17679  		v_1_0_0 := v_1_0.Args[0]
 17680  		if v_1_0_0.Op != OpAMD64CMPLconst {
 17681  			break
 17682  		}
 17683  		if v_1_0_0.AuxInt != 8 {
 17684  			break
 17685  		}
 17686  		v_1_0_0_0 := v_1_0_0.Args[0]
 17687  		if v_1_0_0_0.Op != OpAMD64NEGL {
 17688  			break
 17689  		}
 17690  		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
 17691  		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
 17692  			break
 17693  		}
 17694  		if v_1_0_0_0_0.AuxInt != -8 {
 17695  			break
 17696  		}
 17697  		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
 17698  		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
 17699  			break
 17700  		}
 17701  		if v_1_0_0_0_0_0.AuxInt != 7 {
 17702  			break
 17703  		}
 17704  		if y != v_1_0_0_0_0_0.Args[0] {
 17705  			break
 17706  		}
 17707  		v_1_1 := v_1.Args[1]
 17708  		if v_1_1.Op != OpAMD64SHRB {
 17709  			break
 17710  		}
 17711  		_ = v_1_1.Args[1]
 17712  		if x != v_1_1.Args[0] {
 17713  			break
 17714  		}
 17715  		v_1_1_1 := v_1_1.Args[1]
 17716  		if v_1_1_1.Op != OpAMD64NEGL {
 17717  			break
 17718  		}
 17719  		v_1_1_1_0 := v_1_1_1.Args[0]
 17720  		if v_1_1_1_0.Op != OpAMD64ADDLconst {
 17721  			break
 17722  		}
 17723  		if v_1_1_1_0.AuxInt != -8 {
 17724  			break
 17725  		}
 17726  		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
 17727  		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
 17728  			break
 17729  		}
 17730  		if v_1_1_1_0_0.AuxInt != 7 {
 17731  			break
 17732  		}
 17733  		if y != v_1_1_1_0_0.Args[0] {
 17734  			break
 17735  		}
 17736  		if !(v.Type.Size() == 1) {
 17737  			break
 17738  		}
 17739  		v.reset(OpAMD64ROLB)
 17740  		v.AddArg(x)
 17741  		v.AddArg(y)
 17742  		return true
 17743  	}
 17744  	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
 17745  	// cond: v.Type.Size() == 1
 17746  	// result: (ROLB x y)
 17747  	for {
 17748  		_ = v.Args[1]
 17749  		v_0 := v.Args[0]
 17750  		if v_0.Op != OpAMD64ANDL {
 17751  			break
 17752  		}
 17753  		_ = v_0.Args[1]
 17754  		v_0_0 := v_0.Args[0]
 17755  		if v_0_0.Op != OpAMD64SHRB {
 17756  			break
 17757  		}
 17758  		_ = v_0_0.Args[1]
 17759  		x := v_0_0.Args[0]
 17760  		v_0_0_1 := v_0_0.Args[1]
 17761  		if v_0_0_1.Op != OpAMD64NEGL {
 17762  			break
 17763  		}
 17764  		v_0_0_1_0 := v_0_0_1.Args[0]
 17765  		if v_0_0_1_0.Op != OpAMD64ADDLconst {
 17766  			break
 17767  		}
 17768  		if v_0_0_1_0.AuxInt != -8 {
 17769  			break
 17770  		}
 17771  		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
 17772  		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
 17773  			break
 17774  		}
 17775  		if v_0_0_1_0_0.AuxInt != 7 {
 17776  			break
 17777  		}
 17778  		y := v_0_0_1_0_0.Args[0]
 17779  		v_0_1 := v_0.Args[1]
 17780  		if v_0_1.Op != OpAMD64SBBLcarrymask {
 17781  			break
 17782  		}
 17783  		v_0_1_0 := v_0_1.Args[0]
 17784  		if v_0_1_0.Op != OpAMD64CMPLconst {
 17785  			break
 17786  		}
 17787  		if v_0_1_0.AuxInt != 8 {
 17788  			break
 17789  		}
 17790  		v_0_1_0_0 := v_0_1_0.Args[0]
 17791  		if v_0_1_0_0.Op != OpAMD64NEGL {
 17792  			break
 17793  		}
 17794  		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
 17795  		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
 17796  			break
 17797  		}
 17798  		if v_0_1_0_0_0.AuxInt != -8 {
 17799  			break
 17800  		}
 17801  		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
 17802  		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
 17803  			break
 17804  		}
 17805  		if v_0_1_0_0_0_0.AuxInt != 7 {
 17806  			break
 17807  		}
 17808  		if y != v_0_1_0_0_0_0.Args[0] {
 17809  			break
 17810  		}
 17811  		v_1 := v.Args[1]
 17812  		if v_1.Op != OpAMD64SHLL {
 17813  			break
 17814  		}
 17815  		_ = v_1.Args[1]
 17816  		if x != v_1.Args[0] {
 17817  			break
 17818  		}
 17819  		v_1_1 := v_1.Args[1]
 17820  		if v_1_1.Op != OpAMD64ANDLconst {
 17821  			break
 17822  		}
 17823  		if v_1_1.AuxInt != 7 {
 17824  			break
 17825  		}
 17826  		if y != v_1_1.Args[0] {
 17827  			break
 17828  		}
 17829  		if !(v.Type.Size() == 1) {
 17830  			break
 17831  		}
 17832  		v.reset(OpAMD64ROLB)
 17833  		v.AddArg(x)
 17834  		v.AddArg(y)
 17835  		return true
 17836  	}
 17837  	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
 17838  	// cond: v.Type.Size() == 1
 17839  	// result: (ROLB x y)
 17840  	for {
 17841  		_ = v.Args[1]
 17842  		v_0 := v.Args[0]
 17843  		if v_0.Op != OpAMD64ANDL {
 17844  			break
 17845  		}
 17846  		_ = v_0.Args[1]
 17847  		v_0_0 := v_0.Args[0]
 17848  		if v_0_0.Op != OpAMD64SBBLcarrymask {
 17849  			break
 17850  		}
 17851  		v_0_0_0 := v_0_0.Args[0]
 17852  		if v_0_0_0.Op != OpAMD64CMPLconst {
 17853  			break
 17854  		}
 17855  		if v_0_0_0.AuxInt != 8 {
 17856  			break
 17857  		}
 17858  		v_0_0_0_0 := v_0_0_0.Args[0]
 17859  		if v_0_0_0_0.Op != OpAMD64NEGL {
 17860  			break
 17861  		}
 17862  		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
 17863  		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
 17864  			break
 17865  		}
 17866  		if v_0_0_0_0_0.AuxInt != -8 {
 17867  			break
 17868  		}
 17869  		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
 17870  		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
 17871  			break
 17872  		}
 17873  		if v_0_0_0_0_0_0.AuxInt != 7 {
 17874  			break
 17875  		}
 17876  		y := v_0_0_0_0_0_0.Args[0]
 17877  		v_0_1 := v_0.Args[1]
 17878  		if v_0_1.Op != OpAMD64SHRB {
 17879  			break
 17880  		}
 17881  		_ = v_0_1.Args[1]
 17882  		x := v_0_1.Args[0]
 17883  		v_0_1_1 := v_0_1.Args[1]
 17884  		if v_0_1_1.Op != OpAMD64NEGL {
 17885  			break
 17886  		}
 17887  		v_0_1_1_0 := v_0_1_1.Args[0]
 17888  		if v_0_1_1_0.Op != OpAMD64ADDLconst {
 17889  			break
 17890  		}
 17891  		if v_0_1_1_0.AuxInt != -8 {
 17892  			break
 17893  		}
 17894  		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
 17895  		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
 17896  			break
 17897  		}
 17898  		if v_0_1_1_0_0.AuxInt != 7 {
 17899  			break
 17900  		}
 17901  		if y != v_0_1_1_0_0.Args[0] {
 17902  			break
 17903  		}
 17904  		v_1 := v.Args[1]
 17905  		if v_1.Op != OpAMD64SHLL {
 17906  			break
 17907  		}
 17908  		_ = v_1.Args[1]
 17909  		if x != v_1.Args[0] {
 17910  			break
 17911  		}
 17912  		v_1_1 := v_1.Args[1]
 17913  		if v_1_1.Op != OpAMD64ANDLconst {
 17914  			break
 17915  		}
 17916  		if v_1_1.AuxInt != 7 {
 17917  			break
 17918  		}
 17919  		if y != v_1_1.Args[0] {
 17920  			break
 17921  		}
 17922  		if !(v.Type.Size() == 1) {
 17923  			break
 17924  		}
 17925  		v.reset(OpAMD64ROLB)
 17926  		v.AddArg(x)
 17927  		v.AddArg(y)
 17928  		return true
 17929  	}
 17930  	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
 17931  	// cond: v.Type.Size() == 1
 17932  	// result: (RORB x y)
 17933  	for {
 17934  		_ = v.Args[1]
 17935  		v_0 := v.Args[0]
 17936  		if v_0.Op != OpAMD64SHRB {
 17937  			break
 17938  		}
 17939  		_ = v_0.Args[1]
 17940  		x := v_0.Args[0]
 17941  		v_0_1 := v_0.Args[1]
 17942  		if v_0_1.Op != OpAMD64ANDQconst {
 17943  			break
 17944  		}
 17945  		if v_0_1.AuxInt != 7 {
 17946  			break
 17947  		}
 17948  		y := v_0_1.Args[0]
 17949  		v_1 := v.Args[1]
 17950  		if v_1.Op != OpAMD64SHLL {
 17951  			break
 17952  		}
 17953  		_ = v_1.Args[1]
 17954  		if x != v_1.Args[0] {
 17955  			break
 17956  		}
 17957  		v_1_1 := v_1.Args[1]
 17958  		if v_1_1.Op != OpAMD64NEGQ {
 17959  			break
 17960  		}
 17961  		v_1_1_0 := v_1_1.Args[0]
 17962  		if v_1_1_0.Op != OpAMD64ADDQconst {
 17963  			break
 17964  		}
 17965  		if v_1_1_0.AuxInt != -8 {
 17966  			break
 17967  		}
 17968  		v_1_1_0_0 := v_1_1_0.Args[0]
 17969  		if v_1_1_0_0.Op != OpAMD64ANDQconst {
 17970  			break
 17971  		}
 17972  		if v_1_1_0_0.AuxInt != 7 {
 17973  			break
 17974  		}
 17975  		if y != v_1_1_0_0.Args[0] {
 17976  			break
 17977  		}
 17978  		if !(v.Type.Size() == 1) {
 17979  			break
 17980  		}
 17981  		v.reset(OpAMD64RORB)
 17982  		v.AddArg(x)
 17983  		v.AddArg(y)
 17984  		return true
 17985  	}
 17986  	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
 17987  	// cond: v.Type.Size() == 1
 17988  	// result: (RORB x y)
 17989  	for {
 17990  		_ = v.Args[1]
 17991  		v_0 := v.Args[0]
 17992  		if v_0.Op != OpAMD64SHLL {
 17993  			break
 17994  		}
 17995  		_ = v_0.Args[1]
 17996  		x := v_0.Args[0]
 17997  		v_0_1 := v_0.Args[1]
 17998  		if v_0_1.Op != OpAMD64NEGQ {
 17999  			break
 18000  		}
 18001  		v_0_1_0 := v_0_1.Args[0]
 18002  		if v_0_1_0.Op != OpAMD64ADDQconst {
 18003  			break
 18004  		}
 18005  		if v_0_1_0.AuxInt != -8 {
 18006  			break
 18007  		}
 18008  		v_0_1_0_0 := v_0_1_0.Args[0]
 18009  		if v_0_1_0_0.Op != OpAMD64ANDQconst {
 18010  			break
 18011  		}
 18012  		if v_0_1_0_0.AuxInt != 7 {
 18013  			break
 18014  		}
 18015  		y := v_0_1_0_0.Args[0]
 18016  		v_1 := v.Args[1]
 18017  		if v_1.Op != OpAMD64SHRB {
 18018  			break
 18019  		}
 18020  		_ = v_1.Args[1]
 18021  		if x != v_1.Args[0] {
 18022  			break
 18023  		}
 18024  		v_1_1 := v_1.Args[1]
 18025  		if v_1_1.Op != OpAMD64ANDQconst {
 18026  			break
 18027  		}
 18028  		if v_1_1.AuxInt != 7 {
 18029  			break
 18030  		}
 18031  		if y != v_1_1.Args[0] {
 18032  			break
 18033  		}
 18034  		if !(v.Type.Size() == 1) {
 18035  			break
 18036  		}
 18037  		v.reset(OpAMD64RORB)
 18038  		v.AddArg(x)
 18039  		v.AddArg(y)
 18040  		return true
 18041  	}
 18042  	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
 18043  	// cond: v.Type.Size() == 1
 18044  	// result: (RORB x y)
 18045  	for {
 18046  		_ = v.Args[1]
 18047  		v_0 := v.Args[0]
 18048  		if v_0.Op != OpAMD64SHRB {
 18049  			break
 18050  		}
 18051  		_ = v_0.Args[1]
 18052  		x := v_0.Args[0]
 18053  		v_0_1 := v_0.Args[1]
 18054  		if v_0_1.Op != OpAMD64ANDLconst {
 18055  			break
 18056  		}
 18057  		if v_0_1.AuxInt != 7 {
 18058  			break
 18059  		}
 18060  		y := v_0_1.Args[0]
 18061  		v_1 := v.Args[1]
 18062  		if v_1.Op != OpAMD64SHLL {
 18063  			break
 18064  		}
 18065  		_ = v_1.Args[1]
 18066  		if x != v_1.Args[0] {
 18067  			break
 18068  		}
 18069  		v_1_1 := v_1.Args[1]
 18070  		if v_1_1.Op != OpAMD64NEGL {
 18071  			break
 18072  		}
 18073  		v_1_1_0 := v_1_1.Args[0]
 18074  		if v_1_1_0.Op != OpAMD64ADDLconst {
 18075  			break
 18076  		}
 18077  		if v_1_1_0.AuxInt != -8 {
 18078  			break
 18079  		}
 18080  		v_1_1_0_0 := v_1_1_0.Args[0]
 18081  		if v_1_1_0_0.Op != OpAMD64ANDLconst {
 18082  			break
 18083  		}
 18084  		if v_1_1_0_0.AuxInt != 7 {
 18085  			break
 18086  		}
 18087  		if y != v_1_1_0_0.Args[0] {
 18088  			break
 18089  		}
 18090  		if !(v.Type.Size() == 1) {
 18091  			break
 18092  		}
 18093  		v.reset(OpAMD64RORB)
 18094  		v.AddArg(x)
 18095  		v.AddArg(y)
 18096  		return true
 18097  	}
 18098  	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
 18099  	// cond: v.Type.Size() == 1
 18100  	// result: (RORB x y)
 18101  	for {
 18102  		_ = v.Args[1]
 18103  		v_0 := v.Args[0]
 18104  		if v_0.Op != OpAMD64SHLL {
 18105  			break
 18106  		}
 18107  		_ = v_0.Args[1]
 18108  		x := v_0.Args[0]
 18109  		v_0_1 := v_0.Args[1]
 18110  		if v_0_1.Op != OpAMD64NEGL {
 18111  			break
 18112  		}
 18113  		v_0_1_0 := v_0_1.Args[0]
 18114  		if v_0_1_0.Op != OpAMD64ADDLconst {
 18115  			break
 18116  		}
 18117  		if v_0_1_0.AuxInt != -8 {
 18118  			break
 18119  		}
 18120  		v_0_1_0_0 := v_0_1_0.Args[0]
 18121  		if v_0_1_0_0.Op != OpAMD64ANDLconst {
 18122  			break
 18123  		}
 18124  		if v_0_1_0_0.AuxInt != 7 {
 18125  			break
 18126  		}
 18127  		y := v_0_1_0_0.Args[0]
 18128  		v_1 := v.Args[1]
 18129  		if v_1.Op != OpAMD64SHRB {
 18130  			break
 18131  		}
 18132  		_ = v_1.Args[1]
 18133  		if x != v_1.Args[0] {
 18134  			break
 18135  		}
 18136  		v_1_1 := v_1.Args[1]
 18137  		if v_1_1.Op != OpAMD64ANDLconst {
 18138  			break
 18139  		}
 18140  		if v_1_1.AuxInt != 7 {
 18141  			break
 18142  		}
 18143  		if y != v_1_1.Args[0] {
 18144  			break
 18145  		}
 18146  		if !(v.Type.Size() == 1) {
 18147  			break
 18148  		}
 18149  		v.reset(OpAMD64RORB)
 18150  		v.AddArg(x)
 18151  		v.AddArg(y)
 18152  		return true
 18153  	}
 18154  	// match: (ORL x x)
 18155  	// cond:
 18156  	// result: x
 18157  	for {
 18158  		_ = v.Args[1]
 18159  		x := v.Args[0]
 18160  		if x != v.Args[1] {
 18161  			break
 18162  		}
 18163  		v.reset(OpCopy)
 18164  		v.Type = x.Type
 18165  		v.AddArg(x)
 18166  		return true
 18167  	}
 18168  	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
 18169  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 18170  	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
 18171  	for {
 18172  		_ = v.Args[1]
 18173  		x0 := v.Args[0]
 18174  		if x0.Op != OpAMD64MOVBload {
 18175  			break
 18176  		}
 18177  		i0 := x0.AuxInt
 18178  		s := x0.Aux
 18179  		_ = x0.Args[1]
 18180  		p := x0.Args[0]
 18181  		mem := x0.Args[1]
 18182  		sh := v.Args[1]
 18183  		if sh.Op != OpAMD64SHLLconst {
 18184  			break
 18185  		}
 18186  		if sh.AuxInt != 8 {
 18187  			break
 18188  		}
 18189  		x1 := sh.Args[0]
 18190  		if x1.Op != OpAMD64MOVBload {
 18191  			break
 18192  		}
 18193  		i1 := x1.AuxInt
 18194  		if x1.Aux != s {
 18195  			break
 18196  		}
 18197  		_ = x1.Args[1]
 18198  		if p != x1.Args[0] {
 18199  			break
 18200  		}
 18201  		if mem != x1.Args[1] {
 18202  			break
 18203  		}
 18204  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 18205  			break
 18206  		}
 18207  		b = mergePoint(b, x0, x1)
 18208  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 18209  		v.reset(OpCopy)
 18210  		v.AddArg(v0)
 18211  		v0.AuxInt = i0
 18212  		v0.Aux = s
 18213  		v0.AddArg(p)
 18214  		v0.AddArg(mem)
 18215  		return true
 18216  	}
 18217  	return false
 18218  }
// rewriteValueAMD64_OpAMD64ORL_50 applies one slice of the machine-generated
// ORL rewrite rules (produced from gen/AMD64.rules; this file must not be
// edited by hand). Each `for { ... }` block below attempts exactly one pattern
// match against v: `break` abandons that rule and falls through to the next
// one, while a successful match rewrites v in place and returns true. If no
// rule in this slice matches, the function returns false so the dispatcher in
// rewriteValueAMD64 can try the next slice of ORL rules.
//
// The rules in this slice all merge pairs of narrow loads (plain or indexed)
// that together form one wider contiguous load, replacing them with a single
// MOVWload/MOVLload (or the idx1 variants), possibly under a shift inside a
// larger OR chain.
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// Merge two adjacent byte loads (shifted operand first) into one 16-bit load.
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		// NOTE(review): the generator emits `_ = v.Args[1]` before indexing,
		// presumably to hoist the slice bounds check — confirm against gen/.
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		// Both loads must share the same base pointer and memory state.
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		// Build the merged load in the block chosen by mergePoint (a block
		// where both original loads can legally be combined — see mergePoint).
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// Merge two adjacent 16-bit loads into one 32-bit load.
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// Same as the previous rule with the ORL operands commuted.
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// Merge two shifted adjacent byte loads inside a larger OR chain into a
	// single shifted 16-bit load OR'd with the rest of the chain (y).
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// Operand-order variant of the previous rule: y first in the inner ORL.
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// Operand-order variant: the inner ORL is the first operand of v.
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// Operand-order variant: inner ORL first in v, with y first inside it.
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// Indexed-addressing variant: merge two adjacent MOVBloadidx1 loads into
	// a single MOVWloadidx1.
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// Same as the previous rule, tolerating swapped (idx, p) operands in x0.
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// Same as above, tolerating swapped (idx, p) operands in x1 instead.
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// No rule in this slice matched; the dispatcher tries the next slice.
	return false
}
 18791  func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
 18792  	b := v.Block
 18793  	_ = b
 18794  	typ := &b.Func.Config.Types
 18795  	_ = typ
 18796  	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
 18797  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 18798  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 18799  	for {
 18800  		_ = v.Args[1]
 18801  		x0 := v.Args[0]
 18802  		if x0.Op != OpAMD64MOVBloadidx1 {
 18803  			break
 18804  		}
 18805  		i0 := x0.AuxInt
 18806  		s := x0.Aux
 18807  		_ = x0.Args[2]
 18808  		idx := x0.Args[0]
 18809  		p := x0.Args[1]
 18810  		mem := x0.Args[2]
 18811  		sh := v.Args[1]
 18812  		if sh.Op != OpAMD64SHLLconst {
 18813  			break
 18814  		}
 18815  		if sh.AuxInt != 8 {
 18816  			break
 18817  		}
 18818  		x1 := sh.Args[0]
 18819  		if x1.Op != OpAMD64MOVBloadidx1 {
 18820  			break
 18821  		}
 18822  		i1 := x1.AuxInt
 18823  		if x1.Aux != s {
 18824  			break
 18825  		}
 18826  		_ = x1.Args[2]
 18827  		if idx != x1.Args[0] {
 18828  			break
 18829  		}
 18830  		if p != x1.Args[1] {
 18831  			break
 18832  		}
 18833  		if mem != x1.Args[2] {
 18834  			break
 18835  		}
 18836  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 18837  			break
 18838  		}
 18839  		b = mergePoint(b, x0, x1)
 18840  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 18841  		v.reset(OpCopy)
 18842  		v.AddArg(v0)
 18843  		v0.AuxInt = i0
 18844  		v0.Aux = s
 18845  		v0.AddArg(p)
 18846  		v0.AddArg(idx)
 18847  		v0.AddArg(mem)
 18848  		return true
 18849  	}
 18850  	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
 18851  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 18852  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 18853  	for {
 18854  		_ = v.Args[1]
 18855  		sh := v.Args[0]
 18856  		if sh.Op != OpAMD64SHLLconst {
 18857  			break
 18858  		}
 18859  		if sh.AuxInt != 8 {
 18860  			break
 18861  		}
 18862  		x1 := sh.Args[0]
 18863  		if x1.Op != OpAMD64MOVBloadidx1 {
 18864  			break
 18865  		}
 18866  		i1 := x1.AuxInt
 18867  		s := x1.Aux
 18868  		_ = x1.Args[2]
 18869  		p := x1.Args[0]
 18870  		idx := x1.Args[1]
 18871  		mem := x1.Args[2]
 18872  		x0 := v.Args[1]
 18873  		if x0.Op != OpAMD64MOVBloadidx1 {
 18874  			break
 18875  		}
 18876  		i0 := x0.AuxInt
 18877  		if x0.Aux != s {
 18878  			break
 18879  		}
 18880  		_ = x0.Args[2]
 18881  		if p != x0.Args[0] {
 18882  			break
 18883  		}
 18884  		if idx != x0.Args[1] {
 18885  			break
 18886  		}
 18887  		if mem != x0.Args[2] {
 18888  			break
 18889  		}
 18890  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 18891  			break
 18892  		}
 18893  		b = mergePoint(b, x0, x1)
 18894  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 18895  		v.reset(OpCopy)
 18896  		v.AddArg(v0)
 18897  		v0.AuxInt = i0
 18898  		v0.Aux = s
 18899  		v0.AddArg(p)
 18900  		v0.AddArg(idx)
 18901  		v0.AddArg(mem)
 18902  		return true
 18903  	}
 18904  	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
 18905  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 18906  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 18907  	for {
 18908  		_ = v.Args[1]
 18909  		sh := v.Args[0]
 18910  		if sh.Op != OpAMD64SHLLconst {
 18911  			break
 18912  		}
 18913  		if sh.AuxInt != 8 {
 18914  			break
 18915  		}
 18916  		x1 := sh.Args[0]
 18917  		if x1.Op != OpAMD64MOVBloadidx1 {
 18918  			break
 18919  		}
 18920  		i1 := x1.AuxInt
 18921  		s := x1.Aux
 18922  		_ = x1.Args[2]
 18923  		idx := x1.Args[0]
 18924  		p := x1.Args[1]
 18925  		mem := x1.Args[2]
 18926  		x0 := v.Args[1]
 18927  		if x0.Op != OpAMD64MOVBloadidx1 {
 18928  			break
 18929  		}
 18930  		i0 := x0.AuxInt
 18931  		if x0.Aux != s {
 18932  			break
 18933  		}
 18934  		_ = x0.Args[2]
 18935  		if p != x0.Args[0] {
 18936  			break
 18937  		}
 18938  		if idx != x0.Args[1] {
 18939  			break
 18940  		}
 18941  		if mem != x0.Args[2] {
 18942  			break
 18943  		}
 18944  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 18945  			break
 18946  		}
 18947  		b = mergePoint(b, x0, x1)
 18948  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 18949  		v.reset(OpCopy)
 18950  		v.AddArg(v0)
 18951  		v0.AuxInt = i0
 18952  		v0.Aux = s
 18953  		v0.AddArg(p)
 18954  		v0.AddArg(idx)
 18955  		v0.AddArg(mem)
 18956  		return true
 18957  	}
 18958  	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
 18959  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 18960  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 18961  	for {
 18962  		_ = v.Args[1]
 18963  		sh := v.Args[0]
 18964  		if sh.Op != OpAMD64SHLLconst {
 18965  			break
 18966  		}
 18967  		if sh.AuxInt != 8 {
 18968  			break
 18969  		}
 18970  		x1 := sh.Args[0]
 18971  		if x1.Op != OpAMD64MOVBloadidx1 {
 18972  			break
 18973  		}
 18974  		i1 := x1.AuxInt
 18975  		s := x1.Aux
 18976  		_ = x1.Args[2]
 18977  		p := x1.Args[0]
 18978  		idx := x1.Args[1]
 18979  		mem := x1.Args[2]
 18980  		x0 := v.Args[1]
 18981  		if x0.Op != OpAMD64MOVBloadidx1 {
 18982  			break
 18983  		}
 18984  		i0 := x0.AuxInt
 18985  		if x0.Aux != s {
 18986  			break
 18987  		}
 18988  		_ = x0.Args[2]
 18989  		if idx != x0.Args[0] {
 18990  			break
 18991  		}
 18992  		if p != x0.Args[1] {
 18993  			break
 18994  		}
 18995  		if mem != x0.Args[2] {
 18996  			break
 18997  		}
 18998  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 18999  			break
 19000  		}
 19001  		b = mergePoint(b, x0, x1)
 19002  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 19003  		v.reset(OpCopy)
 19004  		v.AddArg(v0)
 19005  		v0.AuxInt = i0
 19006  		v0.Aux = s
 19007  		v0.AddArg(p)
 19008  		v0.AddArg(idx)
 19009  		v0.AddArg(mem)
 19010  		return true
 19011  	}
 19012  	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
 19013  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 19014  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 19015  	for {
 19016  		_ = v.Args[1]
 19017  		sh := v.Args[0]
 19018  		if sh.Op != OpAMD64SHLLconst {
 19019  			break
 19020  		}
 19021  		if sh.AuxInt != 8 {
 19022  			break
 19023  		}
 19024  		x1 := sh.Args[0]
 19025  		if x1.Op != OpAMD64MOVBloadidx1 {
 19026  			break
 19027  		}
 19028  		i1 := x1.AuxInt
 19029  		s := x1.Aux
 19030  		_ = x1.Args[2]
 19031  		idx := x1.Args[0]
 19032  		p := x1.Args[1]
 19033  		mem := x1.Args[2]
 19034  		x0 := v.Args[1]
 19035  		if x0.Op != OpAMD64MOVBloadidx1 {
 19036  			break
 19037  		}
 19038  		i0 := x0.AuxInt
 19039  		if x0.Aux != s {
 19040  			break
 19041  		}
 19042  		_ = x0.Args[2]
 19043  		if idx != x0.Args[0] {
 19044  			break
 19045  		}
 19046  		if p != x0.Args[1] {
 19047  			break
 19048  		}
 19049  		if mem != x0.Args[2] {
 19050  			break
 19051  		}
 19052  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 19053  			break
 19054  		}
 19055  		b = mergePoint(b, x0, x1)
 19056  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 19057  		v.reset(OpCopy)
 19058  		v.AddArg(v0)
 19059  		v0.AuxInt = i0
 19060  		v0.Aux = s
 19061  		v0.AddArg(p)
 19062  		v0.AddArg(idx)
 19063  		v0.AddArg(mem)
 19064  		return true
 19065  	}
 19066  	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 19067  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 19068  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 19069  	for {
 19070  		_ = v.Args[1]
 19071  		x0 := v.Args[0]
 19072  		if x0.Op != OpAMD64MOVWloadidx1 {
 19073  			break
 19074  		}
 19075  		i0 := x0.AuxInt
 19076  		s := x0.Aux
 19077  		_ = x0.Args[2]
 19078  		p := x0.Args[0]
 19079  		idx := x0.Args[1]
 19080  		mem := x0.Args[2]
 19081  		sh := v.Args[1]
 19082  		if sh.Op != OpAMD64SHLLconst {
 19083  			break
 19084  		}
 19085  		if sh.AuxInt != 16 {
 19086  			break
 19087  		}
 19088  		x1 := sh.Args[0]
 19089  		if x1.Op != OpAMD64MOVWloadidx1 {
 19090  			break
 19091  		}
 19092  		i1 := x1.AuxInt
 19093  		if x1.Aux != s {
 19094  			break
 19095  		}
 19096  		_ = x1.Args[2]
 19097  		if p != x1.Args[0] {
 19098  			break
 19099  		}
 19100  		if idx != x1.Args[1] {
 19101  			break
 19102  		}
 19103  		if mem != x1.Args[2] {
 19104  			break
 19105  		}
 19106  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 19107  			break
 19108  		}
 19109  		b = mergePoint(b, x0, x1)
 19110  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 19111  		v.reset(OpCopy)
 19112  		v.AddArg(v0)
 19113  		v0.AuxInt = i0
 19114  		v0.Aux = s
 19115  		v0.AddArg(p)
 19116  		v0.AddArg(idx)
 19117  		v0.AddArg(mem)
 19118  		return true
 19119  	}
 19120  	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 19121  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 19122  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 19123  	for {
 19124  		_ = v.Args[1]
 19125  		x0 := v.Args[0]
 19126  		if x0.Op != OpAMD64MOVWloadidx1 {
 19127  			break
 19128  		}
 19129  		i0 := x0.AuxInt
 19130  		s := x0.Aux
 19131  		_ = x0.Args[2]
 19132  		idx := x0.Args[0]
 19133  		p := x0.Args[1]
 19134  		mem := x0.Args[2]
 19135  		sh := v.Args[1]
 19136  		if sh.Op != OpAMD64SHLLconst {
 19137  			break
 19138  		}
 19139  		if sh.AuxInt != 16 {
 19140  			break
 19141  		}
 19142  		x1 := sh.Args[0]
 19143  		if x1.Op != OpAMD64MOVWloadidx1 {
 19144  			break
 19145  		}
 19146  		i1 := x1.AuxInt
 19147  		if x1.Aux != s {
 19148  			break
 19149  		}
 19150  		_ = x1.Args[2]
 19151  		if p != x1.Args[0] {
 19152  			break
 19153  		}
 19154  		if idx != x1.Args[1] {
 19155  			break
 19156  		}
 19157  		if mem != x1.Args[2] {
 19158  			break
 19159  		}
 19160  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 19161  			break
 19162  		}
 19163  		b = mergePoint(b, x0, x1)
 19164  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 19165  		v.reset(OpCopy)
 19166  		v.AddArg(v0)
 19167  		v0.AuxInt = i0
 19168  		v0.Aux = s
 19169  		v0.AddArg(p)
 19170  		v0.AddArg(idx)
 19171  		v0.AddArg(mem)
 19172  		return true
 19173  	}
 19174  	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
 19175  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 19176  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 19177  	for {
 19178  		_ = v.Args[1]
 19179  		x0 := v.Args[0]
 19180  		if x0.Op != OpAMD64MOVWloadidx1 {
 19181  			break
 19182  		}
 19183  		i0 := x0.AuxInt
 19184  		s := x0.Aux
 19185  		_ = x0.Args[2]
 19186  		p := x0.Args[0]
 19187  		idx := x0.Args[1]
 19188  		mem := x0.Args[2]
 19189  		sh := v.Args[1]
 19190  		if sh.Op != OpAMD64SHLLconst {
 19191  			break
 19192  		}
 19193  		if sh.AuxInt != 16 {
 19194  			break
 19195  		}
 19196  		x1 := sh.Args[0]
 19197  		if x1.Op != OpAMD64MOVWloadidx1 {
 19198  			break
 19199  		}
 19200  		i1 := x1.AuxInt
 19201  		if x1.Aux != s {
 19202  			break
 19203  		}
 19204  		_ = x1.Args[2]
 19205  		if idx != x1.Args[0] {
 19206  			break
 19207  		}
 19208  		if p != x1.Args[1] {
 19209  			break
 19210  		}
 19211  		if mem != x1.Args[2] {
 19212  			break
 19213  		}
 19214  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 19215  			break
 19216  		}
 19217  		b = mergePoint(b, x0, x1)
 19218  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 19219  		v.reset(OpCopy)
 19220  		v.AddArg(v0)
 19221  		v0.AuxInt = i0
 19222  		v0.Aux = s
 19223  		v0.AddArg(p)
 19224  		v0.AddArg(idx)
 19225  		v0.AddArg(mem)
 19226  		return true
 19227  	}
 19228  	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
 19229  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 19230  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 19231  	for {
 19232  		_ = v.Args[1]
 19233  		x0 := v.Args[0]
 19234  		if x0.Op != OpAMD64MOVWloadidx1 {
 19235  			break
 19236  		}
 19237  		i0 := x0.AuxInt
 19238  		s := x0.Aux
 19239  		_ = x0.Args[2]
 19240  		idx := x0.Args[0]
 19241  		p := x0.Args[1]
 19242  		mem := x0.Args[2]
 19243  		sh := v.Args[1]
 19244  		if sh.Op != OpAMD64SHLLconst {
 19245  			break
 19246  		}
 19247  		if sh.AuxInt != 16 {
 19248  			break
 19249  		}
 19250  		x1 := sh.Args[0]
 19251  		if x1.Op != OpAMD64MOVWloadidx1 {
 19252  			break
 19253  		}
 19254  		i1 := x1.AuxInt
 19255  		if x1.Aux != s {
 19256  			break
 19257  		}
 19258  		_ = x1.Args[2]
 19259  		if idx != x1.Args[0] {
 19260  			break
 19261  		}
 19262  		if p != x1.Args[1] {
 19263  			break
 19264  		}
 19265  		if mem != x1.Args[2] {
 19266  			break
 19267  		}
 19268  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 19269  			break
 19270  		}
 19271  		b = mergePoint(b, x0, x1)
 19272  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 19273  		v.reset(OpCopy)
 19274  		v.AddArg(v0)
 19275  		v0.AuxInt = i0
 19276  		v0.Aux = s
 19277  		v0.AddArg(p)
 19278  		v0.AddArg(idx)
 19279  		v0.AddArg(mem)
 19280  		return true
 19281  	}
 19282  	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
 19283  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 19284  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 19285  	for {
 19286  		_ = v.Args[1]
 19287  		sh := v.Args[0]
 19288  		if sh.Op != OpAMD64SHLLconst {
 19289  			break
 19290  		}
 19291  		if sh.AuxInt != 16 {
 19292  			break
 19293  		}
 19294  		x1 := sh.Args[0]
 19295  		if x1.Op != OpAMD64MOVWloadidx1 {
 19296  			break
 19297  		}
 19298  		i1 := x1.AuxInt
 19299  		s := x1.Aux
 19300  		_ = x1.Args[2]
 19301  		p := x1.Args[0]
 19302  		idx := x1.Args[1]
 19303  		mem := x1.Args[2]
 19304  		x0 := v.Args[1]
 19305  		if x0.Op != OpAMD64MOVWloadidx1 {
 19306  			break
 19307  		}
 19308  		i0 := x0.AuxInt
 19309  		if x0.Aux != s {
 19310  			break
 19311  		}
 19312  		_ = x0.Args[2]
 19313  		if p != x0.Args[0] {
 19314  			break
 19315  		}
 19316  		if idx != x0.Args[1] {
 19317  			break
 19318  		}
 19319  		if mem != x0.Args[2] {
 19320  			break
 19321  		}
 19322  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 19323  			break
 19324  		}
 19325  		b = mergePoint(b, x0, x1)
 19326  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 19327  		v.reset(OpCopy)
 19328  		v.AddArg(v0)
 19329  		v0.AuxInt = i0
 19330  		v0.Aux = s
 19331  		v0.AddArg(p)
 19332  		v0.AddArg(idx)
 19333  		v0.AddArg(mem)
 19334  		return true
 19335  	}
 19336  	return false
 19337  }
// rewriteValueAMD64_OpAMD64ORL_70 applies one machine-generated batch of the
// rewrite rules for (ORL x y) and reports whether v was rewritten.
//
// Every rule in this batch merges two narrower indexed loads, combined via
// shift-and-OR, into a single wider indexed load:
//
//   - the first three cases fuse two adjacent MOVWloadidx1 loads (low half
//     plus a 16-bit-shifted high half) into one MOVLloadidx1;
//   - the remaining cases fuse two adjacent shifted MOVBloadidx1 loads inside
//     an ORL chain into one shifted MOVWloadidx1, preserving the rest of the
//     chain (y).
//
// The near-duplicate cases differ only in commuted operand orders (p/idx
// swapped inside a load, or the ORL operands swapped); the rule generator
// expands each commutation explicitly. Each case follows the same shape:
// structurally match the operand tree, check the side conditions (adjacent
// offsets, single-use intermediates, a valid merge point), then build the
// replacement value at mergePoint(b, x0, x1) and turn v into a copy of it.
// NOTE: generated from gen/AMD64.rules — change the rules, not this file.
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		// Bounds-check hint so the indexed accesses below can't panic.
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		// x0 must address the same base/index/memory as x1 (commuted order).
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		// Build the wide load at the merge point and make v a copy of it.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// The remaining cases fuse two shifted byte loads inside an ORL chain
	// into one shifted word load, keeping the rest of the chain (y) intact.
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// No rule in this batch matched; the caller tries the next batch.
	return false
}
// rewriteValueAMD64_OpAMD64ORL_80 applies one slice of the ORL rewrite
// rules generated from gen/AMD64.rules (this file is machine-generated;
// DO NOT EDIT by hand — change the .rules file and regenerate instead).
//
// Each `for { ... }` block below implements exactly one rule. The
// generated "// match:" comment shows the operand tree being recognized,
// "// cond:" the side condition, and "// result:" the replacement value.
// The rules here merge two single-byte loads (MOVBloadidx1 / MOVBload)
// that are OR-ed together at adjacent offsets (i1 == i0+1) and adjacent
// shift amounts into one 16-bit load (MOVWloadidx1 / MOVWload); the
// consecutive cases are the operand-order permutations of the same rule
// (p/idx swapped inside each load, and y on either side of the inner ORL).
// On a successful match the rule rebuilds the result in
// mergePoint(b, x0, x1) and turns v into an OpCopy of it, returning true.
// Breaking out of a case's loop means that rule did not apply; if no rule
// applies the function returns false.
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		// From here on, x0's aux/args must alias the values already bound
		// while matching x1 — both loads must share {s}, idx, p, and mem.
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		// Build the merged 16-bit load at the merge point and make v a
		// copy of the new ORL tree.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// The final rule switches from indexed to plain loads: two adjacent
	// MOVBloads combined as (x1 | x0<<8) become one MOVWload wrapped in a
	// ROLWconst [8] (the rotate swaps the two bytes of the 16-bit load).
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// No rule in this slice matched; the caller tries the next slice.
	return false
}
 20670  func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
 20671  	b := v.Block
 20672  	_ = b
 20673  	typ := &b.Func.Config.Types
 20674  	_ = typ
 20675  	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
 20676  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 20677  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
 20678  	for {
 20679  		_ = v.Args[1]
 20680  		sh := v.Args[0]
 20681  		if sh.Op != OpAMD64SHLLconst {
 20682  			break
 20683  		}
 20684  		if sh.AuxInt != 8 {
 20685  			break
 20686  		}
 20687  		x0 := sh.Args[0]
 20688  		if x0.Op != OpAMD64MOVBload {
 20689  			break
 20690  		}
 20691  		i0 := x0.AuxInt
 20692  		s := x0.Aux
 20693  		_ = x0.Args[1]
 20694  		p := x0.Args[0]
 20695  		mem := x0.Args[1]
 20696  		x1 := v.Args[1]
 20697  		if x1.Op != OpAMD64MOVBload {
 20698  			break
 20699  		}
 20700  		i1 := x1.AuxInt
 20701  		if x1.Aux != s {
 20702  			break
 20703  		}
 20704  		_ = x1.Args[1]
 20705  		if p != x1.Args[0] {
 20706  			break
 20707  		}
 20708  		if mem != x1.Args[1] {
 20709  			break
 20710  		}
 20711  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 20712  			break
 20713  		}
 20714  		b = mergePoint(b, x0, x1)
 20715  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 20716  		v.reset(OpCopy)
 20717  		v.AddArg(v0)
 20718  		v0.AuxInt = 8
 20719  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 20720  		v1.AuxInt = i0
 20721  		v1.Aux = s
 20722  		v1.AddArg(p)
 20723  		v1.AddArg(mem)
 20724  		v0.AddArg(v1)
 20725  		return true
 20726  	}
 20727  	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
 20728  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 20729  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
 20730  	for {
 20731  		_ = v.Args[1]
 20732  		r1 := v.Args[0]
 20733  		if r1.Op != OpAMD64ROLWconst {
 20734  			break
 20735  		}
 20736  		if r1.AuxInt != 8 {
 20737  			break
 20738  		}
 20739  		x1 := r1.Args[0]
 20740  		if x1.Op != OpAMD64MOVWload {
 20741  			break
 20742  		}
 20743  		i1 := x1.AuxInt
 20744  		s := x1.Aux
 20745  		_ = x1.Args[1]
 20746  		p := x1.Args[0]
 20747  		mem := x1.Args[1]
 20748  		sh := v.Args[1]
 20749  		if sh.Op != OpAMD64SHLLconst {
 20750  			break
 20751  		}
 20752  		if sh.AuxInt != 16 {
 20753  			break
 20754  		}
 20755  		r0 := sh.Args[0]
 20756  		if r0.Op != OpAMD64ROLWconst {
 20757  			break
 20758  		}
 20759  		if r0.AuxInt != 8 {
 20760  			break
 20761  		}
 20762  		x0 := r0.Args[0]
 20763  		if x0.Op != OpAMD64MOVWload {
 20764  			break
 20765  		}
 20766  		i0 := x0.AuxInt
 20767  		if x0.Aux != s {
 20768  			break
 20769  		}
 20770  		_ = x0.Args[1]
 20771  		if p != x0.Args[0] {
 20772  			break
 20773  		}
 20774  		if mem != x0.Args[1] {
 20775  			break
 20776  		}
 20777  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 20778  			break
 20779  		}
 20780  		b = mergePoint(b, x0, x1)
 20781  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 20782  		v.reset(OpCopy)
 20783  		v.AddArg(v0)
 20784  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 20785  		v1.AuxInt = i0
 20786  		v1.Aux = s
 20787  		v1.AddArg(p)
 20788  		v1.AddArg(mem)
 20789  		v0.AddArg(v1)
 20790  		return true
 20791  	}
 20792  	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
 20793  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 20794  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
 20795  	for {
 20796  		_ = v.Args[1]
 20797  		sh := v.Args[0]
 20798  		if sh.Op != OpAMD64SHLLconst {
 20799  			break
 20800  		}
 20801  		if sh.AuxInt != 16 {
 20802  			break
 20803  		}
 20804  		r0 := sh.Args[0]
 20805  		if r0.Op != OpAMD64ROLWconst {
 20806  			break
 20807  		}
 20808  		if r0.AuxInt != 8 {
 20809  			break
 20810  		}
 20811  		x0 := r0.Args[0]
 20812  		if x0.Op != OpAMD64MOVWload {
 20813  			break
 20814  		}
 20815  		i0 := x0.AuxInt
 20816  		s := x0.Aux
 20817  		_ = x0.Args[1]
 20818  		p := x0.Args[0]
 20819  		mem := x0.Args[1]
 20820  		r1 := v.Args[1]
 20821  		if r1.Op != OpAMD64ROLWconst {
 20822  			break
 20823  		}
 20824  		if r1.AuxInt != 8 {
 20825  			break
 20826  		}
 20827  		x1 := r1.Args[0]
 20828  		if x1.Op != OpAMD64MOVWload {
 20829  			break
 20830  		}
 20831  		i1 := x1.AuxInt
 20832  		if x1.Aux != s {
 20833  			break
 20834  		}
 20835  		_ = x1.Args[1]
 20836  		if p != x1.Args[0] {
 20837  			break
 20838  		}
 20839  		if mem != x1.Args[1] {
 20840  			break
 20841  		}
 20842  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 20843  			break
 20844  		}
 20845  		b = mergePoint(b, x0, x1)
 20846  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 20847  		v.reset(OpCopy)
 20848  		v.AddArg(v0)
 20849  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 20850  		v1.AuxInt = i0
 20851  		v1.Aux = s
 20852  		v1.AddArg(p)
 20853  		v1.AddArg(mem)
 20854  		v0.AddArg(v1)
 20855  		return true
 20856  	}
 20857  	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
 20858  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 20859  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
 20860  	for {
 20861  		_ = v.Args[1]
 20862  		s0 := v.Args[0]
 20863  		if s0.Op != OpAMD64SHLLconst {
 20864  			break
 20865  		}
 20866  		j0 := s0.AuxInt
 20867  		x0 := s0.Args[0]
 20868  		if x0.Op != OpAMD64MOVBload {
 20869  			break
 20870  		}
 20871  		i0 := x0.AuxInt
 20872  		s := x0.Aux
 20873  		_ = x0.Args[1]
 20874  		p := x0.Args[0]
 20875  		mem := x0.Args[1]
 20876  		or := v.Args[1]
 20877  		if or.Op != OpAMD64ORL {
 20878  			break
 20879  		}
 20880  		_ = or.Args[1]
 20881  		s1 := or.Args[0]
 20882  		if s1.Op != OpAMD64SHLLconst {
 20883  			break
 20884  		}
 20885  		j1 := s1.AuxInt
 20886  		x1 := s1.Args[0]
 20887  		if x1.Op != OpAMD64MOVBload {
 20888  			break
 20889  		}
 20890  		i1 := x1.AuxInt
 20891  		if x1.Aux != s {
 20892  			break
 20893  		}
 20894  		_ = x1.Args[1]
 20895  		if p != x1.Args[0] {
 20896  			break
 20897  		}
 20898  		if mem != x1.Args[1] {
 20899  			break
 20900  		}
 20901  		y := or.Args[1]
 20902  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 20903  			break
 20904  		}
 20905  		b = mergePoint(b, x0, x1)
 20906  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 20907  		v.reset(OpCopy)
 20908  		v.AddArg(v0)
 20909  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 20910  		v1.AuxInt = j1
 20911  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 20912  		v2.AuxInt = 8
 20913  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 20914  		v3.AuxInt = i0
 20915  		v3.Aux = s
 20916  		v3.AddArg(p)
 20917  		v3.AddArg(mem)
 20918  		v2.AddArg(v3)
 20919  		v1.AddArg(v2)
 20920  		v0.AddArg(v1)
 20921  		v0.AddArg(y)
 20922  		return true
 20923  	}
 20924  	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
 20925  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 20926  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
 20927  	for {
 20928  		_ = v.Args[1]
 20929  		s0 := v.Args[0]
 20930  		if s0.Op != OpAMD64SHLLconst {
 20931  			break
 20932  		}
 20933  		j0 := s0.AuxInt
 20934  		x0 := s0.Args[0]
 20935  		if x0.Op != OpAMD64MOVBload {
 20936  			break
 20937  		}
 20938  		i0 := x0.AuxInt
 20939  		s := x0.Aux
 20940  		_ = x0.Args[1]
 20941  		p := x0.Args[0]
 20942  		mem := x0.Args[1]
 20943  		or := v.Args[1]
 20944  		if or.Op != OpAMD64ORL {
 20945  			break
 20946  		}
 20947  		_ = or.Args[1]
 20948  		y := or.Args[0]
 20949  		s1 := or.Args[1]
 20950  		if s1.Op != OpAMD64SHLLconst {
 20951  			break
 20952  		}
 20953  		j1 := s1.AuxInt
 20954  		x1 := s1.Args[0]
 20955  		if x1.Op != OpAMD64MOVBload {
 20956  			break
 20957  		}
 20958  		i1 := x1.AuxInt
 20959  		if x1.Aux != s {
 20960  			break
 20961  		}
 20962  		_ = x1.Args[1]
 20963  		if p != x1.Args[0] {
 20964  			break
 20965  		}
 20966  		if mem != x1.Args[1] {
 20967  			break
 20968  		}
 20969  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 20970  			break
 20971  		}
 20972  		b = mergePoint(b, x0, x1)
 20973  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 20974  		v.reset(OpCopy)
 20975  		v.AddArg(v0)
 20976  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 20977  		v1.AuxInt = j1
 20978  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 20979  		v2.AuxInt = 8
 20980  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 20981  		v3.AuxInt = i0
 20982  		v3.Aux = s
 20983  		v3.AddArg(p)
 20984  		v3.AddArg(mem)
 20985  		v2.AddArg(v3)
 20986  		v1.AddArg(v2)
 20987  		v0.AddArg(v1)
 20988  		v0.AddArg(y)
 20989  		return true
 20990  	}
 20991  	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
 20992  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 20993  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
 20994  	for {
 20995  		_ = v.Args[1]
 20996  		or := v.Args[0]
 20997  		if or.Op != OpAMD64ORL {
 20998  			break
 20999  		}
 21000  		_ = or.Args[1]
 21001  		s1 := or.Args[0]
 21002  		if s1.Op != OpAMD64SHLLconst {
 21003  			break
 21004  		}
 21005  		j1 := s1.AuxInt
 21006  		x1 := s1.Args[0]
 21007  		if x1.Op != OpAMD64MOVBload {
 21008  			break
 21009  		}
 21010  		i1 := x1.AuxInt
 21011  		s := x1.Aux
 21012  		_ = x1.Args[1]
 21013  		p := x1.Args[0]
 21014  		mem := x1.Args[1]
 21015  		y := or.Args[1]
 21016  		s0 := v.Args[1]
 21017  		if s0.Op != OpAMD64SHLLconst {
 21018  			break
 21019  		}
 21020  		j0 := s0.AuxInt
 21021  		x0 := s0.Args[0]
 21022  		if x0.Op != OpAMD64MOVBload {
 21023  			break
 21024  		}
 21025  		i0 := x0.AuxInt
 21026  		if x0.Aux != s {
 21027  			break
 21028  		}
 21029  		_ = x0.Args[1]
 21030  		if p != x0.Args[0] {
 21031  			break
 21032  		}
 21033  		if mem != x0.Args[1] {
 21034  			break
 21035  		}
 21036  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 21037  			break
 21038  		}
 21039  		b = mergePoint(b, x0, x1)
 21040  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 21041  		v.reset(OpCopy)
 21042  		v.AddArg(v0)
 21043  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 21044  		v1.AuxInt = j1
 21045  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 21046  		v2.AuxInt = 8
 21047  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 21048  		v3.AuxInt = i0
 21049  		v3.Aux = s
 21050  		v3.AddArg(p)
 21051  		v3.AddArg(mem)
 21052  		v2.AddArg(v3)
 21053  		v1.AddArg(v2)
 21054  		v0.AddArg(v1)
 21055  		v0.AddArg(y)
 21056  		return true
 21057  	}
 21058  	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
 21059  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 21060  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
 21061  	for {
 21062  		_ = v.Args[1]
 21063  		or := v.Args[0]
 21064  		if or.Op != OpAMD64ORL {
 21065  			break
 21066  		}
 21067  		_ = or.Args[1]
 21068  		y := or.Args[0]
 21069  		s1 := or.Args[1]
 21070  		if s1.Op != OpAMD64SHLLconst {
 21071  			break
 21072  		}
 21073  		j1 := s1.AuxInt
 21074  		x1 := s1.Args[0]
 21075  		if x1.Op != OpAMD64MOVBload {
 21076  			break
 21077  		}
 21078  		i1 := x1.AuxInt
 21079  		s := x1.Aux
 21080  		_ = x1.Args[1]
 21081  		p := x1.Args[0]
 21082  		mem := x1.Args[1]
 21083  		s0 := v.Args[1]
 21084  		if s0.Op != OpAMD64SHLLconst {
 21085  			break
 21086  		}
 21087  		j0 := s0.AuxInt
 21088  		x0 := s0.Args[0]
 21089  		if x0.Op != OpAMD64MOVBload {
 21090  			break
 21091  		}
 21092  		i0 := x0.AuxInt
 21093  		if x0.Aux != s {
 21094  			break
 21095  		}
 21096  		_ = x0.Args[1]
 21097  		if p != x0.Args[0] {
 21098  			break
 21099  		}
 21100  		if mem != x0.Args[1] {
 21101  			break
 21102  		}
 21103  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 21104  			break
 21105  		}
 21106  		b = mergePoint(b, x0, x1)
 21107  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 21108  		v.reset(OpCopy)
 21109  		v.AddArg(v0)
 21110  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 21111  		v1.AuxInt = j1
 21112  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 21113  		v2.AuxInt = 8
 21114  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 21115  		v3.AuxInt = i0
 21116  		v3.Aux = s
 21117  		v3.AddArg(p)
 21118  		v3.AddArg(mem)
 21119  		v2.AddArg(v3)
 21120  		v1.AddArg(v2)
 21121  		v0.AddArg(v1)
 21122  		v0.AddArg(y)
 21123  		return true
 21124  	}
 21125  	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 21126  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21127  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21128  	for {
 21129  		_ = v.Args[1]
 21130  		x1 := v.Args[0]
 21131  		if x1.Op != OpAMD64MOVBloadidx1 {
 21132  			break
 21133  		}
 21134  		i1 := x1.AuxInt
 21135  		s := x1.Aux
 21136  		_ = x1.Args[2]
 21137  		p := x1.Args[0]
 21138  		idx := x1.Args[1]
 21139  		mem := x1.Args[2]
 21140  		sh := v.Args[1]
 21141  		if sh.Op != OpAMD64SHLLconst {
 21142  			break
 21143  		}
 21144  		if sh.AuxInt != 8 {
 21145  			break
 21146  		}
 21147  		x0 := sh.Args[0]
 21148  		if x0.Op != OpAMD64MOVBloadidx1 {
 21149  			break
 21150  		}
 21151  		i0 := x0.AuxInt
 21152  		if x0.Aux != s {
 21153  			break
 21154  		}
 21155  		_ = x0.Args[2]
 21156  		if p != x0.Args[0] {
 21157  			break
 21158  		}
 21159  		if idx != x0.Args[1] {
 21160  			break
 21161  		}
 21162  		if mem != x0.Args[2] {
 21163  			break
 21164  		}
 21165  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21166  			break
 21167  		}
 21168  		b = mergePoint(b, x0, x1)
 21169  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21170  		v.reset(OpCopy)
 21171  		v.AddArg(v0)
 21172  		v0.AuxInt = 8
 21173  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21174  		v1.AuxInt = i0
 21175  		v1.Aux = s
 21176  		v1.AddArg(p)
 21177  		v1.AddArg(idx)
 21178  		v1.AddArg(mem)
 21179  		v0.AddArg(v1)
 21180  		return true
 21181  	}
 21182  	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 21183  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21184  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21185  	for {
 21186  		_ = v.Args[1]
 21187  		x1 := v.Args[0]
 21188  		if x1.Op != OpAMD64MOVBloadidx1 {
 21189  			break
 21190  		}
 21191  		i1 := x1.AuxInt
 21192  		s := x1.Aux
 21193  		_ = x1.Args[2]
 21194  		idx := x1.Args[0]
 21195  		p := x1.Args[1]
 21196  		mem := x1.Args[2]
 21197  		sh := v.Args[1]
 21198  		if sh.Op != OpAMD64SHLLconst {
 21199  			break
 21200  		}
 21201  		if sh.AuxInt != 8 {
 21202  			break
 21203  		}
 21204  		x0 := sh.Args[0]
 21205  		if x0.Op != OpAMD64MOVBloadidx1 {
 21206  			break
 21207  		}
 21208  		i0 := x0.AuxInt
 21209  		if x0.Aux != s {
 21210  			break
 21211  		}
 21212  		_ = x0.Args[2]
 21213  		if p != x0.Args[0] {
 21214  			break
 21215  		}
 21216  		if idx != x0.Args[1] {
 21217  			break
 21218  		}
 21219  		if mem != x0.Args[2] {
 21220  			break
 21221  		}
 21222  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21223  			break
 21224  		}
 21225  		b = mergePoint(b, x0, x1)
 21226  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21227  		v.reset(OpCopy)
 21228  		v.AddArg(v0)
 21229  		v0.AuxInt = 8
 21230  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21231  		v1.AuxInt = i0
 21232  		v1.Aux = s
 21233  		v1.AddArg(p)
 21234  		v1.AddArg(idx)
 21235  		v1.AddArg(mem)
 21236  		v0.AddArg(v1)
 21237  		return true
 21238  	}
 21239  	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 21240  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21241  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21242  	for {
 21243  		_ = v.Args[1]
 21244  		x1 := v.Args[0]
 21245  		if x1.Op != OpAMD64MOVBloadidx1 {
 21246  			break
 21247  		}
 21248  		i1 := x1.AuxInt
 21249  		s := x1.Aux
 21250  		_ = x1.Args[2]
 21251  		p := x1.Args[0]
 21252  		idx := x1.Args[1]
 21253  		mem := x1.Args[2]
 21254  		sh := v.Args[1]
 21255  		if sh.Op != OpAMD64SHLLconst {
 21256  			break
 21257  		}
 21258  		if sh.AuxInt != 8 {
 21259  			break
 21260  		}
 21261  		x0 := sh.Args[0]
 21262  		if x0.Op != OpAMD64MOVBloadidx1 {
 21263  			break
 21264  		}
 21265  		i0 := x0.AuxInt
 21266  		if x0.Aux != s {
 21267  			break
 21268  		}
 21269  		_ = x0.Args[2]
 21270  		if idx != x0.Args[0] {
 21271  			break
 21272  		}
 21273  		if p != x0.Args[1] {
 21274  			break
 21275  		}
 21276  		if mem != x0.Args[2] {
 21277  			break
 21278  		}
 21279  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21280  			break
 21281  		}
 21282  		b = mergePoint(b, x0, x1)
 21283  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21284  		v.reset(OpCopy)
 21285  		v.AddArg(v0)
 21286  		v0.AuxInt = 8
 21287  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21288  		v1.AuxInt = i0
 21289  		v1.Aux = s
 21290  		v1.AddArg(p)
 21291  		v1.AddArg(idx)
 21292  		v1.AddArg(mem)
 21293  		v0.AddArg(v1)
 21294  		return true
 21295  	}
 21296  	return false
 21297  }
 21298  func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
 21299  	b := v.Block
 21300  	_ = b
 21301  	typ := &b.Func.Config.Types
 21302  	_ = typ
 21303  	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 21304  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21305  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21306  	for {
 21307  		_ = v.Args[1]
 21308  		x1 := v.Args[0]
 21309  		if x1.Op != OpAMD64MOVBloadidx1 {
 21310  			break
 21311  		}
 21312  		i1 := x1.AuxInt
 21313  		s := x1.Aux
 21314  		_ = x1.Args[2]
 21315  		idx := x1.Args[0]
 21316  		p := x1.Args[1]
 21317  		mem := x1.Args[2]
 21318  		sh := v.Args[1]
 21319  		if sh.Op != OpAMD64SHLLconst {
 21320  			break
 21321  		}
 21322  		if sh.AuxInt != 8 {
 21323  			break
 21324  		}
 21325  		x0 := sh.Args[0]
 21326  		if x0.Op != OpAMD64MOVBloadidx1 {
 21327  			break
 21328  		}
 21329  		i0 := x0.AuxInt
 21330  		if x0.Aux != s {
 21331  			break
 21332  		}
 21333  		_ = x0.Args[2]
 21334  		if idx != x0.Args[0] {
 21335  			break
 21336  		}
 21337  		if p != x0.Args[1] {
 21338  			break
 21339  		}
 21340  		if mem != x0.Args[2] {
 21341  			break
 21342  		}
 21343  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21344  			break
 21345  		}
 21346  		b = mergePoint(b, x0, x1)
 21347  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21348  		v.reset(OpCopy)
 21349  		v.AddArg(v0)
 21350  		v0.AuxInt = 8
 21351  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21352  		v1.AuxInt = i0
 21353  		v1.Aux = s
 21354  		v1.AddArg(p)
 21355  		v1.AddArg(idx)
 21356  		v1.AddArg(mem)
 21357  		v0.AddArg(v1)
 21358  		return true
 21359  	}
 21360  	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
 21361  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21362  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21363  	for {
 21364  		_ = v.Args[1]
 21365  		sh := v.Args[0]
 21366  		if sh.Op != OpAMD64SHLLconst {
 21367  			break
 21368  		}
 21369  		if sh.AuxInt != 8 {
 21370  			break
 21371  		}
 21372  		x0 := sh.Args[0]
 21373  		if x0.Op != OpAMD64MOVBloadidx1 {
 21374  			break
 21375  		}
 21376  		i0 := x0.AuxInt
 21377  		s := x0.Aux
 21378  		_ = x0.Args[2]
 21379  		p := x0.Args[0]
 21380  		idx := x0.Args[1]
 21381  		mem := x0.Args[2]
 21382  		x1 := v.Args[1]
 21383  		if x1.Op != OpAMD64MOVBloadidx1 {
 21384  			break
 21385  		}
 21386  		i1 := x1.AuxInt
 21387  		if x1.Aux != s {
 21388  			break
 21389  		}
 21390  		_ = x1.Args[2]
 21391  		if p != x1.Args[0] {
 21392  			break
 21393  		}
 21394  		if idx != x1.Args[1] {
 21395  			break
 21396  		}
 21397  		if mem != x1.Args[2] {
 21398  			break
 21399  		}
 21400  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21401  			break
 21402  		}
 21403  		b = mergePoint(b, x0, x1)
 21404  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21405  		v.reset(OpCopy)
 21406  		v.AddArg(v0)
 21407  		v0.AuxInt = 8
 21408  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21409  		v1.AuxInt = i0
 21410  		v1.Aux = s
 21411  		v1.AddArg(p)
 21412  		v1.AddArg(idx)
 21413  		v1.AddArg(mem)
 21414  		v0.AddArg(v1)
 21415  		return true
 21416  	}
 21417  	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
 21418  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21419  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21420  	for {
 21421  		_ = v.Args[1]
 21422  		sh := v.Args[0]
 21423  		if sh.Op != OpAMD64SHLLconst {
 21424  			break
 21425  		}
 21426  		if sh.AuxInt != 8 {
 21427  			break
 21428  		}
 21429  		x0 := sh.Args[0]
 21430  		if x0.Op != OpAMD64MOVBloadidx1 {
 21431  			break
 21432  		}
 21433  		i0 := x0.AuxInt
 21434  		s := x0.Aux
 21435  		_ = x0.Args[2]
 21436  		idx := x0.Args[0]
 21437  		p := x0.Args[1]
 21438  		mem := x0.Args[2]
 21439  		x1 := v.Args[1]
 21440  		if x1.Op != OpAMD64MOVBloadidx1 {
 21441  			break
 21442  		}
 21443  		i1 := x1.AuxInt
 21444  		if x1.Aux != s {
 21445  			break
 21446  		}
 21447  		_ = x1.Args[2]
 21448  		if p != x1.Args[0] {
 21449  			break
 21450  		}
 21451  		if idx != x1.Args[1] {
 21452  			break
 21453  		}
 21454  		if mem != x1.Args[2] {
 21455  			break
 21456  		}
 21457  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21458  			break
 21459  		}
 21460  		b = mergePoint(b, x0, x1)
 21461  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21462  		v.reset(OpCopy)
 21463  		v.AddArg(v0)
 21464  		v0.AuxInt = 8
 21465  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21466  		v1.AuxInt = i0
 21467  		v1.Aux = s
 21468  		v1.AddArg(p)
 21469  		v1.AddArg(idx)
 21470  		v1.AddArg(mem)
 21471  		v0.AddArg(v1)
 21472  		return true
 21473  	}
 21474  	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
 21475  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21476  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21477  	for {
 21478  		_ = v.Args[1]
 21479  		sh := v.Args[0]
 21480  		if sh.Op != OpAMD64SHLLconst {
 21481  			break
 21482  		}
 21483  		if sh.AuxInt != 8 {
 21484  			break
 21485  		}
 21486  		x0 := sh.Args[0]
 21487  		if x0.Op != OpAMD64MOVBloadidx1 {
 21488  			break
 21489  		}
 21490  		i0 := x0.AuxInt
 21491  		s := x0.Aux
 21492  		_ = x0.Args[2]
 21493  		p := x0.Args[0]
 21494  		idx := x0.Args[1]
 21495  		mem := x0.Args[2]
 21496  		x1 := v.Args[1]
 21497  		if x1.Op != OpAMD64MOVBloadidx1 {
 21498  			break
 21499  		}
 21500  		i1 := x1.AuxInt
 21501  		if x1.Aux != s {
 21502  			break
 21503  		}
 21504  		_ = x1.Args[2]
 21505  		if idx != x1.Args[0] {
 21506  			break
 21507  		}
 21508  		if p != x1.Args[1] {
 21509  			break
 21510  		}
 21511  		if mem != x1.Args[2] {
 21512  			break
 21513  		}
 21514  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21515  			break
 21516  		}
 21517  		b = mergePoint(b, x0, x1)
 21518  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21519  		v.reset(OpCopy)
 21520  		v.AddArg(v0)
 21521  		v0.AuxInt = 8
 21522  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21523  		v1.AuxInt = i0
 21524  		v1.Aux = s
 21525  		v1.AddArg(p)
 21526  		v1.AddArg(idx)
 21527  		v1.AddArg(mem)
 21528  		v0.AddArg(v1)
 21529  		return true
 21530  	}
 21531  	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
 21532  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 21533  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 21534  	for {
 21535  		_ = v.Args[1]
 21536  		sh := v.Args[0]
 21537  		if sh.Op != OpAMD64SHLLconst {
 21538  			break
 21539  		}
 21540  		if sh.AuxInt != 8 {
 21541  			break
 21542  		}
 21543  		x0 := sh.Args[0]
 21544  		if x0.Op != OpAMD64MOVBloadidx1 {
 21545  			break
 21546  		}
 21547  		i0 := x0.AuxInt
 21548  		s := x0.Aux
 21549  		_ = x0.Args[2]
 21550  		idx := x0.Args[0]
 21551  		p := x0.Args[1]
 21552  		mem := x0.Args[2]
 21553  		x1 := v.Args[1]
 21554  		if x1.Op != OpAMD64MOVBloadidx1 {
 21555  			break
 21556  		}
 21557  		i1 := x1.AuxInt
 21558  		if x1.Aux != s {
 21559  			break
 21560  		}
 21561  		_ = x1.Args[2]
 21562  		if idx != x1.Args[0] {
 21563  			break
 21564  		}
 21565  		if p != x1.Args[1] {
 21566  			break
 21567  		}
 21568  		if mem != x1.Args[2] {
 21569  			break
 21570  		}
 21571  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 21572  			break
 21573  		}
 21574  		b = mergePoint(b, x0, x1)
 21575  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 21576  		v.reset(OpCopy)
 21577  		v.AddArg(v0)
 21578  		v0.AuxInt = 8
 21579  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 21580  		v1.AuxInt = i0
 21581  		v1.Aux = s
 21582  		v1.AddArg(p)
 21583  		v1.AddArg(idx)
 21584  		v1.AddArg(mem)
 21585  		v0.AddArg(v1)
 21586  		return true
 21587  	}
 21588  	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 21589  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 21590  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 21591  	for {
 21592  		_ = v.Args[1]
 21593  		r1 := v.Args[0]
 21594  		if r1.Op != OpAMD64ROLWconst {
 21595  			break
 21596  		}
 21597  		if r1.AuxInt != 8 {
 21598  			break
 21599  		}
 21600  		x1 := r1.Args[0]
 21601  		if x1.Op != OpAMD64MOVWloadidx1 {
 21602  			break
 21603  		}
 21604  		i1 := x1.AuxInt
 21605  		s := x1.Aux
 21606  		_ = x1.Args[2]
 21607  		p := x1.Args[0]
 21608  		idx := x1.Args[1]
 21609  		mem := x1.Args[2]
 21610  		sh := v.Args[1]
 21611  		if sh.Op != OpAMD64SHLLconst {
 21612  			break
 21613  		}
 21614  		if sh.AuxInt != 16 {
 21615  			break
 21616  		}
 21617  		r0 := sh.Args[0]
 21618  		if r0.Op != OpAMD64ROLWconst {
 21619  			break
 21620  		}
 21621  		if r0.AuxInt != 8 {
 21622  			break
 21623  		}
 21624  		x0 := r0.Args[0]
 21625  		if x0.Op != OpAMD64MOVWloadidx1 {
 21626  			break
 21627  		}
 21628  		i0 := x0.AuxInt
 21629  		if x0.Aux != s {
 21630  			break
 21631  		}
 21632  		_ = x0.Args[2]
 21633  		if p != x0.Args[0] {
 21634  			break
 21635  		}
 21636  		if idx != x0.Args[1] {
 21637  			break
 21638  		}
 21639  		if mem != x0.Args[2] {
 21640  			break
 21641  		}
 21642  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 21643  			break
 21644  		}
 21645  		b = mergePoint(b, x0, x1)
 21646  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 21647  		v.reset(OpCopy)
 21648  		v.AddArg(v0)
 21649  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 21650  		v1.AuxInt = i0
 21651  		v1.Aux = s
 21652  		v1.AddArg(p)
 21653  		v1.AddArg(idx)
 21654  		v1.AddArg(mem)
 21655  		v0.AddArg(v1)
 21656  		return true
 21657  	}
 21658  	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 21659  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 21660  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 21661  	for {
 21662  		_ = v.Args[1]
 21663  		r1 := v.Args[0]
 21664  		if r1.Op != OpAMD64ROLWconst {
 21665  			break
 21666  		}
 21667  		if r1.AuxInt != 8 {
 21668  			break
 21669  		}
 21670  		x1 := r1.Args[0]
 21671  		if x1.Op != OpAMD64MOVWloadidx1 {
 21672  			break
 21673  		}
 21674  		i1 := x1.AuxInt
 21675  		s := x1.Aux
 21676  		_ = x1.Args[2]
 21677  		idx := x1.Args[0]
 21678  		p := x1.Args[1]
 21679  		mem := x1.Args[2]
 21680  		sh := v.Args[1]
 21681  		if sh.Op != OpAMD64SHLLconst {
 21682  			break
 21683  		}
 21684  		if sh.AuxInt != 16 {
 21685  			break
 21686  		}
 21687  		r0 := sh.Args[0]
 21688  		if r0.Op != OpAMD64ROLWconst {
 21689  			break
 21690  		}
 21691  		if r0.AuxInt != 8 {
 21692  			break
 21693  		}
 21694  		x0 := r0.Args[0]
 21695  		if x0.Op != OpAMD64MOVWloadidx1 {
 21696  			break
 21697  		}
 21698  		i0 := x0.AuxInt
 21699  		if x0.Aux != s {
 21700  			break
 21701  		}
 21702  		_ = x0.Args[2]
 21703  		if p != x0.Args[0] {
 21704  			break
 21705  		}
 21706  		if idx != x0.Args[1] {
 21707  			break
 21708  		}
 21709  		if mem != x0.Args[2] {
 21710  			break
 21711  		}
 21712  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 21713  			break
 21714  		}
 21715  		b = mergePoint(b, x0, x1)
 21716  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 21717  		v.reset(OpCopy)
 21718  		v.AddArg(v0)
 21719  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 21720  		v1.AuxInt = i0
 21721  		v1.Aux = s
 21722  		v1.AddArg(p)
 21723  		v1.AddArg(idx)
 21724  		v1.AddArg(mem)
 21725  		v0.AddArg(v1)
 21726  		return true
 21727  	}
 21728  	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 21729  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 21730  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 21731  	for {
 21732  		_ = v.Args[1]
 21733  		r1 := v.Args[0]
 21734  		if r1.Op != OpAMD64ROLWconst {
 21735  			break
 21736  		}
 21737  		if r1.AuxInt != 8 {
 21738  			break
 21739  		}
 21740  		x1 := r1.Args[0]
 21741  		if x1.Op != OpAMD64MOVWloadidx1 {
 21742  			break
 21743  		}
 21744  		i1 := x1.AuxInt
 21745  		s := x1.Aux
 21746  		_ = x1.Args[2]
 21747  		p := x1.Args[0]
 21748  		idx := x1.Args[1]
 21749  		mem := x1.Args[2]
 21750  		sh := v.Args[1]
 21751  		if sh.Op != OpAMD64SHLLconst {
 21752  			break
 21753  		}
 21754  		if sh.AuxInt != 16 {
 21755  			break
 21756  		}
 21757  		r0 := sh.Args[0]
 21758  		if r0.Op != OpAMD64ROLWconst {
 21759  			break
 21760  		}
 21761  		if r0.AuxInt != 8 {
 21762  			break
 21763  		}
 21764  		x0 := r0.Args[0]
 21765  		if x0.Op != OpAMD64MOVWloadidx1 {
 21766  			break
 21767  		}
 21768  		i0 := x0.AuxInt
 21769  		if x0.Aux != s {
 21770  			break
 21771  		}
 21772  		_ = x0.Args[2]
 21773  		if idx != x0.Args[0] {
 21774  			break
 21775  		}
 21776  		if p != x0.Args[1] {
 21777  			break
 21778  		}
 21779  		if mem != x0.Args[2] {
 21780  			break
 21781  		}
 21782  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 21783  			break
 21784  		}
 21785  		b = mergePoint(b, x0, x1)
 21786  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 21787  		v.reset(OpCopy)
 21788  		v.AddArg(v0)
 21789  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 21790  		v1.AuxInt = i0
 21791  		v1.Aux = s
 21792  		v1.AddArg(p)
 21793  		v1.AddArg(idx)
 21794  		v1.AddArg(mem)
 21795  		v0.AddArg(v1)
 21796  		return true
 21797  	}
 21798  	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 21799  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 21800  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 21801  	for {
 21802  		_ = v.Args[1]
 21803  		r1 := v.Args[0]
 21804  		if r1.Op != OpAMD64ROLWconst {
 21805  			break
 21806  		}
 21807  		if r1.AuxInt != 8 {
 21808  			break
 21809  		}
 21810  		x1 := r1.Args[0]
 21811  		if x1.Op != OpAMD64MOVWloadidx1 {
 21812  			break
 21813  		}
 21814  		i1 := x1.AuxInt
 21815  		s := x1.Aux
 21816  		_ = x1.Args[2]
 21817  		idx := x1.Args[0]
 21818  		p := x1.Args[1]
 21819  		mem := x1.Args[2]
 21820  		sh := v.Args[1]
 21821  		if sh.Op != OpAMD64SHLLconst {
 21822  			break
 21823  		}
 21824  		if sh.AuxInt != 16 {
 21825  			break
 21826  		}
 21827  		r0 := sh.Args[0]
 21828  		if r0.Op != OpAMD64ROLWconst {
 21829  			break
 21830  		}
 21831  		if r0.AuxInt != 8 {
 21832  			break
 21833  		}
 21834  		x0 := r0.Args[0]
 21835  		if x0.Op != OpAMD64MOVWloadidx1 {
 21836  			break
 21837  		}
 21838  		i0 := x0.AuxInt
 21839  		if x0.Aux != s {
 21840  			break
 21841  		}
 21842  		_ = x0.Args[2]
 21843  		if idx != x0.Args[0] {
 21844  			break
 21845  		}
 21846  		if p != x0.Args[1] {
 21847  			break
 21848  		}
 21849  		if mem != x0.Args[2] {
 21850  			break
 21851  		}
 21852  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 21853  			break
 21854  		}
 21855  		b = mergePoint(b, x0, x1)
 21856  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 21857  		v.reset(OpCopy)
 21858  		v.AddArg(v0)
 21859  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 21860  		v1.AuxInt = i0
 21861  		v1.Aux = s
 21862  		v1.AddArg(p)
 21863  		v1.AddArg(idx)
 21864  		v1.AddArg(mem)
 21865  		v0.AddArg(v1)
 21866  		return true
 21867  	}
 21868  	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 21869  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 21870  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 21871  	for {
 21872  		_ = v.Args[1]
 21873  		sh := v.Args[0]
 21874  		if sh.Op != OpAMD64SHLLconst {
 21875  			break
 21876  		}
 21877  		if sh.AuxInt != 16 {
 21878  			break
 21879  		}
 21880  		r0 := sh.Args[0]
 21881  		if r0.Op != OpAMD64ROLWconst {
 21882  			break
 21883  		}
 21884  		if r0.AuxInt != 8 {
 21885  			break
 21886  		}
 21887  		x0 := r0.Args[0]
 21888  		if x0.Op != OpAMD64MOVWloadidx1 {
 21889  			break
 21890  		}
 21891  		i0 := x0.AuxInt
 21892  		s := x0.Aux
 21893  		_ = x0.Args[2]
 21894  		p := x0.Args[0]
 21895  		idx := x0.Args[1]
 21896  		mem := x0.Args[2]
 21897  		r1 := v.Args[1]
 21898  		if r1.Op != OpAMD64ROLWconst {
 21899  			break
 21900  		}
 21901  		if r1.AuxInt != 8 {
 21902  			break
 21903  		}
 21904  		x1 := r1.Args[0]
 21905  		if x1.Op != OpAMD64MOVWloadidx1 {
 21906  			break
 21907  		}
 21908  		i1 := x1.AuxInt
 21909  		if x1.Aux != s {
 21910  			break
 21911  		}
 21912  		_ = x1.Args[2]
 21913  		if p != x1.Args[0] {
 21914  			break
 21915  		}
 21916  		if idx != x1.Args[1] {
 21917  			break
 21918  		}
 21919  		if mem != x1.Args[2] {
 21920  			break
 21921  		}
 21922  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 21923  			break
 21924  		}
 21925  		b = mergePoint(b, x0, x1)
 21926  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 21927  		v.reset(OpCopy)
 21928  		v.AddArg(v0)
 21929  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 21930  		v1.AuxInt = i0
 21931  		v1.Aux = s
 21932  		v1.AddArg(p)
 21933  		v1.AddArg(idx)
 21934  		v1.AddArg(mem)
 21935  		v0.AddArg(v1)
 21936  		return true
 21937  	}
 21938  	return false
 21939  }
// rewriteValueAMD64_OpAMD64ORL_110 applies one batch of machine-generated
// rewrite rules for OpAMD64ORL values. It returns true if a rule fired and
// rewrote v, false if none matched (callers then try the next batch).
//
// Code generated from gen/AMD64.rules; DO NOT EDIT this function by hand —
// change the rules file and regenerate instead.
//
// Every rule in this batch recognizes an ORL tree that reassembles adjacent
// indexed loads from the same {s} symbol, base p, index idx, and memory mem
// (MOVWloadidx1 at offsets i0 and i0+2, or MOVBloadidx1 at offsets i0 and
// i0+1) combined via SHLLconst/ROLWconst, and — per the per-rule "result"
// comments — replaces the tree with a single wider indexed load
// (MOVLloadidx1 under BSWAPL, or MOVWloadidx1 under ROLWconst [8]) placed at
// mergePoint(b, x0, x1). The successive rules are the commutative variants
// of one another: as each rule's "match" comment spells out, they differ
// only in the order of the p/idx arguments of the loads and of the operands
// of the inner ORL. Each rule requires every intermediate value to have
// exactly one use (the *.Uses == 1 checks) so the originals can be
// clobbered.
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// No rule in this batch matched; the caller falls through to the next batch.
	return false
}
 22661  func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
 22662  	b := v.Block
 22663  	_ = b
 22664  	typ := &b.Func.Config.Types
 22665  	_ = typ
 22666  	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
 22667  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 22668  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 22669  	for {
 22670  		_ = v.Args[1]
 22671  		s0 := v.Args[0]
 22672  		if s0.Op != OpAMD64SHLLconst {
 22673  			break
 22674  		}
 22675  		j0 := s0.AuxInt
 22676  		x0 := s0.Args[0]
 22677  		if x0.Op != OpAMD64MOVBloadidx1 {
 22678  			break
 22679  		}
 22680  		i0 := x0.AuxInt
 22681  		s := x0.Aux
 22682  		_ = x0.Args[2]
 22683  		idx := x0.Args[0]
 22684  		p := x0.Args[1]
 22685  		mem := x0.Args[2]
 22686  		or := v.Args[1]
 22687  		if or.Op != OpAMD64ORL {
 22688  			break
 22689  		}
 22690  		_ = or.Args[1]
 22691  		y := or.Args[0]
 22692  		s1 := or.Args[1]
 22693  		if s1.Op != OpAMD64SHLLconst {
 22694  			break
 22695  		}
 22696  		j1 := s1.AuxInt
 22697  		x1 := s1.Args[0]
 22698  		if x1.Op != OpAMD64MOVBloadidx1 {
 22699  			break
 22700  		}
 22701  		i1 := x1.AuxInt
 22702  		if x1.Aux != s {
 22703  			break
 22704  		}
 22705  		_ = x1.Args[2]
 22706  		if idx != x1.Args[0] {
 22707  			break
 22708  		}
 22709  		if p != x1.Args[1] {
 22710  			break
 22711  		}
 22712  		if mem != x1.Args[2] {
 22713  			break
 22714  		}
 22715  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 22716  			break
 22717  		}
 22718  		b = mergePoint(b, x0, x1)
 22719  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 22720  		v.reset(OpCopy)
 22721  		v.AddArg(v0)
 22722  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 22723  		v1.AuxInt = j1
 22724  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 22725  		v2.AuxInt = 8
 22726  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 22727  		v3.AuxInt = i0
 22728  		v3.Aux = s
 22729  		v3.AddArg(p)
 22730  		v3.AddArg(idx)
 22731  		v3.AddArg(mem)
 22732  		v2.AddArg(v3)
 22733  		v1.AddArg(v2)
 22734  		v0.AddArg(v1)
 22735  		v0.AddArg(y)
 22736  		return true
 22737  	}
 22738  	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 22739  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 22740  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 22741  	for {
 22742  		_ = v.Args[1]
 22743  		or := v.Args[0]
 22744  		if or.Op != OpAMD64ORL {
 22745  			break
 22746  		}
 22747  		_ = or.Args[1]
 22748  		s1 := or.Args[0]
 22749  		if s1.Op != OpAMD64SHLLconst {
 22750  			break
 22751  		}
 22752  		j1 := s1.AuxInt
 22753  		x1 := s1.Args[0]
 22754  		if x1.Op != OpAMD64MOVBloadidx1 {
 22755  			break
 22756  		}
 22757  		i1 := x1.AuxInt
 22758  		s := x1.Aux
 22759  		_ = x1.Args[2]
 22760  		p := x1.Args[0]
 22761  		idx := x1.Args[1]
 22762  		mem := x1.Args[2]
 22763  		y := or.Args[1]
 22764  		s0 := v.Args[1]
 22765  		if s0.Op != OpAMD64SHLLconst {
 22766  			break
 22767  		}
 22768  		j0 := s0.AuxInt
 22769  		x0 := s0.Args[0]
 22770  		if x0.Op != OpAMD64MOVBloadidx1 {
 22771  			break
 22772  		}
 22773  		i0 := x0.AuxInt
 22774  		if x0.Aux != s {
 22775  			break
 22776  		}
 22777  		_ = x0.Args[2]
 22778  		if p != x0.Args[0] {
 22779  			break
 22780  		}
 22781  		if idx != x0.Args[1] {
 22782  			break
 22783  		}
 22784  		if mem != x0.Args[2] {
 22785  			break
 22786  		}
 22787  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 22788  			break
 22789  		}
 22790  		b = mergePoint(b, x0, x1)
 22791  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 22792  		v.reset(OpCopy)
 22793  		v.AddArg(v0)
 22794  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 22795  		v1.AuxInt = j1
 22796  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 22797  		v2.AuxInt = 8
 22798  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 22799  		v3.AuxInt = i0
 22800  		v3.Aux = s
 22801  		v3.AddArg(p)
 22802  		v3.AddArg(idx)
 22803  		v3.AddArg(mem)
 22804  		v2.AddArg(v3)
 22805  		v1.AddArg(v2)
 22806  		v0.AddArg(v1)
 22807  		v0.AddArg(y)
 22808  		return true
 22809  	}
 22810  	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 22811  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 22812  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 22813  	for {
 22814  		_ = v.Args[1]
 22815  		or := v.Args[0]
 22816  		if or.Op != OpAMD64ORL {
 22817  			break
 22818  		}
 22819  		_ = or.Args[1]
 22820  		s1 := or.Args[0]
 22821  		if s1.Op != OpAMD64SHLLconst {
 22822  			break
 22823  		}
 22824  		j1 := s1.AuxInt
 22825  		x1 := s1.Args[0]
 22826  		if x1.Op != OpAMD64MOVBloadidx1 {
 22827  			break
 22828  		}
 22829  		i1 := x1.AuxInt
 22830  		s := x1.Aux
 22831  		_ = x1.Args[2]
 22832  		idx := x1.Args[0]
 22833  		p := x1.Args[1]
 22834  		mem := x1.Args[2]
 22835  		y := or.Args[1]
 22836  		s0 := v.Args[1]
 22837  		if s0.Op != OpAMD64SHLLconst {
 22838  			break
 22839  		}
 22840  		j0 := s0.AuxInt
 22841  		x0 := s0.Args[0]
 22842  		if x0.Op != OpAMD64MOVBloadidx1 {
 22843  			break
 22844  		}
 22845  		i0 := x0.AuxInt
 22846  		if x0.Aux != s {
 22847  			break
 22848  		}
 22849  		_ = x0.Args[2]
 22850  		if p != x0.Args[0] {
 22851  			break
 22852  		}
 22853  		if idx != x0.Args[1] {
 22854  			break
 22855  		}
 22856  		if mem != x0.Args[2] {
 22857  			break
 22858  		}
 22859  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 22860  			break
 22861  		}
 22862  		b = mergePoint(b, x0, x1)
 22863  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 22864  		v.reset(OpCopy)
 22865  		v.AddArg(v0)
 22866  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 22867  		v1.AuxInt = j1
 22868  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 22869  		v2.AuxInt = 8
 22870  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 22871  		v3.AuxInt = i0
 22872  		v3.Aux = s
 22873  		v3.AddArg(p)
 22874  		v3.AddArg(idx)
 22875  		v3.AddArg(mem)
 22876  		v2.AddArg(v3)
 22877  		v1.AddArg(v2)
 22878  		v0.AddArg(v1)
 22879  		v0.AddArg(y)
 22880  		return true
 22881  	}
 22882  	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 22883  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 22884  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 22885  	for {
 22886  		_ = v.Args[1]
 22887  		or := v.Args[0]
 22888  		if or.Op != OpAMD64ORL {
 22889  			break
 22890  		}
 22891  		_ = or.Args[1]
 22892  		y := or.Args[0]
 22893  		s1 := or.Args[1]
 22894  		if s1.Op != OpAMD64SHLLconst {
 22895  			break
 22896  		}
 22897  		j1 := s1.AuxInt
 22898  		x1 := s1.Args[0]
 22899  		if x1.Op != OpAMD64MOVBloadidx1 {
 22900  			break
 22901  		}
 22902  		i1 := x1.AuxInt
 22903  		s := x1.Aux
 22904  		_ = x1.Args[2]
 22905  		p := x1.Args[0]
 22906  		idx := x1.Args[1]
 22907  		mem := x1.Args[2]
 22908  		s0 := v.Args[1]
 22909  		if s0.Op != OpAMD64SHLLconst {
 22910  			break
 22911  		}
 22912  		j0 := s0.AuxInt
 22913  		x0 := s0.Args[0]
 22914  		if x0.Op != OpAMD64MOVBloadidx1 {
 22915  			break
 22916  		}
 22917  		i0 := x0.AuxInt
 22918  		if x0.Aux != s {
 22919  			break
 22920  		}
 22921  		_ = x0.Args[2]
 22922  		if p != x0.Args[0] {
 22923  			break
 22924  		}
 22925  		if idx != x0.Args[1] {
 22926  			break
 22927  		}
 22928  		if mem != x0.Args[2] {
 22929  			break
 22930  		}
 22931  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 22932  			break
 22933  		}
 22934  		b = mergePoint(b, x0, x1)
 22935  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 22936  		v.reset(OpCopy)
 22937  		v.AddArg(v0)
 22938  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 22939  		v1.AuxInt = j1
 22940  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 22941  		v2.AuxInt = 8
 22942  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 22943  		v3.AuxInt = i0
 22944  		v3.Aux = s
 22945  		v3.AddArg(p)
 22946  		v3.AddArg(idx)
 22947  		v3.AddArg(mem)
 22948  		v2.AddArg(v3)
 22949  		v1.AddArg(v2)
 22950  		v0.AddArg(v1)
 22951  		v0.AddArg(y)
 22952  		return true
 22953  	}
 22954  	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 22955  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 22956  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 22957  	for {
 22958  		_ = v.Args[1]
 22959  		or := v.Args[0]
 22960  		if or.Op != OpAMD64ORL {
 22961  			break
 22962  		}
 22963  		_ = or.Args[1]
 22964  		y := or.Args[0]
 22965  		s1 := or.Args[1]
 22966  		if s1.Op != OpAMD64SHLLconst {
 22967  			break
 22968  		}
 22969  		j1 := s1.AuxInt
 22970  		x1 := s1.Args[0]
 22971  		if x1.Op != OpAMD64MOVBloadidx1 {
 22972  			break
 22973  		}
 22974  		i1 := x1.AuxInt
 22975  		s := x1.Aux
 22976  		_ = x1.Args[2]
 22977  		idx := x1.Args[0]
 22978  		p := x1.Args[1]
 22979  		mem := x1.Args[2]
 22980  		s0 := v.Args[1]
 22981  		if s0.Op != OpAMD64SHLLconst {
 22982  			break
 22983  		}
 22984  		j0 := s0.AuxInt
 22985  		x0 := s0.Args[0]
 22986  		if x0.Op != OpAMD64MOVBloadidx1 {
 22987  			break
 22988  		}
 22989  		i0 := x0.AuxInt
 22990  		if x0.Aux != s {
 22991  			break
 22992  		}
 22993  		_ = x0.Args[2]
 22994  		if p != x0.Args[0] {
 22995  			break
 22996  		}
 22997  		if idx != x0.Args[1] {
 22998  			break
 22999  		}
 23000  		if mem != x0.Args[2] {
 23001  			break
 23002  		}
 23003  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 23004  			break
 23005  		}
 23006  		b = mergePoint(b, x0, x1)
 23007  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 23008  		v.reset(OpCopy)
 23009  		v.AddArg(v0)
 23010  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 23011  		v1.AuxInt = j1
 23012  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 23013  		v2.AuxInt = 8
 23014  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 23015  		v3.AuxInt = i0
 23016  		v3.Aux = s
 23017  		v3.AddArg(p)
 23018  		v3.AddArg(idx)
 23019  		v3.AddArg(mem)
 23020  		v2.AddArg(v3)
 23021  		v1.AddArg(v2)
 23022  		v0.AddArg(v1)
 23023  		v0.AddArg(y)
 23024  		return true
 23025  	}
 23026  	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 23027  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 23028  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 23029  	for {
 23030  		_ = v.Args[1]
 23031  		or := v.Args[0]
 23032  		if or.Op != OpAMD64ORL {
 23033  			break
 23034  		}
 23035  		_ = or.Args[1]
 23036  		s1 := or.Args[0]
 23037  		if s1.Op != OpAMD64SHLLconst {
 23038  			break
 23039  		}
 23040  		j1 := s1.AuxInt
 23041  		x1 := s1.Args[0]
 23042  		if x1.Op != OpAMD64MOVBloadidx1 {
 23043  			break
 23044  		}
 23045  		i1 := x1.AuxInt
 23046  		s := x1.Aux
 23047  		_ = x1.Args[2]
 23048  		p := x1.Args[0]
 23049  		idx := x1.Args[1]
 23050  		mem := x1.Args[2]
 23051  		y := or.Args[1]
 23052  		s0 := v.Args[1]
 23053  		if s0.Op != OpAMD64SHLLconst {
 23054  			break
 23055  		}
 23056  		j0 := s0.AuxInt
 23057  		x0 := s0.Args[0]
 23058  		if x0.Op != OpAMD64MOVBloadidx1 {
 23059  			break
 23060  		}
 23061  		i0 := x0.AuxInt
 23062  		if x0.Aux != s {
 23063  			break
 23064  		}
 23065  		_ = x0.Args[2]
 23066  		if idx != x0.Args[0] {
 23067  			break
 23068  		}
 23069  		if p != x0.Args[1] {
 23070  			break
 23071  		}
 23072  		if mem != x0.Args[2] {
 23073  			break
 23074  		}
 23075  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 23076  			break
 23077  		}
 23078  		b = mergePoint(b, x0, x1)
 23079  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 23080  		v.reset(OpCopy)
 23081  		v.AddArg(v0)
 23082  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 23083  		v1.AuxInt = j1
 23084  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 23085  		v2.AuxInt = 8
 23086  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 23087  		v3.AuxInt = i0
 23088  		v3.Aux = s
 23089  		v3.AddArg(p)
 23090  		v3.AddArg(idx)
 23091  		v3.AddArg(mem)
 23092  		v2.AddArg(v3)
 23093  		v1.AddArg(v2)
 23094  		v0.AddArg(v1)
 23095  		v0.AddArg(y)
 23096  		return true
 23097  	}
 23098  	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 23099  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 23100  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 23101  	for {
 23102  		_ = v.Args[1]
 23103  		or := v.Args[0]
 23104  		if or.Op != OpAMD64ORL {
 23105  			break
 23106  		}
 23107  		_ = or.Args[1]
 23108  		s1 := or.Args[0]
 23109  		if s1.Op != OpAMD64SHLLconst {
 23110  			break
 23111  		}
 23112  		j1 := s1.AuxInt
 23113  		x1 := s1.Args[0]
 23114  		if x1.Op != OpAMD64MOVBloadidx1 {
 23115  			break
 23116  		}
 23117  		i1 := x1.AuxInt
 23118  		s := x1.Aux
 23119  		_ = x1.Args[2]
 23120  		idx := x1.Args[0]
 23121  		p := x1.Args[1]
 23122  		mem := x1.Args[2]
 23123  		y := or.Args[1]
 23124  		s0 := v.Args[1]
 23125  		if s0.Op != OpAMD64SHLLconst {
 23126  			break
 23127  		}
 23128  		j0 := s0.AuxInt
 23129  		x0 := s0.Args[0]
 23130  		if x0.Op != OpAMD64MOVBloadidx1 {
 23131  			break
 23132  		}
 23133  		i0 := x0.AuxInt
 23134  		if x0.Aux != s {
 23135  			break
 23136  		}
 23137  		_ = x0.Args[2]
 23138  		if idx != x0.Args[0] {
 23139  			break
 23140  		}
 23141  		if p != x0.Args[1] {
 23142  			break
 23143  		}
 23144  		if mem != x0.Args[2] {
 23145  			break
 23146  		}
 23147  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 23148  			break
 23149  		}
 23150  		b = mergePoint(b, x0, x1)
 23151  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 23152  		v.reset(OpCopy)
 23153  		v.AddArg(v0)
 23154  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 23155  		v1.AuxInt = j1
 23156  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 23157  		v2.AuxInt = 8
 23158  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 23159  		v3.AuxInt = i0
 23160  		v3.Aux = s
 23161  		v3.AddArg(p)
 23162  		v3.AddArg(idx)
 23163  		v3.AddArg(mem)
 23164  		v2.AddArg(v3)
 23165  		v1.AddArg(v2)
 23166  		v0.AddArg(v1)
 23167  		v0.AddArg(y)
 23168  		return true
 23169  	}
 23170  	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 23171  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 23172  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 23173  	for {
 23174  		_ = v.Args[1]
 23175  		or := v.Args[0]
 23176  		if or.Op != OpAMD64ORL {
 23177  			break
 23178  		}
 23179  		_ = or.Args[1]
 23180  		y := or.Args[0]
 23181  		s1 := or.Args[1]
 23182  		if s1.Op != OpAMD64SHLLconst {
 23183  			break
 23184  		}
 23185  		j1 := s1.AuxInt
 23186  		x1 := s1.Args[0]
 23187  		if x1.Op != OpAMD64MOVBloadidx1 {
 23188  			break
 23189  		}
 23190  		i1 := x1.AuxInt
 23191  		s := x1.Aux
 23192  		_ = x1.Args[2]
 23193  		p := x1.Args[0]
 23194  		idx := x1.Args[1]
 23195  		mem := x1.Args[2]
 23196  		s0 := v.Args[1]
 23197  		if s0.Op != OpAMD64SHLLconst {
 23198  			break
 23199  		}
 23200  		j0 := s0.AuxInt
 23201  		x0 := s0.Args[0]
 23202  		if x0.Op != OpAMD64MOVBloadidx1 {
 23203  			break
 23204  		}
 23205  		i0 := x0.AuxInt
 23206  		if x0.Aux != s {
 23207  			break
 23208  		}
 23209  		_ = x0.Args[2]
 23210  		if idx != x0.Args[0] {
 23211  			break
 23212  		}
 23213  		if p != x0.Args[1] {
 23214  			break
 23215  		}
 23216  		if mem != x0.Args[2] {
 23217  			break
 23218  		}
 23219  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 23220  			break
 23221  		}
 23222  		b = mergePoint(b, x0, x1)
 23223  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 23224  		v.reset(OpCopy)
 23225  		v.AddArg(v0)
 23226  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 23227  		v1.AuxInt = j1
 23228  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 23229  		v2.AuxInt = 8
 23230  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 23231  		v3.AuxInt = i0
 23232  		v3.Aux = s
 23233  		v3.AddArg(p)
 23234  		v3.AddArg(idx)
 23235  		v3.AddArg(mem)
 23236  		v2.AddArg(v3)
 23237  		v1.AddArg(v2)
 23238  		v0.AddArg(v1)
 23239  		v0.AddArg(y)
 23240  		return true
 23241  	}
 23242  	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 23243  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 23244  	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 23245  	for {
 23246  		_ = v.Args[1]
 23247  		or := v.Args[0]
 23248  		if or.Op != OpAMD64ORL {
 23249  			break
 23250  		}
 23251  		_ = or.Args[1]
 23252  		y := or.Args[0]
 23253  		s1 := or.Args[1]
 23254  		if s1.Op != OpAMD64SHLLconst {
 23255  			break
 23256  		}
 23257  		j1 := s1.AuxInt
 23258  		x1 := s1.Args[0]
 23259  		if x1.Op != OpAMD64MOVBloadidx1 {
 23260  			break
 23261  		}
 23262  		i1 := x1.AuxInt
 23263  		s := x1.Aux
 23264  		_ = x1.Args[2]
 23265  		idx := x1.Args[0]
 23266  		p := x1.Args[1]
 23267  		mem := x1.Args[2]
 23268  		s0 := v.Args[1]
 23269  		if s0.Op != OpAMD64SHLLconst {
 23270  			break
 23271  		}
 23272  		j0 := s0.AuxInt
 23273  		x0 := s0.Args[0]
 23274  		if x0.Op != OpAMD64MOVBloadidx1 {
 23275  			break
 23276  		}
 23277  		i0 := x0.AuxInt
 23278  		if x0.Aux != s {
 23279  			break
 23280  		}
 23281  		_ = x0.Args[2]
 23282  		if idx != x0.Args[0] {
 23283  			break
 23284  		}
 23285  		if p != x0.Args[1] {
 23286  			break
 23287  		}
 23288  		if mem != x0.Args[2] {
 23289  			break
 23290  		}
 23291  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 23292  			break
 23293  		}
 23294  		b = mergePoint(b, x0, x1)
 23295  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
 23296  		v.reset(OpCopy)
 23297  		v.AddArg(v0)
 23298  		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
 23299  		v1.AuxInt = j1
 23300  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 23301  		v2.AuxInt = 8
 23302  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 23303  		v3.AuxInt = i0
 23304  		v3.Aux = s
 23305  		v3.AddArg(p)
 23306  		v3.AddArg(idx)
 23307  		v3.AddArg(mem)
 23308  		v2.AddArg(v3)
 23309  		v1.AddArg(v2)
 23310  		v0.AddArg(v1)
 23311  		v0.AddArg(y)
 23312  		return true
 23313  	}
 23314  	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
 23315  	// cond: canMergeLoad(v, l, x) && clobber(l)
 23316  	// result: (ORLmem x [off] {sym} ptr mem)
 23317  	for {
 23318  		_ = v.Args[1]
 23319  		x := v.Args[0]
 23320  		l := v.Args[1]
 23321  		if l.Op != OpAMD64MOVLload {
 23322  			break
 23323  		}
 23324  		off := l.AuxInt
 23325  		sym := l.Aux
 23326  		_ = l.Args[1]
 23327  		ptr := l.Args[0]
 23328  		mem := l.Args[1]
 23329  		if !(canMergeLoad(v, l, x) && clobber(l)) {
 23330  			break
 23331  		}
 23332  		v.reset(OpAMD64ORLmem)
 23333  		v.AuxInt = off
 23334  		v.Aux = sym
 23335  		v.AddArg(x)
 23336  		v.AddArg(ptr)
 23337  		v.AddArg(mem)
 23338  		return true
 23339  	}
 23340  	return false
 23341  }
 23342  func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
 23343  	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
 23344  	// cond: canMergeLoad(v, l, x) && clobber(l)
 23345  	// result: (ORLmem x [off] {sym} ptr mem)
 23346  	for {
 23347  		_ = v.Args[1]
 23348  		l := v.Args[0]
 23349  		if l.Op != OpAMD64MOVLload {
 23350  			break
 23351  		}
 23352  		off := l.AuxInt
 23353  		sym := l.Aux
 23354  		_ = l.Args[1]
 23355  		ptr := l.Args[0]
 23356  		mem := l.Args[1]
 23357  		x := v.Args[1]
 23358  		if !(canMergeLoad(v, l, x) && clobber(l)) {
 23359  			break
 23360  		}
 23361  		v.reset(OpAMD64ORLmem)
 23362  		v.AuxInt = off
 23363  		v.Aux = sym
 23364  		v.AddArg(x)
 23365  		v.AddArg(ptr)
 23366  		v.AddArg(mem)
 23367  		return true
 23368  	}
 23369  	return false
 23370  }
 23371  func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
 23372  	// match: (ORLconst [c] x)
 23373  	// cond: int32(c)==0
 23374  	// result: x
 23375  	for {
 23376  		c := v.AuxInt
 23377  		x := v.Args[0]
 23378  		if !(int32(c) == 0) {
 23379  			break
 23380  		}
 23381  		v.reset(OpCopy)
 23382  		v.Type = x.Type
 23383  		v.AddArg(x)
 23384  		return true
 23385  	}
 23386  	// match: (ORLconst [c] _)
 23387  	// cond: int32(c)==-1
 23388  	// result: (MOVLconst [-1])
 23389  	for {
 23390  		c := v.AuxInt
 23391  		if !(int32(c) == -1) {
 23392  			break
 23393  		}
 23394  		v.reset(OpAMD64MOVLconst)
 23395  		v.AuxInt = -1
 23396  		return true
 23397  	}
 23398  	// match: (ORLconst [c] (MOVLconst [d]))
 23399  	// cond:
 23400  	// result: (MOVLconst [c|d])
 23401  	for {
 23402  		c := v.AuxInt
 23403  		v_0 := v.Args[0]
 23404  		if v_0.Op != OpAMD64MOVLconst {
 23405  			break
 23406  		}
 23407  		d := v_0.AuxInt
 23408  		v.reset(OpAMD64MOVLconst)
 23409  		v.AuxInt = c | d
 23410  		return true
 23411  	}
 23412  	return false
 23413  }
 23414  func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool {
 23415  	b := v.Block
 23416  	_ = b
 23417  	typ := &b.Func.Config.Types
 23418  	_ = typ
 23419  	// match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
 23420  	// cond:
 23421  	// result: ( ORL x (MOVLf2i y))
 23422  	for {
 23423  		off := v.AuxInt
 23424  		sym := v.Aux
 23425  		_ = v.Args[2]
 23426  		x := v.Args[0]
 23427  		ptr := v.Args[1]
 23428  		v_2 := v.Args[2]
 23429  		if v_2.Op != OpAMD64MOVSSstore {
 23430  			break
 23431  		}
 23432  		if v_2.AuxInt != off {
 23433  			break
 23434  		}
 23435  		if v_2.Aux != sym {
 23436  			break
 23437  		}
 23438  		_ = v_2.Args[2]
 23439  		if ptr != v_2.Args[0] {
 23440  			break
 23441  		}
 23442  		y := v_2.Args[1]
 23443  		v.reset(OpAMD64ORL)
 23444  		v.AddArg(x)
 23445  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
 23446  		v0.AddArg(y)
 23447  		v.AddArg(v0)
 23448  		return true
 23449  	}
 23450  	return false
 23451  }
// rewriteValueAMD64_OpAMD64ORQ_0 applies the first batch of generated
// rewrite rules for the ORQ op: folding 32-bit-representable constants
// into ORQconst, and recognizing shift/mask combinations as a 64-bit
// rotate (ROLQconst / ROLQ). Each "for" loop attempts one rule: it matches
// the operand tree in the rule's "match:" comment, checks "cond:", and on
// success rewrites v in place to the "result:" form and returns true.
// Falling through every rule returns false so the dispatcher can try
// rewriteValueAMD64_OpAMD64ORQ_10.
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	// Commuted operands of the previous rule.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	// Complementary constant shifts of the same value form a rotate.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	// Commuted operands of the previous rule.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	// Recognizes the expanded multi-op form of a variable 64-bit rotate by
	// y. The rules that follow are argument-order permutations (ORQ and
	// ANDQ are commutative) of this same pattern.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	// Same rotate pattern as above, but the shift amount is computed with
	// 32-bit (L-suffix) ops.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQ_10 is one slice of the generated rewrite
// dispatcher for OpAMD64ORQ. Every case below recognizes one commutation
// of the masked double-shift expression that computes a 64-bit rotate by
// a variable amount y, and collapses the whole tree to a single rotate
// instruction (ROLQ or RORQ) with arguments x and y. It reports whether
// v was rewritten.
//
// The matched mask operand is SBBQcarrymask(CMPxconst [64] (NEGx
// (ADDxconst [-64] (ANDxconst [63] y)))); the "L" cases (NEGL/CMPLconst/
// ADDLconst/ANDLconst) match a 32-bit shift count and the "Q" cases
// (NEGQ/CMPQconst/ADDQconst/ANDQconst) a 64-bit one. Within each group,
// the separate cases differ only in the order of the commutative ORQ and
// ANDQ operands.
//
// NOTE(review): this file is generated from gen/AMD64.rules (see the file
// header); change the rules there, not here.
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// ROLQ with 32-bit (L-sized) shift count: the SHLQ-by-y term is the
	// second ORQ operand; the two cases swap the ANDQ operands.
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// RORQ with 64-bit (Q-sized) shift count: four cases covering both
	// orders of the ORQ operands and both orders of the ANDQ operands.
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// RORQ with 32-bit (L-sized) shift count: the same four commutations
	// as above, with the L-variant mask ops.
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// No pattern matched; the dispatcher falls through to the next
	// generated slice (rewriteValueAMD64_OpAMD64ORQ_20).
	return false
}
 24654  func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
 24655  	b := v.Block
 24656  	_ = b
 24657  	typ := &b.Func.Config.Types
 24658  	_ = typ
 24659  	// match: (ORQ x x)
 24660  	// cond:
 24661  	// result: x
 24662  	for {
 24663  		_ = v.Args[1]
 24664  		x := v.Args[0]
 24665  		if x != v.Args[1] {
 24666  			break
 24667  		}
 24668  		v.reset(OpCopy)
 24669  		v.Type = x.Type
 24670  		v.AddArg(x)
 24671  		return true
 24672  	}
 24673  	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
 24674  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 24675  	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
 24676  	for {
 24677  		_ = v.Args[1]
 24678  		x0 := v.Args[0]
 24679  		if x0.Op != OpAMD64MOVBload {
 24680  			break
 24681  		}
 24682  		i0 := x0.AuxInt
 24683  		s := x0.Aux
 24684  		_ = x0.Args[1]
 24685  		p := x0.Args[0]
 24686  		mem := x0.Args[1]
 24687  		sh := v.Args[1]
 24688  		if sh.Op != OpAMD64SHLQconst {
 24689  			break
 24690  		}
 24691  		if sh.AuxInt != 8 {
 24692  			break
 24693  		}
 24694  		x1 := sh.Args[0]
 24695  		if x1.Op != OpAMD64MOVBload {
 24696  			break
 24697  		}
 24698  		i1 := x1.AuxInt
 24699  		if x1.Aux != s {
 24700  			break
 24701  		}
 24702  		_ = x1.Args[1]
 24703  		if p != x1.Args[0] {
 24704  			break
 24705  		}
 24706  		if mem != x1.Args[1] {
 24707  			break
 24708  		}
 24709  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 24710  			break
 24711  		}
 24712  		b = mergePoint(b, x0, x1)
 24713  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 24714  		v.reset(OpCopy)
 24715  		v.AddArg(v0)
 24716  		v0.AuxInt = i0
 24717  		v0.Aux = s
 24718  		v0.AddArg(p)
 24719  		v0.AddArg(mem)
 24720  		return true
 24721  	}
 24722  	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
 24723  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 24724  	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
 24725  	for {
 24726  		_ = v.Args[1]
 24727  		sh := v.Args[0]
 24728  		if sh.Op != OpAMD64SHLQconst {
 24729  			break
 24730  		}
 24731  		if sh.AuxInt != 8 {
 24732  			break
 24733  		}
 24734  		x1 := sh.Args[0]
 24735  		if x1.Op != OpAMD64MOVBload {
 24736  			break
 24737  		}
 24738  		i1 := x1.AuxInt
 24739  		s := x1.Aux
 24740  		_ = x1.Args[1]
 24741  		p := x1.Args[0]
 24742  		mem := x1.Args[1]
 24743  		x0 := v.Args[1]
 24744  		if x0.Op != OpAMD64MOVBload {
 24745  			break
 24746  		}
 24747  		i0 := x0.AuxInt
 24748  		if x0.Aux != s {
 24749  			break
 24750  		}
 24751  		_ = x0.Args[1]
 24752  		if p != x0.Args[0] {
 24753  			break
 24754  		}
 24755  		if mem != x0.Args[1] {
 24756  			break
 24757  		}
 24758  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 24759  			break
 24760  		}
 24761  		b = mergePoint(b, x0, x1)
 24762  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 24763  		v.reset(OpCopy)
 24764  		v.AddArg(v0)
 24765  		v0.AuxInt = i0
 24766  		v0.Aux = s
 24767  		v0.AddArg(p)
 24768  		v0.AddArg(mem)
 24769  		return true
 24770  	}
 24771  	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
 24772  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 24773  	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
 24774  	for {
 24775  		_ = v.Args[1]
 24776  		x0 := v.Args[0]
 24777  		if x0.Op != OpAMD64MOVWload {
 24778  			break
 24779  		}
 24780  		i0 := x0.AuxInt
 24781  		s := x0.Aux
 24782  		_ = x0.Args[1]
 24783  		p := x0.Args[0]
 24784  		mem := x0.Args[1]
 24785  		sh := v.Args[1]
 24786  		if sh.Op != OpAMD64SHLQconst {
 24787  			break
 24788  		}
 24789  		if sh.AuxInt != 16 {
 24790  			break
 24791  		}
 24792  		x1 := sh.Args[0]
 24793  		if x1.Op != OpAMD64MOVWload {
 24794  			break
 24795  		}
 24796  		i1 := x1.AuxInt
 24797  		if x1.Aux != s {
 24798  			break
 24799  		}
 24800  		_ = x1.Args[1]
 24801  		if p != x1.Args[0] {
 24802  			break
 24803  		}
 24804  		if mem != x1.Args[1] {
 24805  			break
 24806  		}
 24807  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 24808  			break
 24809  		}
 24810  		b = mergePoint(b, x0, x1)
 24811  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 24812  		v.reset(OpCopy)
 24813  		v.AddArg(v0)
 24814  		v0.AuxInt = i0
 24815  		v0.Aux = s
 24816  		v0.AddArg(p)
 24817  		v0.AddArg(mem)
 24818  		return true
 24819  	}
 24820  	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
 24821  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 24822  	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
 24823  	for {
 24824  		_ = v.Args[1]
 24825  		sh := v.Args[0]
 24826  		if sh.Op != OpAMD64SHLQconst {
 24827  			break
 24828  		}
 24829  		if sh.AuxInt != 16 {
 24830  			break
 24831  		}
 24832  		x1 := sh.Args[0]
 24833  		if x1.Op != OpAMD64MOVWload {
 24834  			break
 24835  		}
 24836  		i1 := x1.AuxInt
 24837  		s := x1.Aux
 24838  		_ = x1.Args[1]
 24839  		p := x1.Args[0]
 24840  		mem := x1.Args[1]
 24841  		x0 := v.Args[1]
 24842  		if x0.Op != OpAMD64MOVWload {
 24843  			break
 24844  		}
 24845  		i0 := x0.AuxInt
 24846  		if x0.Aux != s {
 24847  			break
 24848  		}
 24849  		_ = x0.Args[1]
 24850  		if p != x0.Args[0] {
 24851  			break
 24852  		}
 24853  		if mem != x0.Args[1] {
 24854  			break
 24855  		}
 24856  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 24857  			break
 24858  		}
 24859  		b = mergePoint(b, x0, x1)
 24860  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 24861  		v.reset(OpCopy)
 24862  		v.AddArg(v0)
 24863  		v0.AuxInt = i0
 24864  		v0.Aux = s
 24865  		v0.AddArg(p)
 24866  		v0.AddArg(mem)
 24867  		return true
 24868  	}
 24869  	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
 24870  	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 24871  	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
 24872  	for {
 24873  		_ = v.Args[1]
 24874  		x0 := v.Args[0]
 24875  		if x0.Op != OpAMD64MOVLload {
 24876  			break
 24877  		}
 24878  		i0 := x0.AuxInt
 24879  		s := x0.Aux
 24880  		_ = x0.Args[1]
 24881  		p := x0.Args[0]
 24882  		mem := x0.Args[1]
 24883  		sh := v.Args[1]
 24884  		if sh.Op != OpAMD64SHLQconst {
 24885  			break
 24886  		}
 24887  		if sh.AuxInt != 32 {
 24888  			break
 24889  		}
 24890  		x1 := sh.Args[0]
 24891  		if x1.Op != OpAMD64MOVLload {
 24892  			break
 24893  		}
 24894  		i1 := x1.AuxInt
 24895  		if x1.Aux != s {
 24896  			break
 24897  		}
 24898  		_ = x1.Args[1]
 24899  		if p != x1.Args[0] {
 24900  			break
 24901  		}
 24902  		if mem != x1.Args[1] {
 24903  			break
 24904  		}
 24905  		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 24906  			break
 24907  		}
 24908  		b = mergePoint(b, x0, x1)
 24909  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
 24910  		v.reset(OpCopy)
 24911  		v.AddArg(v0)
 24912  		v0.AuxInt = i0
 24913  		v0.Aux = s
 24914  		v0.AddArg(p)
 24915  		v0.AddArg(mem)
 24916  		return true
 24917  	}
 24918  	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
 24919  	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 24920  	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
 24921  	for {
 24922  		_ = v.Args[1]
 24923  		sh := v.Args[0]
 24924  		if sh.Op != OpAMD64SHLQconst {
 24925  			break
 24926  		}
 24927  		if sh.AuxInt != 32 {
 24928  			break
 24929  		}
 24930  		x1 := sh.Args[0]
 24931  		if x1.Op != OpAMD64MOVLload {
 24932  			break
 24933  		}
 24934  		i1 := x1.AuxInt
 24935  		s := x1.Aux
 24936  		_ = x1.Args[1]
 24937  		p := x1.Args[0]
 24938  		mem := x1.Args[1]
 24939  		x0 := v.Args[1]
 24940  		if x0.Op != OpAMD64MOVLload {
 24941  			break
 24942  		}
 24943  		i0 := x0.AuxInt
 24944  		if x0.Aux != s {
 24945  			break
 24946  		}
 24947  		_ = x0.Args[1]
 24948  		if p != x0.Args[0] {
 24949  			break
 24950  		}
 24951  		if mem != x0.Args[1] {
 24952  			break
 24953  		}
 24954  		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 24955  			break
 24956  		}
 24957  		b = mergePoint(b, x0, x1)
 24958  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
 24959  		v.reset(OpCopy)
 24960  		v.AddArg(v0)
 24961  		v0.AuxInt = i0
 24962  		v0.Aux = s
 24963  		v0.AddArg(p)
 24964  		v0.AddArg(mem)
 24965  		return true
 24966  	}
 24967  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
 24968  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 24969  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
 24970  	for {
 24971  		_ = v.Args[1]
 24972  		s1 := v.Args[0]
 24973  		if s1.Op != OpAMD64SHLQconst {
 24974  			break
 24975  		}
 24976  		j1 := s1.AuxInt
 24977  		x1 := s1.Args[0]
 24978  		if x1.Op != OpAMD64MOVBload {
 24979  			break
 24980  		}
 24981  		i1 := x1.AuxInt
 24982  		s := x1.Aux
 24983  		_ = x1.Args[1]
 24984  		p := x1.Args[0]
 24985  		mem := x1.Args[1]
 24986  		or := v.Args[1]
 24987  		if or.Op != OpAMD64ORQ {
 24988  			break
 24989  		}
 24990  		_ = or.Args[1]
 24991  		s0 := or.Args[0]
 24992  		if s0.Op != OpAMD64SHLQconst {
 24993  			break
 24994  		}
 24995  		j0 := s0.AuxInt
 24996  		x0 := s0.Args[0]
 24997  		if x0.Op != OpAMD64MOVBload {
 24998  			break
 24999  		}
 25000  		i0 := x0.AuxInt
 25001  		if x0.Aux != s {
 25002  			break
 25003  		}
 25004  		_ = x0.Args[1]
 25005  		if p != x0.Args[0] {
 25006  			break
 25007  		}
 25008  		if mem != x0.Args[1] {
 25009  			break
 25010  		}
 25011  		y := or.Args[1]
 25012  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 25013  			break
 25014  		}
 25015  		b = mergePoint(b, x0, x1)
 25016  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 25017  		v.reset(OpCopy)
 25018  		v.AddArg(v0)
 25019  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 25020  		v1.AuxInt = j0
 25021  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 25022  		v2.AuxInt = i0
 25023  		v2.Aux = s
 25024  		v2.AddArg(p)
 25025  		v2.AddArg(mem)
 25026  		v1.AddArg(v2)
 25027  		v0.AddArg(v1)
 25028  		v0.AddArg(y)
 25029  		return true
 25030  	}
 25031  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
 25032  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 25033  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
 25034  	for {
 25035  		_ = v.Args[1]
 25036  		s1 := v.Args[0]
 25037  		if s1.Op != OpAMD64SHLQconst {
 25038  			break
 25039  		}
 25040  		j1 := s1.AuxInt
 25041  		x1 := s1.Args[0]
 25042  		if x1.Op != OpAMD64MOVBload {
 25043  			break
 25044  		}
 25045  		i1 := x1.AuxInt
 25046  		s := x1.Aux
 25047  		_ = x1.Args[1]
 25048  		p := x1.Args[0]
 25049  		mem := x1.Args[1]
 25050  		or := v.Args[1]
 25051  		if or.Op != OpAMD64ORQ {
 25052  			break
 25053  		}
 25054  		_ = or.Args[1]
 25055  		y := or.Args[0]
 25056  		s0 := or.Args[1]
 25057  		if s0.Op != OpAMD64SHLQconst {
 25058  			break
 25059  		}
 25060  		j0 := s0.AuxInt
 25061  		x0 := s0.Args[0]
 25062  		if x0.Op != OpAMD64MOVBload {
 25063  			break
 25064  		}
 25065  		i0 := x0.AuxInt
 25066  		if x0.Aux != s {
 25067  			break
 25068  		}
 25069  		_ = x0.Args[1]
 25070  		if p != x0.Args[0] {
 25071  			break
 25072  		}
 25073  		if mem != x0.Args[1] {
 25074  			break
 25075  		}
 25076  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 25077  			break
 25078  		}
 25079  		b = mergePoint(b, x0, x1)
 25080  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 25081  		v.reset(OpCopy)
 25082  		v.AddArg(v0)
 25083  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 25084  		v1.AuxInt = j0
 25085  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 25086  		v2.AuxInt = i0
 25087  		v2.Aux = s
 25088  		v2.AddArg(p)
 25089  		v2.AddArg(mem)
 25090  		v1.AddArg(v2)
 25091  		v0.AddArg(v1)
 25092  		v0.AddArg(y)
 25093  		return true
 25094  	}
 25095  	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
 25096  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 25097  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
 25098  	for {
 25099  		_ = v.Args[1]
 25100  		or := v.Args[0]
 25101  		if or.Op != OpAMD64ORQ {
 25102  			break
 25103  		}
 25104  		_ = or.Args[1]
 25105  		s0 := or.Args[0]
 25106  		if s0.Op != OpAMD64SHLQconst {
 25107  			break
 25108  		}
 25109  		j0 := s0.AuxInt
 25110  		x0 := s0.Args[0]
 25111  		if x0.Op != OpAMD64MOVBload {
 25112  			break
 25113  		}
 25114  		i0 := x0.AuxInt
 25115  		s := x0.Aux
 25116  		_ = x0.Args[1]
 25117  		p := x0.Args[0]
 25118  		mem := x0.Args[1]
 25119  		y := or.Args[1]
 25120  		s1 := v.Args[1]
 25121  		if s1.Op != OpAMD64SHLQconst {
 25122  			break
 25123  		}
 25124  		j1 := s1.AuxInt
 25125  		x1 := s1.Args[0]
 25126  		if x1.Op != OpAMD64MOVBload {
 25127  			break
 25128  		}
 25129  		i1 := x1.AuxInt
 25130  		if x1.Aux != s {
 25131  			break
 25132  		}
 25133  		_ = x1.Args[1]
 25134  		if p != x1.Args[0] {
 25135  			break
 25136  		}
 25137  		if mem != x1.Args[1] {
 25138  			break
 25139  		}
 25140  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 25141  			break
 25142  		}
 25143  		b = mergePoint(b, x0, x1)
 25144  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 25145  		v.reset(OpCopy)
 25146  		v.AddArg(v0)
 25147  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 25148  		v1.AuxInt = j0
 25149  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 25150  		v2.AuxInt = i0
 25151  		v2.Aux = s
 25152  		v2.AddArg(p)
 25153  		v2.AddArg(mem)
 25154  		v1.AddArg(v2)
 25155  		v0.AddArg(v1)
 25156  		v0.AddArg(y)
 25157  		return true
 25158  	}
 25159  	return false
 25160  }
// rewriteValueAMD64_OpAMD64ORQ_30 is one chunk of the machine-generated
// rewrite cascade for OpAMD64ORQ (the generator splits the rule list across
// numbered helper functions; see also rewriteValueAMD64_OpAMD64ORQ_40).
// Each "for { ... }" block below attempts exactly one rule: it structurally
// matches v against the pattern in the preceding "match:" comment, checks the
// side conditions from the "cond:" comment, and on success rewrites v into
// the "result:" form, returning true. A failed match falls through ("break")
// to the next rule; if no rule fires the function returns false.
//
// All rules in this chunk merge two adjacent narrow loads that are OR'd
// together (possibly through SHLQconst shifts) into a single wider load at
// the lower offset i0.
//
// Code generated from gen/AMD64.rules; DO NOT EDIT this function by hand —
// change gen/AMD64.rules and regenerate instead.
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// NOTE: the blank assignments above keep b/typ referenced even in
	// generated functions whose rules happen not to use them.
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		// Build the merged result at the common dominator of both loads.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// The next four rules are the MOVWload analogue of the byte-load rules
	// above (two 16-bit loads at i0/i0+2 merged into one MOVLload), repeated
	// for each commutation of the ORQ operands.
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// The remaining rules combine two indexed byte loads (MOVBloadidx1 at
	// i0 and i0+1, the high byte shifted left by 8) into one MOVWloadidx1,
	// covering the p/idx argument-order permutations of each load and both
	// operand orders of the outer ORQ.
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// No rule in this chunk matched; the caller tries the next chunk.
	return false
}
 25758  func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
 25759  	b := v.Block
 25760  	_ = b
 25761  	typ := &b.Func.Config.Types
 25762  	_ = typ
 25763  	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
 25764  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 25765  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 25766  	for {
 25767  		_ = v.Args[1]
 25768  		sh := v.Args[0]
 25769  		if sh.Op != OpAMD64SHLQconst {
 25770  			break
 25771  		}
 25772  		if sh.AuxInt != 8 {
 25773  			break
 25774  		}
 25775  		x1 := sh.Args[0]
 25776  		if x1.Op != OpAMD64MOVBloadidx1 {
 25777  			break
 25778  		}
 25779  		i1 := x1.AuxInt
 25780  		s := x1.Aux
 25781  		_ = x1.Args[2]
 25782  		idx := x1.Args[0]
 25783  		p := x1.Args[1]
 25784  		mem := x1.Args[2]
 25785  		x0 := v.Args[1]
 25786  		if x0.Op != OpAMD64MOVBloadidx1 {
 25787  			break
 25788  		}
 25789  		i0 := x0.AuxInt
 25790  		if x0.Aux != s {
 25791  			break
 25792  		}
 25793  		_ = x0.Args[2]
 25794  		if p != x0.Args[0] {
 25795  			break
 25796  		}
 25797  		if idx != x0.Args[1] {
 25798  			break
 25799  		}
 25800  		if mem != x0.Args[2] {
 25801  			break
 25802  		}
 25803  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 25804  			break
 25805  		}
 25806  		b = mergePoint(b, x0, x1)
 25807  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 25808  		v.reset(OpCopy)
 25809  		v.AddArg(v0)
 25810  		v0.AuxInt = i0
 25811  		v0.Aux = s
 25812  		v0.AddArg(p)
 25813  		v0.AddArg(idx)
 25814  		v0.AddArg(mem)
 25815  		return true
 25816  	}
 25817  	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
 25818  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 25819  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 25820  	for {
 25821  		_ = v.Args[1]
 25822  		sh := v.Args[0]
 25823  		if sh.Op != OpAMD64SHLQconst {
 25824  			break
 25825  		}
 25826  		if sh.AuxInt != 8 {
 25827  			break
 25828  		}
 25829  		x1 := sh.Args[0]
 25830  		if x1.Op != OpAMD64MOVBloadidx1 {
 25831  			break
 25832  		}
 25833  		i1 := x1.AuxInt
 25834  		s := x1.Aux
 25835  		_ = x1.Args[2]
 25836  		p := x1.Args[0]
 25837  		idx := x1.Args[1]
 25838  		mem := x1.Args[2]
 25839  		x0 := v.Args[1]
 25840  		if x0.Op != OpAMD64MOVBloadidx1 {
 25841  			break
 25842  		}
 25843  		i0 := x0.AuxInt
 25844  		if x0.Aux != s {
 25845  			break
 25846  		}
 25847  		_ = x0.Args[2]
 25848  		if idx != x0.Args[0] {
 25849  			break
 25850  		}
 25851  		if p != x0.Args[1] {
 25852  			break
 25853  		}
 25854  		if mem != x0.Args[2] {
 25855  			break
 25856  		}
 25857  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 25858  			break
 25859  		}
 25860  		b = mergePoint(b, x0, x1)
 25861  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 25862  		v.reset(OpCopy)
 25863  		v.AddArg(v0)
 25864  		v0.AuxInt = i0
 25865  		v0.Aux = s
 25866  		v0.AddArg(p)
 25867  		v0.AddArg(idx)
 25868  		v0.AddArg(mem)
 25869  		return true
 25870  	}
 25871  	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
 25872  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 25873  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
 25874  	for {
 25875  		_ = v.Args[1]
 25876  		sh := v.Args[0]
 25877  		if sh.Op != OpAMD64SHLQconst {
 25878  			break
 25879  		}
 25880  		if sh.AuxInt != 8 {
 25881  			break
 25882  		}
 25883  		x1 := sh.Args[0]
 25884  		if x1.Op != OpAMD64MOVBloadidx1 {
 25885  			break
 25886  		}
 25887  		i1 := x1.AuxInt
 25888  		s := x1.Aux
 25889  		_ = x1.Args[2]
 25890  		idx := x1.Args[0]
 25891  		p := x1.Args[1]
 25892  		mem := x1.Args[2]
 25893  		x0 := v.Args[1]
 25894  		if x0.Op != OpAMD64MOVBloadidx1 {
 25895  			break
 25896  		}
 25897  		i0 := x0.AuxInt
 25898  		if x0.Aux != s {
 25899  			break
 25900  		}
 25901  		_ = x0.Args[2]
 25902  		if idx != x0.Args[0] {
 25903  			break
 25904  		}
 25905  		if p != x0.Args[1] {
 25906  			break
 25907  		}
 25908  		if mem != x0.Args[2] {
 25909  			break
 25910  		}
 25911  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 25912  			break
 25913  		}
 25914  		b = mergePoint(b, x0, x1)
 25915  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
 25916  		v.reset(OpCopy)
 25917  		v.AddArg(v0)
 25918  		v0.AuxInt = i0
 25919  		v0.Aux = s
 25920  		v0.AddArg(p)
 25921  		v0.AddArg(idx)
 25922  		v0.AddArg(mem)
 25923  		return true
 25924  	}
 25925  	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 25926  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 25927  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 25928  	for {
 25929  		_ = v.Args[1]
 25930  		x0 := v.Args[0]
 25931  		if x0.Op != OpAMD64MOVWloadidx1 {
 25932  			break
 25933  		}
 25934  		i0 := x0.AuxInt
 25935  		s := x0.Aux
 25936  		_ = x0.Args[2]
 25937  		p := x0.Args[0]
 25938  		idx := x0.Args[1]
 25939  		mem := x0.Args[2]
 25940  		sh := v.Args[1]
 25941  		if sh.Op != OpAMD64SHLQconst {
 25942  			break
 25943  		}
 25944  		if sh.AuxInt != 16 {
 25945  			break
 25946  		}
 25947  		x1 := sh.Args[0]
 25948  		if x1.Op != OpAMD64MOVWloadidx1 {
 25949  			break
 25950  		}
 25951  		i1 := x1.AuxInt
 25952  		if x1.Aux != s {
 25953  			break
 25954  		}
 25955  		_ = x1.Args[2]
 25956  		if p != x1.Args[0] {
 25957  			break
 25958  		}
 25959  		if idx != x1.Args[1] {
 25960  			break
 25961  		}
 25962  		if mem != x1.Args[2] {
 25963  			break
 25964  		}
 25965  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 25966  			break
 25967  		}
 25968  		b = mergePoint(b, x0, x1)
 25969  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 25970  		v.reset(OpCopy)
 25971  		v.AddArg(v0)
 25972  		v0.AuxInt = i0
 25973  		v0.Aux = s
 25974  		v0.AddArg(p)
 25975  		v0.AddArg(idx)
 25976  		v0.AddArg(mem)
 25977  		return true
 25978  	}
 25979  	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 25980  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 25981  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 25982  	for {
 25983  		_ = v.Args[1]
 25984  		x0 := v.Args[0]
 25985  		if x0.Op != OpAMD64MOVWloadidx1 {
 25986  			break
 25987  		}
 25988  		i0 := x0.AuxInt
 25989  		s := x0.Aux
 25990  		_ = x0.Args[2]
 25991  		idx := x0.Args[0]
 25992  		p := x0.Args[1]
 25993  		mem := x0.Args[2]
 25994  		sh := v.Args[1]
 25995  		if sh.Op != OpAMD64SHLQconst {
 25996  			break
 25997  		}
 25998  		if sh.AuxInt != 16 {
 25999  			break
 26000  		}
 26001  		x1 := sh.Args[0]
 26002  		if x1.Op != OpAMD64MOVWloadidx1 {
 26003  			break
 26004  		}
 26005  		i1 := x1.AuxInt
 26006  		if x1.Aux != s {
 26007  			break
 26008  		}
 26009  		_ = x1.Args[2]
 26010  		if p != x1.Args[0] {
 26011  			break
 26012  		}
 26013  		if idx != x1.Args[1] {
 26014  			break
 26015  		}
 26016  		if mem != x1.Args[2] {
 26017  			break
 26018  		}
 26019  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 26020  			break
 26021  		}
 26022  		b = mergePoint(b, x0, x1)
 26023  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 26024  		v.reset(OpCopy)
 26025  		v.AddArg(v0)
 26026  		v0.AuxInt = i0
 26027  		v0.Aux = s
 26028  		v0.AddArg(p)
 26029  		v0.AddArg(idx)
 26030  		v0.AddArg(mem)
 26031  		return true
 26032  	}
 26033  	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
 26034  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 26035  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 26036  	for {
 26037  		_ = v.Args[1]
 26038  		x0 := v.Args[0]
 26039  		if x0.Op != OpAMD64MOVWloadidx1 {
 26040  			break
 26041  		}
 26042  		i0 := x0.AuxInt
 26043  		s := x0.Aux
 26044  		_ = x0.Args[2]
 26045  		p := x0.Args[0]
 26046  		idx := x0.Args[1]
 26047  		mem := x0.Args[2]
 26048  		sh := v.Args[1]
 26049  		if sh.Op != OpAMD64SHLQconst {
 26050  			break
 26051  		}
 26052  		if sh.AuxInt != 16 {
 26053  			break
 26054  		}
 26055  		x1 := sh.Args[0]
 26056  		if x1.Op != OpAMD64MOVWloadidx1 {
 26057  			break
 26058  		}
 26059  		i1 := x1.AuxInt
 26060  		if x1.Aux != s {
 26061  			break
 26062  		}
 26063  		_ = x1.Args[2]
 26064  		if idx != x1.Args[0] {
 26065  			break
 26066  		}
 26067  		if p != x1.Args[1] {
 26068  			break
 26069  		}
 26070  		if mem != x1.Args[2] {
 26071  			break
 26072  		}
 26073  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 26074  			break
 26075  		}
 26076  		b = mergePoint(b, x0, x1)
 26077  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 26078  		v.reset(OpCopy)
 26079  		v.AddArg(v0)
 26080  		v0.AuxInt = i0
 26081  		v0.Aux = s
 26082  		v0.AddArg(p)
 26083  		v0.AddArg(idx)
 26084  		v0.AddArg(mem)
 26085  		return true
 26086  	}
 26087  	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
 26088  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 26089  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 26090  	for {
 26091  		_ = v.Args[1]
 26092  		x0 := v.Args[0]
 26093  		if x0.Op != OpAMD64MOVWloadidx1 {
 26094  			break
 26095  		}
 26096  		i0 := x0.AuxInt
 26097  		s := x0.Aux
 26098  		_ = x0.Args[2]
 26099  		idx := x0.Args[0]
 26100  		p := x0.Args[1]
 26101  		mem := x0.Args[2]
 26102  		sh := v.Args[1]
 26103  		if sh.Op != OpAMD64SHLQconst {
 26104  			break
 26105  		}
 26106  		if sh.AuxInt != 16 {
 26107  			break
 26108  		}
 26109  		x1 := sh.Args[0]
 26110  		if x1.Op != OpAMD64MOVWloadidx1 {
 26111  			break
 26112  		}
 26113  		i1 := x1.AuxInt
 26114  		if x1.Aux != s {
 26115  			break
 26116  		}
 26117  		_ = x1.Args[2]
 26118  		if idx != x1.Args[0] {
 26119  			break
 26120  		}
 26121  		if p != x1.Args[1] {
 26122  			break
 26123  		}
 26124  		if mem != x1.Args[2] {
 26125  			break
 26126  		}
 26127  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 26128  			break
 26129  		}
 26130  		b = mergePoint(b, x0, x1)
 26131  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 26132  		v.reset(OpCopy)
 26133  		v.AddArg(v0)
 26134  		v0.AuxInt = i0
 26135  		v0.Aux = s
 26136  		v0.AddArg(p)
 26137  		v0.AddArg(idx)
 26138  		v0.AddArg(mem)
 26139  		return true
 26140  	}
 26141  	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
 26142  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 26143  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 26144  	for {
 26145  		_ = v.Args[1]
 26146  		sh := v.Args[0]
 26147  		if sh.Op != OpAMD64SHLQconst {
 26148  			break
 26149  		}
 26150  		if sh.AuxInt != 16 {
 26151  			break
 26152  		}
 26153  		x1 := sh.Args[0]
 26154  		if x1.Op != OpAMD64MOVWloadidx1 {
 26155  			break
 26156  		}
 26157  		i1 := x1.AuxInt
 26158  		s := x1.Aux
 26159  		_ = x1.Args[2]
 26160  		p := x1.Args[0]
 26161  		idx := x1.Args[1]
 26162  		mem := x1.Args[2]
 26163  		x0 := v.Args[1]
 26164  		if x0.Op != OpAMD64MOVWloadidx1 {
 26165  			break
 26166  		}
 26167  		i0 := x0.AuxInt
 26168  		if x0.Aux != s {
 26169  			break
 26170  		}
 26171  		_ = x0.Args[2]
 26172  		if p != x0.Args[0] {
 26173  			break
 26174  		}
 26175  		if idx != x0.Args[1] {
 26176  			break
 26177  		}
 26178  		if mem != x0.Args[2] {
 26179  			break
 26180  		}
 26181  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 26182  			break
 26183  		}
 26184  		b = mergePoint(b, x0, x1)
 26185  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 26186  		v.reset(OpCopy)
 26187  		v.AddArg(v0)
 26188  		v0.AuxInt = i0
 26189  		v0.Aux = s
 26190  		v0.AddArg(p)
 26191  		v0.AddArg(idx)
 26192  		v0.AddArg(mem)
 26193  		return true
 26194  	}
 26195  	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
 26196  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 26197  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 26198  	for {
 26199  		_ = v.Args[1]
 26200  		sh := v.Args[0]
 26201  		if sh.Op != OpAMD64SHLQconst {
 26202  			break
 26203  		}
 26204  		if sh.AuxInt != 16 {
 26205  			break
 26206  		}
 26207  		x1 := sh.Args[0]
 26208  		if x1.Op != OpAMD64MOVWloadidx1 {
 26209  			break
 26210  		}
 26211  		i1 := x1.AuxInt
 26212  		s := x1.Aux
 26213  		_ = x1.Args[2]
 26214  		idx := x1.Args[0]
 26215  		p := x1.Args[1]
 26216  		mem := x1.Args[2]
 26217  		x0 := v.Args[1]
 26218  		if x0.Op != OpAMD64MOVWloadidx1 {
 26219  			break
 26220  		}
 26221  		i0 := x0.AuxInt
 26222  		if x0.Aux != s {
 26223  			break
 26224  		}
 26225  		_ = x0.Args[2]
 26226  		if p != x0.Args[0] {
 26227  			break
 26228  		}
 26229  		if idx != x0.Args[1] {
 26230  			break
 26231  		}
 26232  		if mem != x0.Args[2] {
 26233  			break
 26234  		}
 26235  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 26236  			break
 26237  		}
 26238  		b = mergePoint(b, x0, x1)
 26239  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 26240  		v.reset(OpCopy)
 26241  		v.AddArg(v0)
 26242  		v0.AuxInt = i0
 26243  		v0.Aux = s
 26244  		v0.AddArg(p)
 26245  		v0.AddArg(idx)
 26246  		v0.AddArg(mem)
 26247  		return true
 26248  	}
 26249  	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
 26250  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 26251  	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
 26252  	for {
 26253  		_ = v.Args[1]
 26254  		sh := v.Args[0]
 26255  		if sh.Op != OpAMD64SHLQconst {
 26256  			break
 26257  		}
 26258  		if sh.AuxInt != 16 {
 26259  			break
 26260  		}
 26261  		x1 := sh.Args[0]
 26262  		if x1.Op != OpAMD64MOVWloadidx1 {
 26263  			break
 26264  		}
 26265  		i1 := x1.AuxInt
 26266  		s := x1.Aux
 26267  		_ = x1.Args[2]
 26268  		p := x1.Args[0]
 26269  		idx := x1.Args[1]
 26270  		mem := x1.Args[2]
 26271  		x0 := v.Args[1]
 26272  		if x0.Op != OpAMD64MOVWloadidx1 {
 26273  			break
 26274  		}
 26275  		i0 := x0.AuxInt
 26276  		if x0.Aux != s {
 26277  			break
 26278  		}
 26279  		_ = x0.Args[2]
 26280  		if idx != x0.Args[0] {
 26281  			break
 26282  		}
 26283  		if p != x0.Args[1] {
 26284  			break
 26285  		}
 26286  		if mem != x0.Args[2] {
 26287  			break
 26288  		}
 26289  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 26290  			break
 26291  		}
 26292  		b = mergePoint(b, x0, x1)
 26293  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 26294  		v.reset(OpCopy)
 26295  		v.AddArg(v0)
 26296  		v0.AuxInt = i0
 26297  		v0.Aux = s
 26298  		v0.AddArg(p)
 26299  		v0.AddArg(idx)
 26300  		v0.AddArg(mem)
 26301  		return true
 26302  	}
 26303  	return false
 26304  }
// rewriteValueAMD64_OpAMD64ORQ_50 applies one batch of the ORQ rewrite rules
// (the generator splits the full rule set for an op across numbered helpers;
// this is the _50 slice). Every rule here is a load-combining pattern: an ORQ
// whose operands are two adjacent narrower indexed loads (one of them shifted
// left by the width of the other) is replaced by a single wider indexed load —
// MOVWloadidx1 pairs become a MOVLloadidx1, MOVLloadidx1 pairs become a
// MOVQloadidx1, and a MOVBloadidx1 pair inside an ORQ chain becomes a shifted
// MOVWloadidx1. The rule variants below differ only in which operand order
// (p idx vs idx p, sh first vs load first) they match, since ORQ and the
// index addressing are commutative.
//
// Reports whether any rule fired. NOTE: generated code — do not hand-edit;
// change gen/AMD64.rules and regenerate instead.
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		// Bounds hint for the args slice (generated-code idiom).
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		// Build the wider load in the block chosen by mergePoint and turn v
		// into a copy of it.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		// Replace the two shifted byte loads with one shifted word load,
		// keeping the unrelated ORQ operand y.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// No rule in this batch matched.
	return false
}
 26867  func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
 26868  	b := v.Block
 26869  	_ = b
 26870  	typ := &b.Func.Config.Types
 26871  	_ = typ
 26872  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
 26873  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 26874  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 26875  	for {
 26876  		_ = v.Args[1]
 26877  		s1 := v.Args[0]
 26878  		if s1.Op != OpAMD64SHLQconst {
 26879  			break
 26880  		}
 26881  		j1 := s1.AuxInt
 26882  		x1 := s1.Args[0]
 26883  		if x1.Op != OpAMD64MOVBloadidx1 {
 26884  			break
 26885  		}
 26886  		i1 := x1.AuxInt
 26887  		s := x1.Aux
 26888  		_ = x1.Args[2]
 26889  		idx := x1.Args[0]
 26890  		p := x1.Args[1]
 26891  		mem := x1.Args[2]
 26892  		or := v.Args[1]
 26893  		if or.Op != OpAMD64ORQ {
 26894  			break
 26895  		}
 26896  		_ = or.Args[1]
 26897  		s0 := or.Args[0]
 26898  		if s0.Op != OpAMD64SHLQconst {
 26899  			break
 26900  		}
 26901  		j0 := s0.AuxInt
 26902  		x0 := s0.Args[0]
 26903  		if x0.Op != OpAMD64MOVBloadidx1 {
 26904  			break
 26905  		}
 26906  		i0 := x0.AuxInt
 26907  		if x0.Aux != s {
 26908  			break
 26909  		}
 26910  		_ = x0.Args[2]
 26911  		if p != x0.Args[0] {
 26912  			break
 26913  		}
 26914  		if idx != x0.Args[1] {
 26915  			break
 26916  		}
 26917  		if mem != x0.Args[2] {
 26918  			break
 26919  		}
 26920  		y := or.Args[1]
 26921  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 26922  			break
 26923  		}
 26924  		b = mergePoint(b, x0, x1)
 26925  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 26926  		v.reset(OpCopy)
 26927  		v.AddArg(v0)
 26928  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 26929  		v1.AuxInt = j0
 26930  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 26931  		v2.AuxInt = i0
 26932  		v2.Aux = s
 26933  		v2.AddArg(p)
 26934  		v2.AddArg(idx)
 26935  		v2.AddArg(mem)
 26936  		v1.AddArg(v2)
 26937  		v0.AddArg(v1)
 26938  		v0.AddArg(y)
 26939  		return true
 26940  	}
 26941  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
 26942  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 26943  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 26944  	for {
 26945  		_ = v.Args[1]
 26946  		s1 := v.Args[0]
 26947  		if s1.Op != OpAMD64SHLQconst {
 26948  			break
 26949  		}
 26950  		j1 := s1.AuxInt
 26951  		x1 := s1.Args[0]
 26952  		if x1.Op != OpAMD64MOVBloadidx1 {
 26953  			break
 26954  		}
 26955  		i1 := x1.AuxInt
 26956  		s := x1.Aux
 26957  		_ = x1.Args[2]
 26958  		p := x1.Args[0]
 26959  		idx := x1.Args[1]
 26960  		mem := x1.Args[2]
 26961  		or := v.Args[1]
 26962  		if or.Op != OpAMD64ORQ {
 26963  			break
 26964  		}
 26965  		_ = or.Args[1]
 26966  		s0 := or.Args[0]
 26967  		if s0.Op != OpAMD64SHLQconst {
 26968  			break
 26969  		}
 26970  		j0 := s0.AuxInt
 26971  		x0 := s0.Args[0]
 26972  		if x0.Op != OpAMD64MOVBloadidx1 {
 26973  			break
 26974  		}
 26975  		i0 := x0.AuxInt
 26976  		if x0.Aux != s {
 26977  			break
 26978  		}
 26979  		_ = x0.Args[2]
 26980  		if idx != x0.Args[0] {
 26981  			break
 26982  		}
 26983  		if p != x0.Args[1] {
 26984  			break
 26985  		}
 26986  		if mem != x0.Args[2] {
 26987  			break
 26988  		}
 26989  		y := or.Args[1]
 26990  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 26991  			break
 26992  		}
 26993  		b = mergePoint(b, x0, x1)
 26994  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 26995  		v.reset(OpCopy)
 26996  		v.AddArg(v0)
 26997  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 26998  		v1.AuxInt = j0
 26999  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27000  		v2.AuxInt = i0
 27001  		v2.Aux = s
 27002  		v2.AddArg(p)
 27003  		v2.AddArg(idx)
 27004  		v2.AddArg(mem)
 27005  		v1.AddArg(v2)
 27006  		v0.AddArg(v1)
 27007  		v0.AddArg(y)
 27008  		return true
 27009  	}
 27010  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
 27011  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27012  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27013  	for {
 27014  		_ = v.Args[1]
 27015  		s1 := v.Args[0]
 27016  		if s1.Op != OpAMD64SHLQconst {
 27017  			break
 27018  		}
 27019  		j1 := s1.AuxInt
 27020  		x1 := s1.Args[0]
 27021  		if x1.Op != OpAMD64MOVBloadidx1 {
 27022  			break
 27023  		}
 27024  		i1 := x1.AuxInt
 27025  		s := x1.Aux
 27026  		_ = x1.Args[2]
 27027  		idx := x1.Args[0]
 27028  		p := x1.Args[1]
 27029  		mem := x1.Args[2]
 27030  		or := v.Args[1]
 27031  		if or.Op != OpAMD64ORQ {
 27032  			break
 27033  		}
 27034  		_ = or.Args[1]
 27035  		s0 := or.Args[0]
 27036  		if s0.Op != OpAMD64SHLQconst {
 27037  			break
 27038  		}
 27039  		j0 := s0.AuxInt
 27040  		x0 := s0.Args[0]
 27041  		if x0.Op != OpAMD64MOVBloadidx1 {
 27042  			break
 27043  		}
 27044  		i0 := x0.AuxInt
 27045  		if x0.Aux != s {
 27046  			break
 27047  		}
 27048  		_ = x0.Args[2]
 27049  		if idx != x0.Args[0] {
 27050  			break
 27051  		}
 27052  		if p != x0.Args[1] {
 27053  			break
 27054  		}
 27055  		if mem != x0.Args[2] {
 27056  			break
 27057  		}
 27058  		y := or.Args[1]
 27059  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27060  			break
 27061  		}
 27062  		b = mergePoint(b, x0, x1)
 27063  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27064  		v.reset(OpCopy)
 27065  		v.AddArg(v0)
 27066  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27067  		v1.AuxInt = j0
 27068  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27069  		v2.AuxInt = i0
 27070  		v2.Aux = s
 27071  		v2.AddArg(p)
 27072  		v2.AddArg(idx)
 27073  		v2.AddArg(mem)
 27074  		v1.AddArg(v2)
 27075  		v0.AddArg(v1)
 27076  		v0.AddArg(y)
 27077  		return true
 27078  	}
 27079  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
 27080  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27081  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27082  	for {
 27083  		_ = v.Args[1]
 27084  		s1 := v.Args[0]
 27085  		if s1.Op != OpAMD64SHLQconst {
 27086  			break
 27087  		}
 27088  		j1 := s1.AuxInt
 27089  		x1 := s1.Args[0]
 27090  		if x1.Op != OpAMD64MOVBloadidx1 {
 27091  			break
 27092  		}
 27093  		i1 := x1.AuxInt
 27094  		s := x1.Aux
 27095  		_ = x1.Args[2]
 27096  		p := x1.Args[0]
 27097  		idx := x1.Args[1]
 27098  		mem := x1.Args[2]
 27099  		or := v.Args[1]
 27100  		if or.Op != OpAMD64ORQ {
 27101  			break
 27102  		}
 27103  		_ = or.Args[1]
 27104  		y := or.Args[0]
 27105  		s0 := or.Args[1]
 27106  		if s0.Op != OpAMD64SHLQconst {
 27107  			break
 27108  		}
 27109  		j0 := s0.AuxInt
 27110  		x0 := s0.Args[0]
 27111  		if x0.Op != OpAMD64MOVBloadidx1 {
 27112  			break
 27113  		}
 27114  		i0 := x0.AuxInt
 27115  		if x0.Aux != s {
 27116  			break
 27117  		}
 27118  		_ = x0.Args[2]
 27119  		if p != x0.Args[0] {
 27120  			break
 27121  		}
 27122  		if idx != x0.Args[1] {
 27123  			break
 27124  		}
 27125  		if mem != x0.Args[2] {
 27126  			break
 27127  		}
 27128  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27129  			break
 27130  		}
 27131  		b = mergePoint(b, x0, x1)
 27132  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27133  		v.reset(OpCopy)
 27134  		v.AddArg(v0)
 27135  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27136  		v1.AuxInt = j0
 27137  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27138  		v2.AuxInt = i0
 27139  		v2.Aux = s
 27140  		v2.AddArg(p)
 27141  		v2.AddArg(idx)
 27142  		v2.AddArg(mem)
 27143  		v1.AddArg(v2)
 27144  		v0.AddArg(v1)
 27145  		v0.AddArg(y)
 27146  		return true
 27147  	}
 27148  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
 27149  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27150  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27151  	for {
 27152  		_ = v.Args[1]
 27153  		s1 := v.Args[0]
 27154  		if s1.Op != OpAMD64SHLQconst {
 27155  			break
 27156  		}
 27157  		j1 := s1.AuxInt
 27158  		x1 := s1.Args[0]
 27159  		if x1.Op != OpAMD64MOVBloadidx1 {
 27160  			break
 27161  		}
 27162  		i1 := x1.AuxInt
 27163  		s := x1.Aux
 27164  		_ = x1.Args[2]
 27165  		idx := x1.Args[0]
 27166  		p := x1.Args[1]
 27167  		mem := x1.Args[2]
 27168  		or := v.Args[1]
 27169  		if or.Op != OpAMD64ORQ {
 27170  			break
 27171  		}
 27172  		_ = or.Args[1]
 27173  		y := or.Args[0]
 27174  		s0 := or.Args[1]
 27175  		if s0.Op != OpAMD64SHLQconst {
 27176  			break
 27177  		}
 27178  		j0 := s0.AuxInt
 27179  		x0 := s0.Args[0]
 27180  		if x0.Op != OpAMD64MOVBloadidx1 {
 27181  			break
 27182  		}
 27183  		i0 := x0.AuxInt
 27184  		if x0.Aux != s {
 27185  			break
 27186  		}
 27187  		_ = x0.Args[2]
 27188  		if p != x0.Args[0] {
 27189  			break
 27190  		}
 27191  		if idx != x0.Args[1] {
 27192  			break
 27193  		}
 27194  		if mem != x0.Args[2] {
 27195  			break
 27196  		}
 27197  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27198  			break
 27199  		}
 27200  		b = mergePoint(b, x0, x1)
 27201  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27202  		v.reset(OpCopy)
 27203  		v.AddArg(v0)
 27204  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27205  		v1.AuxInt = j0
 27206  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27207  		v2.AuxInt = i0
 27208  		v2.Aux = s
 27209  		v2.AddArg(p)
 27210  		v2.AddArg(idx)
 27211  		v2.AddArg(mem)
 27212  		v1.AddArg(v2)
 27213  		v0.AddArg(v1)
 27214  		v0.AddArg(y)
 27215  		return true
 27216  	}
 27217  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
 27218  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27219  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27220  	for {
 27221  		_ = v.Args[1]
 27222  		s1 := v.Args[0]
 27223  		if s1.Op != OpAMD64SHLQconst {
 27224  			break
 27225  		}
 27226  		j1 := s1.AuxInt
 27227  		x1 := s1.Args[0]
 27228  		if x1.Op != OpAMD64MOVBloadidx1 {
 27229  			break
 27230  		}
 27231  		i1 := x1.AuxInt
 27232  		s := x1.Aux
 27233  		_ = x1.Args[2]
 27234  		p := x1.Args[0]
 27235  		idx := x1.Args[1]
 27236  		mem := x1.Args[2]
 27237  		or := v.Args[1]
 27238  		if or.Op != OpAMD64ORQ {
 27239  			break
 27240  		}
 27241  		_ = or.Args[1]
 27242  		y := or.Args[0]
 27243  		s0 := or.Args[1]
 27244  		if s0.Op != OpAMD64SHLQconst {
 27245  			break
 27246  		}
 27247  		j0 := s0.AuxInt
 27248  		x0 := s0.Args[0]
 27249  		if x0.Op != OpAMD64MOVBloadidx1 {
 27250  			break
 27251  		}
 27252  		i0 := x0.AuxInt
 27253  		if x0.Aux != s {
 27254  			break
 27255  		}
 27256  		_ = x0.Args[2]
 27257  		if idx != x0.Args[0] {
 27258  			break
 27259  		}
 27260  		if p != x0.Args[1] {
 27261  			break
 27262  		}
 27263  		if mem != x0.Args[2] {
 27264  			break
 27265  		}
 27266  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27267  			break
 27268  		}
 27269  		b = mergePoint(b, x0, x1)
 27270  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27271  		v.reset(OpCopy)
 27272  		v.AddArg(v0)
 27273  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27274  		v1.AuxInt = j0
 27275  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27276  		v2.AuxInt = i0
 27277  		v2.Aux = s
 27278  		v2.AddArg(p)
 27279  		v2.AddArg(idx)
 27280  		v2.AddArg(mem)
 27281  		v1.AddArg(v2)
 27282  		v0.AddArg(v1)
 27283  		v0.AddArg(y)
 27284  		return true
 27285  	}
 27286  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
 27287  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27288  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27289  	for {
 27290  		_ = v.Args[1]
 27291  		s1 := v.Args[0]
 27292  		if s1.Op != OpAMD64SHLQconst {
 27293  			break
 27294  		}
 27295  		j1 := s1.AuxInt
 27296  		x1 := s1.Args[0]
 27297  		if x1.Op != OpAMD64MOVBloadidx1 {
 27298  			break
 27299  		}
 27300  		i1 := x1.AuxInt
 27301  		s := x1.Aux
 27302  		_ = x1.Args[2]
 27303  		idx := x1.Args[0]
 27304  		p := x1.Args[1]
 27305  		mem := x1.Args[2]
 27306  		or := v.Args[1]
 27307  		if or.Op != OpAMD64ORQ {
 27308  			break
 27309  		}
 27310  		_ = or.Args[1]
 27311  		y := or.Args[0]
 27312  		s0 := or.Args[1]
 27313  		if s0.Op != OpAMD64SHLQconst {
 27314  			break
 27315  		}
 27316  		j0 := s0.AuxInt
 27317  		x0 := s0.Args[0]
 27318  		if x0.Op != OpAMD64MOVBloadidx1 {
 27319  			break
 27320  		}
 27321  		i0 := x0.AuxInt
 27322  		if x0.Aux != s {
 27323  			break
 27324  		}
 27325  		_ = x0.Args[2]
 27326  		if idx != x0.Args[0] {
 27327  			break
 27328  		}
 27329  		if p != x0.Args[1] {
 27330  			break
 27331  		}
 27332  		if mem != x0.Args[2] {
 27333  			break
 27334  		}
 27335  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27336  			break
 27337  		}
 27338  		b = mergePoint(b, x0, x1)
 27339  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27340  		v.reset(OpCopy)
 27341  		v.AddArg(v0)
 27342  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27343  		v1.AuxInt = j0
 27344  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27345  		v2.AuxInt = i0
 27346  		v2.Aux = s
 27347  		v2.AddArg(p)
 27348  		v2.AddArg(idx)
 27349  		v2.AddArg(mem)
 27350  		v1.AddArg(v2)
 27351  		v0.AddArg(v1)
 27352  		v0.AddArg(y)
 27353  		return true
 27354  	}
 27355  	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
 27356  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27357  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27358  	for {
 27359  		_ = v.Args[1]
 27360  		or := v.Args[0]
 27361  		if or.Op != OpAMD64ORQ {
 27362  			break
 27363  		}
 27364  		_ = or.Args[1]
 27365  		s0 := or.Args[0]
 27366  		if s0.Op != OpAMD64SHLQconst {
 27367  			break
 27368  		}
 27369  		j0 := s0.AuxInt
 27370  		x0 := s0.Args[0]
 27371  		if x0.Op != OpAMD64MOVBloadidx1 {
 27372  			break
 27373  		}
 27374  		i0 := x0.AuxInt
 27375  		s := x0.Aux
 27376  		_ = x0.Args[2]
 27377  		p := x0.Args[0]
 27378  		idx := x0.Args[1]
 27379  		mem := x0.Args[2]
 27380  		y := or.Args[1]
 27381  		s1 := v.Args[1]
 27382  		if s1.Op != OpAMD64SHLQconst {
 27383  			break
 27384  		}
 27385  		j1 := s1.AuxInt
 27386  		x1 := s1.Args[0]
 27387  		if x1.Op != OpAMD64MOVBloadidx1 {
 27388  			break
 27389  		}
 27390  		i1 := x1.AuxInt
 27391  		if x1.Aux != s {
 27392  			break
 27393  		}
 27394  		_ = x1.Args[2]
 27395  		if p != x1.Args[0] {
 27396  			break
 27397  		}
 27398  		if idx != x1.Args[1] {
 27399  			break
 27400  		}
 27401  		if mem != x1.Args[2] {
 27402  			break
 27403  		}
 27404  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27405  			break
 27406  		}
 27407  		b = mergePoint(b, x0, x1)
 27408  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27409  		v.reset(OpCopy)
 27410  		v.AddArg(v0)
 27411  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27412  		v1.AuxInt = j0
 27413  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27414  		v2.AuxInt = i0
 27415  		v2.Aux = s
 27416  		v2.AddArg(p)
 27417  		v2.AddArg(idx)
 27418  		v2.AddArg(mem)
 27419  		v1.AddArg(v2)
 27420  		v0.AddArg(v1)
 27421  		v0.AddArg(y)
 27422  		return true
 27423  	}
 27424  	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
 27425  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27426  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27427  	for {
 27428  		_ = v.Args[1]
 27429  		or := v.Args[0]
 27430  		if or.Op != OpAMD64ORQ {
 27431  			break
 27432  		}
 27433  		_ = or.Args[1]
 27434  		s0 := or.Args[0]
 27435  		if s0.Op != OpAMD64SHLQconst {
 27436  			break
 27437  		}
 27438  		j0 := s0.AuxInt
 27439  		x0 := s0.Args[0]
 27440  		if x0.Op != OpAMD64MOVBloadidx1 {
 27441  			break
 27442  		}
 27443  		i0 := x0.AuxInt
 27444  		s := x0.Aux
 27445  		_ = x0.Args[2]
 27446  		idx := x0.Args[0]
 27447  		p := x0.Args[1]
 27448  		mem := x0.Args[2]
 27449  		y := or.Args[1]
 27450  		s1 := v.Args[1]
 27451  		if s1.Op != OpAMD64SHLQconst {
 27452  			break
 27453  		}
 27454  		j1 := s1.AuxInt
 27455  		x1 := s1.Args[0]
 27456  		if x1.Op != OpAMD64MOVBloadidx1 {
 27457  			break
 27458  		}
 27459  		i1 := x1.AuxInt
 27460  		if x1.Aux != s {
 27461  			break
 27462  		}
 27463  		_ = x1.Args[2]
 27464  		if p != x1.Args[0] {
 27465  			break
 27466  		}
 27467  		if idx != x1.Args[1] {
 27468  			break
 27469  		}
 27470  		if mem != x1.Args[2] {
 27471  			break
 27472  		}
 27473  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27474  			break
 27475  		}
 27476  		b = mergePoint(b, x0, x1)
 27477  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27478  		v.reset(OpCopy)
 27479  		v.AddArg(v0)
 27480  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27481  		v1.AuxInt = j0
 27482  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27483  		v2.AuxInt = i0
 27484  		v2.Aux = s
 27485  		v2.AddArg(p)
 27486  		v2.AddArg(idx)
 27487  		v2.AddArg(mem)
 27488  		v1.AddArg(v2)
 27489  		v0.AddArg(v1)
 27490  		v0.AddArg(y)
 27491  		return true
 27492  	}
 27493  	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
 27494  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27495  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27496  	for {
 27497  		_ = v.Args[1]
 27498  		or := v.Args[0]
 27499  		if or.Op != OpAMD64ORQ {
 27500  			break
 27501  		}
 27502  		_ = or.Args[1]
 27503  		y := or.Args[0]
 27504  		s0 := or.Args[1]
 27505  		if s0.Op != OpAMD64SHLQconst {
 27506  			break
 27507  		}
 27508  		j0 := s0.AuxInt
 27509  		x0 := s0.Args[0]
 27510  		if x0.Op != OpAMD64MOVBloadidx1 {
 27511  			break
 27512  		}
 27513  		i0 := x0.AuxInt
 27514  		s := x0.Aux
 27515  		_ = x0.Args[2]
 27516  		p := x0.Args[0]
 27517  		idx := x0.Args[1]
 27518  		mem := x0.Args[2]
 27519  		s1 := v.Args[1]
 27520  		if s1.Op != OpAMD64SHLQconst {
 27521  			break
 27522  		}
 27523  		j1 := s1.AuxInt
 27524  		x1 := s1.Args[0]
 27525  		if x1.Op != OpAMD64MOVBloadidx1 {
 27526  			break
 27527  		}
 27528  		i1 := x1.AuxInt
 27529  		if x1.Aux != s {
 27530  			break
 27531  		}
 27532  		_ = x1.Args[2]
 27533  		if p != x1.Args[0] {
 27534  			break
 27535  		}
 27536  		if idx != x1.Args[1] {
 27537  			break
 27538  		}
 27539  		if mem != x1.Args[2] {
 27540  			break
 27541  		}
 27542  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27543  			break
 27544  		}
 27545  		b = mergePoint(b, x0, x1)
 27546  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27547  		v.reset(OpCopy)
 27548  		v.AddArg(v0)
 27549  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27550  		v1.AuxInt = j0
 27551  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27552  		v2.AuxInt = i0
 27553  		v2.Aux = s
 27554  		v2.AddArg(p)
 27555  		v2.AddArg(idx)
 27556  		v2.AddArg(mem)
 27557  		v1.AddArg(v2)
 27558  		v0.AddArg(v1)
 27559  		v0.AddArg(y)
 27560  		return true
 27561  	}
 27562  	return false
 27563  }
 27564  func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
 27565  	b := v.Block
 27566  	_ = b
 27567  	typ := &b.Func.Config.Types
 27568  	_ = typ
 27569  	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
 27570  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27571  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27572  	for {
 27573  		_ = v.Args[1]
 27574  		or := v.Args[0]
 27575  		if or.Op != OpAMD64ORQ {
 27576  			break
 27577  		}
 27578  		_ = or.Args[1]
 27579  		y := or.Args[0]
 27580  		s0 := or.Args[1]
 27581  		if s0.Op != OpAMD64SHLQconst {
 27582  			break
 27583  		}
 27584  		j0 := s0.AuxInt
 27585  		x0 := s0.Args[0]
 27586  		if x0.Op != OpAMD64MOVBloadidx1 {
 27587  			break
 27588  		}
 27589  		i0 := x0.AuxInt
 27590  		s := x0.Aux
 27591  		_ = x0.Args[2]
 27592  		idx := x0.Args[0]
 27593  		p := x0.Args[1]
 27594  		mem := x0.Args[2]
 27595  		s1 := v.Args[1]
 27596  		if s1.Op != OpAMD64SHLQconst {
 27597  			break
 27598  		}
 27599  		j1 := s1.AuxInt
 27600  		x1 := s1.Args[0]
 27601  		if x1.Op != OpAMD64MOVBloadidx1 {
 27602  			break
 27603  		}
 27604  		i1 := x1.AuxInt
 27605  		if x1.Aux != s {
 27606  			break
 27607  		}
 27608  		_ = x1.Args[2]
 27609  		if p != x1.Args[0] {
 27610  			break
 27611  		}
 27612  		if idx != x1.Args[1] {
 27613  			break
 27614  		}
 27615  		if mem != x1.Args[2] {
 27616  			break
 27617  		}
 27618  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27619  			break
 27620  		}
 27621  		b = mergePoint(b, x0, x1)
 27622  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27623  		v.reset(OpCopy)
 27624  		v.AddArg(v0)
 27625  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27626  		v1.AuxInt = j0
 27627  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27628  		v2.AuxInt = i0
 27629  		v2.Aux = s
 27630  		v2.AddArg(p)
 27631  		v2.AddArg(idx)
 27632  		v2.AddArg(mem)
 27633  		v1.AddArg(v2)
 27634  		v0.AddArg(v1)
 27635  		v0.AddArg(y)
 27636  		return true
 27637  	}
 27638  	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
 27639  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27640  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27641  	for {
 27642  		_ = v.Args[1]
 27643  		or := v.Args[0]
 27644  		if or.Op != OpAMD64ORQ {
 27645  			break
 27646  		}
 27647  		_ = or.Args[1]
 27648  		s0 := or.Args[0]
 27649  		if s0.Op != OpAMD64SHLQconst {
 27650  			break
 27651  		}
 27652  		j0 := s0.AuxInt
 27653  		x0 := s0.Args[0]
 27654  		if x0.Op != OpAMD64MOVBloadidx1 {
 27655  			break
 27656  		}
 27657  		i0 := x0.AuxInt
 27658  		s := x0.Aux
 27659  		_ = x0.Args[2]
 27660  		p := x0.Args[0]
 27661  		idx := x0.Args[1]
 27662  		mem := x0.Args[2]
 27663  		y := or.Args[1]
 27664  		s1 := v.Args[1]
 27665  		if s1.Op != OpAMD64SHLQconst {
 27666  			break
 27667  		}
 27668  		j1 := s1.AuxInt
 27669  		x1 := s1.Args[0]
 27670  		if x1.Op != OpAMD64MOVBloadidx1 {
 27671  			break
 27672  		}
 27673  		i1 := x1.AuxInt
 27674  		if x1.Aux != s {
 27675  			break
 27676  		}
 27677  		_ = x1.Args[2]
 27678  		if idx != x1.Args[0] {
 27679  			break
 27680  		}
 27681  		if p != x1.Args[1] {
 27682  			break
 27683  		}
 27684  		if mem != x1.Args[2] {
 27685  			break
 27686  		}
 27687  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27688  			break
 27689  		}
 27690  		b = mergePoint(b, x0, x1)
 27691  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27692  		v.reset(OpCopy)
 27693  		v.AddArg(v0)
 27694  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27695  		v1.AuxInt = j0
 27696  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27697  		v2.AuxInt = i0
 27698  		v2.Aux = s
 27699  		v2.AddArg(p)
 27700  		v2.AddArg(idx)
 27701  		v2.AddArg(mem)
 27702  		v1.AddArg(v2)
 27703  		v0.AddArg(v1)
 27704  		v0.AddArg(y)
 27705  		return true
 27706  	}
 27707  	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
 27708  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27709  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27710  	for {
 27711  		_ = v.Args[1]
 27712  		or := v.Args[0]
 27713  		if or.Op != OpAMD64ORQ {
 27714  			break
 27715  		}
 27716  		_ = or.Args[1]
 27717  		s0 := or.Args[0]
 27718  		if s0.Op != OpAMD64SHLQconst {
 27719  			break
 27720  		}
 27721  		j0 := s0.AuxInt
 27722  		x0 := s0.Args[0]
 27723  		if x0.Op != OpAMD64MOVBloadidx1 {
 27724  			break
 27725  		}
 27726  		i0 := x0.AuxInt
 27727  		s := x0.Aux
 27728  		_ = x0.Args[2]
 27729  		idx := x0.Args[0]
 27730  		p := x0.Args[1]
 27731  		mem := x0.Args[2]
 27732  		y := or.Args[1]
 27733  		s1 := v.Args[1]
 27734  		if s1.Op != OpAMD64SHLQconst {
 27735  			break
 27736  		}
 27737  		j1 := s1.AuxInt
 27738  		x1 := s1.Args[0]
 27739  		if x1.Op != OpAMD64MOVBloadidx1 {
 27740  			break
 27741  		}
 27742  		i1 := x1.AuxInt
 27743  		if x1.Aux != s {
 27744  			break
 27745  		}
 27746  		_ = x1.Args[2]
 27747  		if idx != x1.Args[0] {
 27748  			break
 27749  		}
 27750  		if p != x1.Args[1] {
 27751  			break
 27752  		}
 27753  		if mem != x1.Args[2] {
 27754  			break
 27755  		}
 27756  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27757  			break
 27758  		}
 27759  		b = mergePoint(b, x0, x1)
 27760  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27761  		v.reset(OpCopy)
 27762  		v.AddArg(v0)
 27763  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27764  		v1.AuxInt = j0
 27765  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27766  		v2.AuxInt = i0
 27767  		v2.Aux = s
 27768  		v2.AddArg(p)
 27769  		v2.AddArg(idx)
 27770  		v2.AddArg(mem)
 27771  		v1.AddArg(v2)
 27772  		v0.AddArg(v1)
 27773  		v0.AddArg(y)
 27774  		return true
 27775  	}
 27776  	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
 27777  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27778  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27779  	for {
 27780  		_ = v.Args[1]
 27781  		or := v.Args[0]
 27782  		if or.Op != OpAMD64ORQ {
 27783  			break
 27784  		}
 27785  		_ = or.Args[1]
 27786  		y := or.Args[0]
 27787  		s0 := or.Args[1]
 27788  		if s0.Op != OpAMD64SHLQconst {
 27789  			break
 27790  		}
 27791  		j0 := s0.AuxInt
 27792  		x0 := s0.Args[0]
 27793  		if x0.Op != OpAMD64MOVBloadidx1 {
 27794  			break
 27795  		}
 27796  		i0 := x0.AuxInt
 27797  		s := x0.Aux
 27798  		_ = x0.Args[2]
 27799  		p := x0.Args[0]
 27800  		idx := x0.Args[1]
 27801  		mem := x0.Args[2]
 27802  		s1 := v.Args[1]
 27803  		if s1.Op != OpAMD64SHLQconst {
 27804  			break
 27805  		}
 27806  		j1 := s1.AuxInt
 27807  		x1 := s1.Args[0]
 27808  		if x1.Op != OpAMD64MOVBloadidx1 {
 27809  			break
 27810  		}
 27811  		i1 := x1.AuxInt
 27812  		if x1.Aux != s {
 27813  			break
 27814  		}
 27815  		_ = x1.Args[2]
 27816  		if idx != x1.Args[0] {
 27817  			break
 27818  		}
 27819  		if p != x1.Args[1] {
 27820  			break
 27821  		}
 27822  		if mem != x1.Args[2] {
 27823  			break
 27824  		}
 27825  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27826  			break
 27827  		}
 27828  		b = mergePoint(b, x0, x1)
 27829  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27830  		v.reset(OpCopy)
 27831  		v.AddArg(v0)
 27832  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27833  		v1.AuxInt = j0
 27834  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27835  		v2.AuxInt = i0
 27836  		v2.Aux = s
 27837  		v2.AddArg(p)
 27838  		v2.AddArg(idx)
 27839  		v2.AddArg(mem)
 27840  		v1.AddArg(v2)
 27841  		v0.AddArg(v1)
 27842  		v0.AddArg(y)
 27843  		return true
 27844  	}
 27845  	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
 27846  	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27847  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
 27848  	for {
 27849  		_ = v.Args[1]
 27850  		or := v.Args[0]
 27851  		if or.Op != OpAMD64ORQ {
 27852  			break
 27853  		}
 27854  		_ = or.Args[1]
 27855  		y := or.Args[0]
 27856  		s0 := or.Args[1]
 27857  		if s0.Op != OpAMD64SHLQconst {
 27858  			break
 27859  		}
 27860  		j0 := s0.AuxInt
 27861  		x0 := s0.Args[0]
 27862  		if x0.Op != OpAMD64MOVBloadidx1 {
 27863  			break
 27864  		}
 27865  		i0 := x0.AuxInt
 27866  		s := x0.Aux
 27867  		_ = x0.Args[2]
 27868  		idx := x0.Args[0]
 27869  		p := x0.Args[1]
 27870  		mem := x0.Args[2]
 27871  		s1 := v.Args[1]
 27872  		if s1.Op != OpAMD64SHLQconst {
 27873  			break
 27874  		}
 27875  		j1 := s1.AuxInt
 27876  		x1 := s1.Args[0]
 27877  		if x1.Op != OpAMD64MOVBloadidx1 {
 27878  			break
 27879  		}
 27880  		i1 := x1.AuxInt
 27881  		if x1.Aux != s {
 27882  			break
 27883  		}
 27884  		_ = x1.Args[2]
 27885  		if idx != x1.Args[0] {
 27886  			break
 27887  		}
 27888  		if p != x1.Args[1] {
 27889  			break
 27890  		}
 27891  		if mem != x1.Args[2] {
 27892  			break
 27893  		}
 27894  		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27895  			break
 27896  		}
 27897  		b = mergePoint(b, x0, x1)
 27898  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27899  		v.reset(OpCopy)
 27900  		v.AddArg(v0)
 27901  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27902  		v1.AuxInt = j0
 27903  		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 27904  		v2.AuxInt = i0
 27905  		v2.Aux = s
 27906  		v2.AddArg(p)
 27907  		v2.AddArg(idx)
 27908  		v2.AddArg(mem)
 27909  		v1.AddArg(v2)
 27910  		v0.AddArg(v1)
 27911  		v0.AddArg(y)
 27912  		return true
 27913  	}
 27914  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
 27915  	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27916  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
 27917  	for {
 27918  		_ = v.Args[1]
 27919  		s1 := v.Args[0]
 27920  		if s1.Op != OpAMD64SHLQconst {
 27921  			break
 27922  		}
 27923  		j1 := s1.AuxInt
 27924  		x1 := s1.Args[0]
 27925  		if x1.Op != OpAMD64MOVWloadidx1 {
 27926  			break
 27927  		}
 27928  		i1 := x1.AuxInt
 27929  		s := x1.Aux
 27930  		_ = x1.Args[2]
 27931  		p := x1.Args[0]
 27932  		idx := x1.Args[1]
 27933  		mem := x1.Args[2]
 27934  		or := v.Args[1]
 27935  		if or.Op != OpAMD64ORQ {
 27936  			break
 27937  		}
 27938  		_ = or.Args[1]
 27939  		s0 := or.Args[0]
 27940  		if s0.Op != OpAMD64SHLQconst {
 27941  			break
 27942  		}
 27943  		j0 := s0.AuxInt
 27944  		x0 := s0.Args[0]
 27945  		if x0.Op != OpAMD64MOVWloadidx1 {
 27946  			break
 27947  		}
 27948  		i0 := x0.AuxInt
 27949  		if x0.Aux != s {
 27950  			break
 27951  		}
 27952  		_ = x0.Args[2]
 27953  		if p != x0.Args[0] {
 27954  			break
 27955  		}
 27956  		if idx != x0.Args[1] {
 27957  			break
 27958  		}
 27959  		if mem != x0.Args[2] {
 27960  			break
 27961  		}
 27962  		y := or.Args[1]
 27963  		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 27964  			break
 27965  		}
 27966  		b = mergePoint(b, x0, x1)
 27967  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 27968  		v.reset(OpCopy)
 27969  		v.AddArg(v0)
 27970  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 27971  		v1.AuxInt = j0
 27972  		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 27973  		v2.AuxInt = i0
 27974  		v2.Aux = s
 27975  		v2.AddArg(p)
 27976  		v2.AddArg(idx)
 27977  		v2.AddArg(mem)
 27978  		v1.AddArg(v2)
 27979  		v0.AddArg(v1)
 27980  		v0.AddArg(y)
 27981  		return true
 27982  	}
 27983  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
 27984  	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 27985  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
 27986  	for {
 27987  		_ = v.Args[1]
 27988  		s1 := v.Args[0]
 27989  		if s1.Op != OpAMD64SHLQconst {
 27990  			break
 27991  		}
 27992  		j1 := s1.AuxInt
 27993  		x1 := s1.Args[0]
 27994  		if x1.Op != OpAMD64MOVWloadidx1 {
 27995  			break
 27996  		}
 27997  		i1 := x1.AuxInt
 27998  		s := x1.Aux
 27999  		_ = x1.Args[2]
 28000  		idx := x1.Args[0]
 28001  		p := x1.Args[1]
 28002  		mem := x1.Args[2]
 28003  		or := v.Args[1]
 28004  		if or.Op != OpAMD64ORQ {
 28005  			break
 28006  		}
 28007  		_ = or.Args[1]
 28008  		s0 := or.Args[0]
 28009  		if s0.Op != OpAMD64SHLQconst {
 28010  			break
 28011  		}
 28012  		j0 := s0.AuxInt
 28013  		x0 := s0.Args[0]
 28014  		if x0.Op != OpAMD64MOVWloadidx1 {
 28015  			break
 28016  		}
 28017  		i0 := x0.AuxInt
 28018  		if x0.Aux != s {
 28019  			break
 28020  		}
 28021  		_ = x0.Args[2]
 28022  		if p != x0.Args[0] {
 28023  			break
 28024  		}
 28025  		if idx != x0.Args[1] {
 28026  			break
 28027  		}
 28028  		if mem != x0.Args[2] {
 28029  			break
 28030  		}
 28031  		y := or.Args[1]
 28032  		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 28033  			break
 28034  		}
 28035  		b = mergePoint(b, x0, x1)
 28036  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 28037  		v.reset(OpCopy)
 28038  		v.AddArg(v0)
 28039  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 28040  		v1.AuxInt = j0
 28041  		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 28042  		v2.AuxInt = i0
 28043  		v2.Aux = s
 28044  		v2.AddArg(p)
 28045  		v2.AddArg(idx)
 28046  		v2.AddArg(mem)
 28047  		v1.AddArg(v2)
 28048  		v0.AddArg(v1)
 28049  		v0.AddArg(y)
 28050  		return true
 28051  	}
 28052  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
 28053  	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 28054  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
 28055  	for {
 28056  		_ = v.Args[1]
 28057  		s1 := v.Args[0]
 28058  		if s1.Op != OpAMD64SHLQconst {
 28059  			break
 28060  		}
 28061  		j1 := s1.AuxInt
 28062  		x1 := s1.Args[0]
 28063  		if x1.Op != OpAMD64MOVWloadidx1 {
 28064  			break
 28065  		}
 28066  		i1 := x1.AuxInt
 28067  		s := x1.Aux
 28068  		_ = x1.Args[2]
 28069  		p := x1.Args[0]
 28070  		idx := x1.Args[1]
 28071  		mem := x1.Args[2]
 28072  		or := v.Args[1]
 28073  		if or.Op != OpAMD64ORQ {
 28074  			break
 28075  		}
 28076  		_ = or.Args[1]
 28077  		s0 := or.Args[0]
 28078  		if s0.Op != OpAMD64SHLQconst {
 28079  			break
 28080  		}
 28081  		j0 := s0.AuxInt
 28082  		x0 := s0.Args[0]
 28083  		if x0.Op != OpAMD64MOVWloadidx1 {
 28084  			break
 28085  		}
 28086  		i0 := x0.AuxInt
 28087  		if x0.Aux != s {
 28088  			break
 28089  		}
 28090  		_ = x0.Args[2]
 28091  		if idx != x0.Args[0] {
 28092  			break
 28093  		}
 28094  		if p != x0.Args[1] {
 28095  			break
 28096  		}
 28097  		if mem != x0.Args[2] {
 28098  			break
 28099  		}
 28100  		y := or.Args[1]
 28101  		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 28102  			break
 28103  		}
 28104  		b = mergePoint(b, x0, x1)
 28105  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 28106  		v.reset(OpCopy)
 28107  		v.AddArg(v0)
 28108  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 28109  		v1.AuxInt = j0
 28110  		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 28111  		v2.AuxInt = i0
 28112  		v2.Aux = s
 28113  		v2.AddArg(p)
 28114  		v2.AddArg(idx)
 28115  		v2.AddArg(mem)
 28116  		v1.AddArg(v2)
 28117  		v0.AddArg(v1)
 28118  		v0.AddArg(y)
 28119  		return true
 28120  	}
 28121  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
 28122  	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 28123  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
 28124  	for {
 28125  		_ = v.Args[1]
 28126  		s1 := v.Args[0]
 28127  		if s1.Op != OpAMD64SHLQconst {
 28128  			break
 28129  		}
 28130  		j1 := s1.AuxInt
 28131  		x1 := s1.Args[0]
 28132  		if x1.Op != OpAMD64MOVWloadidx1 {
 28133  			break
 28134  		}
 28135  		i1 := x1.AuxInt
 28136  		s := x1.Aux
 28137  		_ = x1.Args[2]
 28138  		idx := x1.Args[0]
 28139  		p := x1.Args[1]
 28140  		mem := x1.Args[2]
 28141  		or := v.Args[1]
 28142  		if or.Op != OpAMD64ORQ {
 28143  			break
 28144  		}
 28145  		_ = or.Args[1]
 28146  		s0 := or.Args[0]
 28147  		if s0.Op != OpAMD64SHLQconst {
 28148  			break
 28149  		}
 28150  		j0 := s0.AuxInt
 28151  		x0 := s0.Args[0]
 28152  		if x0.Op != OpAMD64MOVWloadidx1 {
 28153  			break
 28154  		}
 28155  		i0 := x0.AuxInt
 28156  		if x0.Aux != s {
 28157  			break
 28158  		}
 28159  		_ = x0.Args[2]
 28160  		if idx != x0.Args[0] {
 28161  			break
 28162  		}
 28163  		if p != x0.Args[1] {
 28164  			break
 28165  		}
 28166  		if mem != x0.Args[2] {
 28167  			break
 28168  		}
 28169  		y := or.Args[1]
 28170  		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 28171  			break
 28172  		}
 28173  		b = mergePoint(b, x0, x1)
 28174  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 28175  		v.reset(OpCopy)
 28176  		v.AddArg(v0)
 28177  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 28178  		v1.AuxInt = j0
 28179  		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 28180  		v2.AuxInt = i0
 28181  		v2.Aux = s
 28182  		v2.AddArg(p)
 28183  		v2.AddArg(idx)
 28184  		v2.AddArg(mem)
 28185  		v1.AddArg(v2)
 28186  		v0.AddArg(v1)
 28187  		v0.AddArg(y)
 28188  		return true
 28189  	}
 28190  	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 28191  	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 28192  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
 28193  	for {
 28194  		_ = v.Args[1]
 28195  		s1 := v.Args[0]
 28196  		if s1.Op != OpAMD64SHLQconst {
 28197  			break
 28198  		}
 28199  		j1 := s1.AuxInt
 28200  		x1 := s1.Args[0]
 28201  		if x1.Op != OpAMD64MOVWloadidx1 {
 28202  			break
 28203  		}
 28204  		i1 := x1.AuxInt
 28205  		s := x1.Aux
 28206  		_ = x1.Args[2]
 28207  		p := x1.Args[0]
 28208  		idx := x1.Args[1]
 28209  		mem := x1.Args[2]
 28210  		or := v.Args[1]
 28211  		if or.Op != OpAMD64ORQ {
 28212  			break
 28213  		}
 28214  		_ = or.Args[1]
 28215  		y := or.Args[0]
 28216  		s0 := or.Args[1]
 28217  		if s0.Op != OpAMD64SHLQconst {
 28218  			break
 28219  		}
 28220  		j0 := s0.AuxInt
 28221  		x0 := s0.Args[0]
 28222  		if x0.Op != OpAMD64MOVWloadidx1 {
 28223  			break
 28224  		}
 28225  		i0 := x0.AuxInt
 28226  		if x0.Aux != s {
 28227  			break
 28228  		}
 28229  		_ = x0.Args[2]
 28230  		if p != x0.Args[0] {
 28231  			break
 28232  		}
 28233  		if idx != x0.Args[1] {
 28234  			break
 28235  		}
 28236  		if mem != x0.Args[2] {
 28237  			break
 28238  		}
 28239  		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 28240  			break
 28241  		}
 28242  		b = mergePoint(b, x0, x1)
 28243  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 28244  		v.reset(OpCopy)
 28245  		v.AddArg(v0)
 28246  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 28247  		v1.AuxInt = j0
 28248  		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 28249  		v2.AuxInt = i0
 28250  		v2.Aux = s
 28251  		v2.AddArg(p)
 28252  		v2.AddArg(idx)
 28253  		v2.AddArg(mem)
 28254  		v1.AddArg(v2)
 28255  		v0.AddArg(v1)
 28256  		v0.AddArg(y)
 28257  		return true
 28258  	}
 28259  	return false
 28260  }
// rewriteValueAMD64_OpAMD64ORQ_80 applies one machine-generated batch of ORQ
// rewrite rules (rules 80-89). Every rule in this batch fuses two adjacent
// 16-bit indexed loads — MOVWloadidx1 at constant offsets i0 and i1 == i0+2
// from the same base/index/memory — that are combined through shifted ORs
// (j1 == j0+16 with j0%32 == 0) into a single 32-bit MOVLloadidx1 shifted by
// j0, provided every intermediate value has exactly one use and can be
// clobbered. The ten rules differ only in the commuted operand orders of the
// outer/inner ORQ (which side holds y) and of the load's p/idx arguments.
// Generated from gen/AMD64.rules; do not edit by hand.
func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQ_90 is one chunk of the machine-generated
// rewrite matcher for OpAMD64ORQ (generated from gen/AMD64.rules; the
// generator splits the ORQ rule set into _0, _10, ..., _90, _100 chunks).
// Each "for { ... }" loop below hand-matches exactly one rule: the
// "// match:" comment gives the pattern as an s-expression, "// cond:"
// the extra side conditions, and "// result:" the replacement value.
// On a match, v is rewritten in place to the result and the function
// returns true; otherwise control falls to the next rule, and finally
// to "return false" so the caller can try the next chunk.
//
// The rules in this chunk recognize adjacent narrow loads combined with
// ORQ+SHLQconst and fuse them into a single wider load — either directly
// (little-endian order) or wrapped in a byte swap (ROLWconst [8],
// BSWAPL, BSWAPQ) when the bytes appear in big-endian order. The
// "or:(ORQ ...)" variants handle the same patterns one level down an
// OR tree, keeping the unrelated operand y.
func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		// The "@mergePoint" idiom: build the replacement values in the
		// block chosen by mergePoint (one where both loads are available;
		// see mergePoint in rewrite.go) and turn v into an OpCopy of the
		// root of the new tree. The same pattern repeats in every rule below.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// No rule in this chunk matched; the dispatcher reports failure so
	// the remaining ORQ chunks (if any) can be tried.
	return false
}
 29587  func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
 29588  	b := v.Block
 29589  	_ = b
 29590  	typ := &b.Func.Config.Types
 29591  	_ = typ
 29592  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
 29593  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 29594  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
 29595  	for {
 29596  		_ = v.Args[1]
 29597  		or := v.Args[0]
 29598  		if or.Op != OpAMD64ORQ {
 29599  			break
 29600  		}
 29601  		_ = or.Args[1]
 29602  		y := or.Args[0]
 29603  		s1 := or.Args[1]
 29604  		if s1.Op != OpAMD64SHLQconst {
 29605  			break
 29606  		}
 29607  		j1 := s1.AuxInt
 29608  		x1 := s1.Args[0]
 29609  		if x1.Op != OpAMD64MOVBload {
 29610  			break
 29611  		}
 29612  		i1 := x1.AuxInt
 29613  		s := x1.Aux
 29614  		_ = x1.Args[1]
 29615  		p := x1.Args[0]
 29616  		mem := x1.Args[1]
 29617  		s0 := v.Args[1]
 29618  		if s0.Op != OpAMD64SHLQconst {
 29619  			break
 29620  		}
 29621  		j0 := s0.AuxInt
 29622  		x0 := s0.Args[0]
 29623  		if x0.Op != OpAMD64MOVBload {
 29624  			break
 29625  		}
 29626  		i0 := x0.AuxInt
 29627  		if x0.Aux != s {
 29628  			break
 29629  		}
 29630  		_ = x0.Args[1]
 29631  		if p != x0.Args[0] {
 29632  			break
 29633  		}
 29634  		if mem != x0.Args[1] {
 29635  			break
 29636  		}
 29637  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 29638  			break
 29639  		}
 29640  		b = mergePoint(b, x0, x1)
 29641  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 29642  		v.reset(OpCopy)
 29643  		v.AddArg(v0)
 29644  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 29645  		v1.AuxInt = j1
 29646  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 29647  		v2.AuxInt = 8
 29648  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 29649  		v3.AuxInt = i0
 29650  		v3.Aux = s
 29651  		v3.AddArg(p)
 29652  		v3.AddArg(mem)
 29653  		v2.AddArg(v3)
 29654  		v1.AddArg(v2)
 29655  		v0.AddArg(v1)
 29656  		v0.AddArg(y)
 29657  		return true
 29658  	}
 29659  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
 29660  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 29661  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
 29662  	for {
 29663  		_ = v.Args[1]
 29664  		s0 := v.Args[0]
 29665  		if s0.Op != OpAMD64SHLQconst {
 29666  			break
 29667  		}
 29668  		j0 := s0.AuxInt
 29669  		r0 := s0.Args[0]
 29670  		if r0.Op != OpAMD64ROLWconst {
 29671  			break
 29672  		}
 29673  		if r0.AuxInt != 8 {
 29674  			break
 29675  		}
 29676  		x0 := r0.Args[0]
 29677  		if x0.Op != OpAMD64MOVWload {
 29678  			break
 29679  		}
 29680  		i0 := x0.AuxInt
 29681  		s := x0.Aux
 29682  		_ = x0.Args[1]
 29683  		p := x0.Args[0]
 29684  		mem := x0.Args[1]
 29685  		or := v.Args[1]
 29686  		if or.Op != OpAMD64ORQ {
 29687  			break
 29688  		}
 29689  		_ = or.Args[1]
 29690  		s1 := or.Args[0]
 29691  		if s1.Op != OpAMD64SHLQconst {
 29692  			break
 29693  		}
 29694  		j1 := s1.AuxInt
 29695  		r1 := s1.Args[0]
 29696  		if r1.Op != OpAMD64ROLWconst {
 29697  			break
 29698  		}
 29699  		if r1.AuxInt != 8 {
 29700  			break
 29701  		}
 29702  		x1 := r1.Args[0]
 29703  		if x1.Op != OpAMD64MOVWload {
 29704  			break
 29705  		}
 29706  		i1 := x1.AuxInt
 29707  		if x1.Aux != s {
 29708  			break
 29709  		}
 29710  		_ = x1.Args[1]
 29711  		if p != x1.Args[0] {
 29712  			break
 29713  		}
 29714  		if mem != x1.Args[1] {
 29715  			break
 29716  		}
 29717  		y := or.Args[1]
 29718  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 29719  			break
 29720  		}
 29721  		b = mergePoint(b, x0, x1)
 29722  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 29723  		v.reset(OpCopy)
 29724  		v.AddArg(v0)
 29725  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 29726  		v1.AuxInt = j1
 29727  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 29728  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 29729  		v3.AuxInt = i0
 29730  		v3.Aux = s
 29731  		v3.AddArg(p)
 29732  		v3.AddArg(mem)
 29733  		v2.AddArg(v3)
 29734  		v1.AddArg(v2)
 29735  		v0.AddArg(v1)
 29736  		v0.AddArg(y)
 29737  		return true
 29738  	}
 29739  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
 29740  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 29741  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
 29742  	for {
 29743  		_ = v.Args[1]
 29744  		s0 := v.Args[0]
 29745  		if s0.Op != OpAMD64SHLQconst {
 29746  			break
 29747  		}
 29748  		j0 := s0.AuxInt
 29749  		r0 := s0.Args[0]
 29750  		if r0.Op != OpAMD64ROLWconst {
 29751  			break
 29752  		}
 29753  		if r0.AuxInt != 8 {
 29754  			break
 29755  		}
 29756  		x0 := r0.Args[0]
 29757  		if x0.Op != OpAMD64MOVWload {
 29758  			break
 29759  		}
 29760  		i0 := x0.AuxInt
 29761  		s := x0.Aux
 29762  		_ = x0.Args[1]
 29763  		p := x0.Args[0]
 29764  		mem := x0.Args[1]
 29765  		or := v.Args[1]
 29766  		if or.Op != OpAMD64ORQ {
 29767  			break
 29768  		}
 29769  		_ = or.Args[1]
 29770  		y := or.Args[0]
 29771  		s1 := or.Args[1]
 29772  		if s1.Op != OpAMD64SHLQconst {
 29773  			break
 29774  		}
 29775  		j1 := s1.AuxInt
 29776  		r1 := s1.Args[0]
 29777  		if r1.Op != OpAMD64ROLWconst {
 29778  			break
 29779  		}
 29780  		if r1.AuxInt != 8 {
 29781  			break
 29782  		}
 29783  		x1 := r1.Args[0]
 29784  		if x1.Op != OpAMD64MOVWload {
 29785  			break
 29786  		}
 29787  		i1 := x1.AuxInt
 29788  		if x1.Aux != s {
 29789  			break
 29790  		}
 29791  		_ = x1.Args[1]
 29792  		if p != x1.Args[0] {
 29793  			break
 29794  		}
 29795  		if mem != x1.Args[1] {
 29796  			break
 29797  		}
 29798  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 29799  			break
 29800  		}
 29801  		b = mergePoint(b, x0, x1)
 29802  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 29803  		v.reset(OpCopy)
 29804  		v.AddArg(v0)
 29805  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 29806  		v1.AuxInt = j1
 29807  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 29808  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 29809  		v3.AuxInt = i0
 29810  		v3.Aux = s
 29811  		v3.AddArg(p)
 29812  		v3.AddArg(mem)
 29813  		v2.AddArg(v3)
 29814  		v1.AddArg(v2)
 29815  		v0.AddArg(v1)
 29816  		v0.AddArg(y)
 29817  		return true
 29818  	}
 29819  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
 29820  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 29821  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
 29822  	for {
 29823  		_ = v.Args[1]
 29824  		or := v.Args[0]
 29825  		if or.Op != OpAMD64ORQ {
 29826  			break
 29827  		}
 29828  		_ = or.Args[1]
 29829  		s1 := or.Args[0]
 29830  		if s1.Op != OpAMD64SHLQconst {
 29831  			break
 29832  		}
 29833  		j1 := s1.AuxInt
 29834  		r1 := s1.Args[0]
 29835  		if r1.Op != OpAMD64ROLWconst {
 29836  			break
 29837  		}
 29838  		if r1.AuxInt != 8 {
 29839  			break
 29840  		}
 29841  		x1 := r1.Args[0]
 29842  		if x1.Op != OpAMD64MOVWload {
 29843  			break
 29844  		}
 29845  		i1 := x1.AuxInt
 29846  		s := x1.Aux
 29847  		_ = x1.Args[1]
 29848  		p := x1.Args[0]
 29849  		mem := x1.Args[1]
 29850  		y := or.Args[1]
 29851  		s0 := v.Args[1]
 29852  		if s0.Op != OpAMD64SHLQconst {
 29853  			break
 29854  		}
 29855  		j0 := s0.AuxInt
 29856  		r0 := s0.Args[0]
 29857  		if r0.Op != OpAMD64ROLWconst {
 29858  			break
 29859  		}
 29860  		if r0.AuxInt != 8 {
 29861  			break
 29862  		}
 29863  		x0 := r0.Args[0]
 29864  		if x0.Op != OpAMD64MOVWload {
 29865  			break
 29866  		}
 29867  		i0 := x0.AuxInt
 29868  		if x0.Aux != s {
 29869  			break
 29870  		}
 29871  		_ = x0.Args[1]
 29872  		if p != x0.Args[0] {
 29873  			break
 29874  		}
 29875  		if mem != x0.Args[1] {
 29876  			break
 29877  		}
 29878  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 29879  			break
 29880  		}
 29881  		b = mergePoint(b, x0, x1)
 29882  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 29883  		v.reset(OpCopy)
 29884  		v.AddArg(v0)
 29885  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 29886  		v1.AuxInt = j1
 29887  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 29888  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 29889  		v3.AuxInt = i0
 29890  		v3.Aux = s
 29891  		v3.AddArg(p)
 29892  		v3.AddArg(mem)
 29893  		v2.AddArg(v3)
 29894  		v1.AddArg(v2)
 29895  		v0.AddArg(v1)
 29896  		v0.AddArg(y)
 29897  		return true
 29898  	}
 29899  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
 29900  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 29901  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
 29902  	for {
 29903  		_ = v.Args[1]
 29904  		or := v.Args[0]
 29905  		if or.Op != OpAMD64ORQ {
 29906  			break
 29907  		}
 29908  		_ = or.Args[1]
 29909  		y := or.Args[0]
 29910  		s1 := or.Args[1]
 29911  		if s1.Op != OpAMD64SHLQconst {
 29912  			break
 29913  		}
 29914  		j1 := s1.AuxInt
 29915  		r1 := s1.Args[0]
 29916  		if r1.Op != OpAMD64ROLWconst {
 29917  			break
 29918  		}
 29919  		if r1.AuxInt != 8 {
 29920  			break
 29921  		}
 29922  		x1 := r1.Args[0]
 29923  		if x1.Op != OpAMD64MOVWload {
 29924  			break
 29925  		}
 29926  		i1 := x1.AuxInt
 29927  		s := x1.Aux
 29928  		_ = x1.Args[1]
 29929  		p := x1.Args[0]
 29930  		mem := x1.Args[1]
 29931  		s0 := v.Args[1]
 29932  		if s0.Op != OpAMD64SHLQconst {
 29933  			break
 29934  		}
 29935  		j0 := s0.AuxInt
 29936  		r0 := s0.Args[0]
 29937  		if r0.Op != OpAMD64ROLWconst {
 29938  			break
 29939  		}
 29940  		if r0.AuxInt != 8 {
 29941  			break
 29942  		}
 29943  		x0 := r0.Args[0]
 29944  		if x0.Op != OpAMD64MOVWload {
 29945  			break
 29946  		}
 29947  		i0 := x0.AuxInt
 29948  		if x0.Aux != s {
 29949  			break
 29950  		}
 29951  		_ = x0.Args[1]
 29952  		if p != x0.Args[0] {
 29953  			break
 29954  		}
 29955  		if mem != x0.Args[1] {
 29956  			break
 29957  		}
 29958  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 29959  			break
 29960  		}
 29961  		b = mergePoint(b, x0, x1)
 29962  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 29963  		v.reset(OpCopy)
 29964  		v.AddArg(v0)
 29965  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 29966  		v1.AuxInt = j1
 29967  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 29968  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
 29969  		v3.AuxInt = i0
 29970  		v3.Aux = s
 29971  		v3.AddArg(p)
 29972  		v3.AddArg(mem)
 29973  		v2.AddArg(v3)
 29974  		v1.AddArg(v2)
 29975  		v0.AddArg(v1)
 29976  		v0.AddArg(y)
 29977  		return true
 29978  	}
 29979  	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 29980  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 29981  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 29982  	for {
 29983  		_ = v.Args[1]
 29984  		x1 := v.Args[0]
 29985  		if x1.Op != OpAMD64MOVBloadidx1 {
 29986  			break
 29987  		}
 29988  		i1 := x1.AuxInt
 29989  		s := x1.Aux
 29990  		_ = x1.Args[2]
 29991  		p := x1.Args[0]
 29992  		idx := x1.Args[1]
 29993  		mem := x1.Args[2]
 29994  		sh := v.Args[1]
 29995  		if sh.Op != OpAMD64SHLQconst {
 29996  			break
 29997  		}
 29998  		if sh.AuxInt != 8 {
 29999  			break
 30000  		}
 30001  		x0 := sh.Args[0]
 30002  		if x0.Op != OpAMD64MOVBloadidx1 {
 30003  			break
 30004  		}
 30005  		i0 := x0.AuxInt
 30006  		if x0.Aux != s {
 30007  			break
 30008  		}
 30009  		_ = x0.Args[2]
 30010  		if p != x0.Args[0] {
 30011  			break
 30012  		}
 30013  		if idx != x0.Args[1] {
 30014  			break
 30015  		}
 30016  		if mem != x0.Args[2] {
 30017  			break
 30018  		}
 30019  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30020  			break
 30021  		}
 30022  		b = mergePoint(b, x0, x1)
 30023  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30024  		v.reset(OpCopy)
 30025  		v.AddArg(v0)
 30026  		v0.AuxInt = 8
 30027  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30028  		v1.AuxInt = i0
 30029  		v1.Aux = s
 30030  		v1.AddArg(p)
 30031  		v1.AddArg(idx)
 30032  		v1.AddArg(mem)
 30033  		v0.AddArg(v1)
 30034  		return true
 30035  	}
 30036  	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 30037  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30038  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30039  	for {
 30040  		_ = v.Args[1]
 30041  		x1 := v.Args[0]
 30042  		if x1.Op != OpAMD64MOVBloadidx1 {
 30043  			break
 30044  		}
 30045  		i1 := x1.AuxInt
 30046  		s := x1.Aux
 30047  		_ = x1.Args[2]
 30048  		idx := x1.Args[0]
 30049  		p := x1.Args[1]
 30050  		mem := x1.Args[2]
 30051  		sh := v.Args[1]
 30052  		if sh.Op != OpAMD64SHLQconst {
 30053  			break
 30054  		}
 30055  		if sh.AuxInt != 8 {
 30056  			break
 30057  		}
 30058  		x0 := sh.Args[0]
 30059  		if x0.Op != OpAMD64MOVBloadidx1 {
 30060  			break
 30061  		}
 30062  		i0 := x0.AuxInt
 30063  		if x0.Aux != s {
 30064  			break
 30065  		}
 30066  		_ = x0.Args[2]
 30067  		if p != x0.Args[0] {
 30068  			break
 30069  		}
 30070  		if idx != x0.Args[1] {
 30071  			break
 30072  		}
 30073  		if mem != x0.Args[2] {
 30074  			break
 30075  		}
 30076  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30077  			break
 30078  		}
 30079  		b = mergePoint(b, x0, x1)
 30080  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30081  		v.reset(OpCopy)
 30082  		v.AddArg(v0)
 30083  		v0.AuxInt = 8
 30084  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30085  		v1.AuxInt = i0
 30086  		v1.Aux = s
 30087  		v1.AddArg(p)
 30088  		v1.AddArg(idx)
 30089  		v1.AddArg(mem)
 30090  		v0.AddArg(v1)
 30091  		return true
 30092  	}
 30093  	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 30094  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30095  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30096  	for {
 30097  		_ = v.Args[1]
 30098  		x1 := v.Args[0]
 30099  		if x1.Op != OpAMD64MOVBloadidx1 {
 30100  			break
 30101  		}
 30102  		i1 := x1.AuxInt
 30103  		s := x1.Aux
 30104  		_ = x1.Args[2]
 30105  		p := x1.Args[0]
 30106  		idx := x1.Args[1]
 30107  		mem := x1.Args[2]
 30108  		sh := v.Args[1]
 30109  		if sh.Op != OpAMD64SHLQconst {
 30110  			break
 30111  		}
 30112  		if sh.AuxInt != 8 {
 30113  			break
 30114  		}
 30115  		x0 := sh.Args[0]
 30116  		if x0.Op != OpAMD64MOVBloadidx1 {
 30117  			break
 30118  		}
 30119  		i0 := x0.AuxInt
 30120  		if x0.Aux != s {
 30121  			break
 30122  		}
 30123  		_ = x0.Args[2]
 30124  		if idx != x0.Args[0] {
 30125  			break
 30126  		}
 30127  		if p != x0.Args[1] {
 30128  			break
 30129  		}
 30130  		if mem != x0.Args[2] {
 30131  			break
 30132  		}
 30133  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30134  			break
 30135  		}
 30136  		b = mergePoint(b, x0, x1)
 30137  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30138  		v.reset(OpCopy)
 30139  		v.AddArg(v0)
 30140  		v0.AuxInt = 8
 30141  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30142  		v1.AuxInt = i0
 30143  		v1.Aux = s
 30144  		v1.AddArg(p)
 30145  		v1.AddArg(idx)
 30146  		v1.AddArg(mem)
 30147  		v0.AddArg(v1)
 30148  		return true
 30149  	}
 30150  	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 30151  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30152  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30153  	for {
 30154  		_ = v.Args[1]
 30155  		x1 := v.Args[0]
 30156  		if x1.Op != OpAMD64MOVBloadidx1 {
 30157  			break
 30158  		}
 30159  		i1 := x1.AuxInt
 30160  		s := x1.Aux
 30161  		_ = x1.Args[2]
 30162  		idx := x1.Args[0]
 30163  		p := x1.Args[1]
 30164  		mem := x1.Args[2]
 30165  		sh := v.Args[1]
 30166  		if sh.Op != OpAMD64SHLQconst {
 30167  			break
 30168  		}
 30169  		if sh.AuxInt != 8 {
 30170  			break
 30171  		}
 30172  		x0 := sh.Args[0]
 30173  		if x0.Op != OpAMD64MOVBloadidx1 {
 30174  			break
 30175  		}
 30176  		i0 := x0.AuxInt
 30177  		if x0.Aux != s {
 30178  			break
 30179  		}
 30180  		_ = x0.Args[2]
 30181  		if idx != x0.Args[0] {
 30182  			break
 30183  		}
 30184  		if p != x0.Args[1] {
 30185  			break
 30186  		}
 30187  		if mem != x0.Args[2] {
 30188  			break
 30189  		}
 30190  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30191  			break
 30192  		}
 30193  		b = mergePoint(b, x0, x1)
 30194  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30195  		v.reset(OpCopy)
 30196  		v.AddArg(v0)
 30197  		v0.AuxInt = 8
 30198  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30199  		v1.AuxInt = i0
 30200  		v1.Aux = s
 30201  		v1.AddArg(p)
 30202  		v1.AddArg(idx)
 30203  		v1.AddArg(mem)
 30204  		v0.AddArg(v1)
 30205  		return true
 30206  	}
 30207  	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
 30208  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30209  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30210  	for {
 30211  		_ = v.Args[1]
 30212  		sh := v.Args[0]
 30213  		if sh.Op != OpAMD64SHLQconst {
 30214  			break
 30215  		}
 30216  		if sh.AuxInt != 8 {
 30217  			break
 30218  		}
 30219  		x0 := sh.Args[0]
 30220  		if x0.Op != OpAMD64MOVBloadidx1 {
 30221  			break
 30222  		}
 30223  		i0 := x0.AuxInt
 30224  		s := x0.Aux
 30225  		_ = x0.Args[2]
 30226  		p := x0.Args[0]
 30227  		idx := x0.Args[1]
 30228  		mem := x0.Args[2]
 30229  		x1 := v.Args[1]
 30230  		if x1.Op != OpAMD64MOVBloadidx1 {
 30231  			break
 30232  		}
 30233  		i1 := x1.AuxInt
 30234  		if x1.Aux != s {
 30235  			break
 30236  		}
 30237  		_ = x1.Args[2]
 30238  		if p != x1.Args[0] {
 30239  			break
 30240  		}
 30241  		if idx != x1.Args[1] {
 30242  			break
 30243  		}
 30244  		if mem != x1.Args[2] {
 30245  			break
 30246  		}
 30247  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30248  			break
 30249  		}
 30250  		b = mergePoint(b, x0, x1)
 30251  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30252  		v.reset(OpCopy)
 30253  		v.AddArg(v0)
 30254  		v0.AuxInt = 8
 30255  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30256  		v1.AuxInt = i0
 30257  		v1.Aux = s
 30258  		v1.AddArg(p)
 30259  		v1.AddArg(idx)
 30260  		v1.AddArg(mem)
 30261  		v0.AddArg(v1)
 30262  		return true
 30263  	}
 30264  	return false
 30265  }
// rewriteValueAMD64_OpAMD64ORQ_110 applies one slice of the generated ORQ
// rewrite rules: it fuses pairs of adjacent narrow loads combined with ORQ
// into a single wider load (plus a byte-swap where the pattern is big-endian).
// It returns true and rewrites v in place if any rule fired.
// NOTE: generated from gen/AMD64.rules — do not edit the code by hand.
 30266  func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
 30267  	b := v.Block
 30268  	_ = b
 30269  	typ := &b.Func.Config.Types
 30270  	_ = typ
	// Rules 1-3: (x1 | sh<<8) over two adjacent MOVBloadidx1 at i0/i0+1 with the
	// same {s}, p, idx, mem (p/idx in either argument order) become a single
	// MOVWloadidx1 [i0] rotated by 8 (ROLWconst) to restore big-endian byte order.
	// Each load/shift must be single-use so clobbering the old values is safe.
 30271  	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
 30272  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30273  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30274  	for {
 30275  		_ = v.Args[1]
 30276  		sh := v.Args[0]
 30277  		if sh.Op != OpAMD64SHLQconst {
 30278  			break
 30279  		}
 30280  		if sh.AuxInt != 8 {
 30281  			break
 30282  		}
 30283  		x0 := sh.Args[0]
 30284  		if x0.Op != OpAMD64MOVBloadidx1 {
 30285  			break
 30286  		}
 30287  		i0 := x0.AuxInt
 30288  		s := x0.Aux
 30289  		_ = x0.Args[2]
 30290  		idx := x0.Args[0]
 30291  		p := x0.Args[1]
 30292  		mem := x0.Args[2]
 30293  		x1 := v.Args[1]
 30294  		if x1.Op != OpAMD64MOVBloadidx1 {
 30295  			break
 30296  		}
 30297  		i1 := x1.AuxInt
 30298  		if x1.Aux != s {
 30299  			break
 30300  		}
 30301  		_ = x1.Args[2]
 30302  		if p != x1.Args[0] {
 30303  			break
 30304  		}
 30305  		if idx != x1.Args[1] {
 30306  			break
 30307  		}
 30308  		if mem != x1.Args[2] {
 30309  			break
 30310  		}
 30311  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30312  			break
 30313  		}
 30314  		b = mergePoint(b, x0, x1)
 30315  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30316  		v.reset(OpCopy)
 30317  		v.AddArg(v0)
 30318  		v0.AuxInt = 8
 30319  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30320  		v1.AuxInt = i0
 30321  		v1.Aux = s
 30322  		v1.AddArg(p)
 30323  		v1.AddArg(idx)
 30324  		v1.AddArg(mem)
 30325  		v0.AddArg(v1)
 30326  		return true
 30327  	}
 30328  	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
 30329  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30330  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30331  	for {
 30332  		_ = v.Args[1]
 30333  		sh := v.Args[0]
 30334  		if sh.Op != OpAMD64SHLQconst {
 30335  			break
 30336  		}
 30337  		if sh.AuxInt != 8 {
 30338  			break
 30339  		}
 30340  		x0 := sh.Args[0]
 30341  		if x0.Op != OpAMD64MOVBloadidx1 {
 30342  			break
 30343  		}
 30344  		i0 := x0.AuxInt
 30345  		s := x0.Aux
 30346  		_ = x0.Args[2]
 30347  		p := x0.Args[0]
 30348  		idx := x0.Args[1]
 30349  		mem := x0.Args[2]
 30350  		x1 := v.Args[1]
 30351  		if x1.Op != OpAMD64MOVBloadidx1 {
 30352  			break
 30353  		}
 30354  		i1 := x1.AuxInt
 30355  		if x1.Aux != s {
 30356  			break
 30357  		}
 30358  		_ = x1.Args[2]
 30359  		if idx != x1.Args[0] {
 30360  			break
 30361  		}
 30362  		if p != x1.Args[1] {
 30363  			break
 30364  		}
 30365  		if mem != x1.Args[2] {
 30366  			break
 30367  		}
 30368  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30369  			break
 30370  		}
 30371  		b = mergePoint(b, x0, x1)
 30372  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30373  		v.reset(OpCopy)
 30374  		v.AddArg(v0)
 30375  		v0.AuxInt = 8
 30376  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30377  		v1.AuxInt = i0
 30378  		v1.Aux = s
 30379  		v1.AddArg(p)
 30380  		v1.AddArg(idx)
 30381  		v1.AddArg(mem)
 30382  		v0.AddArg(v1)
 30383  		return true
 30384  	}
 30385  	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
 30386  	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
 30387  	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
 30388  	for {
 30389  		_ = v.Args[1]
 30390  		sh := v.Args[0]
 30391  		if sh.Op != OpAMD64SHLQconst {
 30392  			break
 30393  		}
 30394  		if sh.AuxInt != 8 {
 30395  			break
 30396  		}
 30397  		x0 := sh.Args[0]
 30398  		if x0.Op != OpAMD64MOVBloadidx1 {
 30399  			break
 30400  		}
 30401  		i0 := x0.AuxInt
 30402  		s := x0.Aux
 30403  		_ = x0.Args[2]
 30404  		idx := x0.Args[0]
 30405  		p := x0.Args[1]
 30406  		mem := x0.Args[2]
 30407  		x1 := v.Args[1]
 30408  		if x1.Op != OpAMD64MOVBloadidx1 {
 30409  			break
 30410  		}
 30411  		i1 := x1.AuxInt
 30412  		if x1.Aux != s {
 30413  			break
 30414  		}
 30415  		_ = x1.Args[2]
 30416  		if idx != x1.Args[0] {
 30417  			break
 30418  		}
 30419  		if p != x1.Args[1] {
 30420  			break
 30421  		}
 30422  		if mem != x1.Args[2] {
 30423  			break
 30424  		}
 30425  		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
 30426  			break
 30427  		}
 30428  		b = mergePoint(b, x0, x1)
 30429  		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
 30430  		v.reset(OpCopy)
 30431  		v.AddArg(v0)
 30432  		v0.AuxInt = 8
 30433  		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 30434  		v1.AuxInt = i0
 30435  		v1.Aux = s
 30436  		v1.AddArg(p)
 30437  		v1.AddArg(idx)
 30438  		v1.AddArg(mem)
 30439  		v0.AddArg(v1)
 30440  		return true
 30441  	}
	// Rules 4-10: two byte-swapped word loads (ROLWconst [8] over MOVWloadidx1)
	// at i0/i0+2, combined as r1 | (r0 << 16), collapse into one MOVLloadidx1 [i0]
	// followed by BSWAPL. The variants below enumerate p/idx argument orderings
	// and which side of the ORQ carries the shifted half.
 30442  	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 30443  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30444  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30445  	for {
 30446  		_ = v.Args[1]
 30447  		r1 := v.Args[0]
 30448  		if r1.Op != OpAMD64ROLWconst {
 30449  			break
 30450  		}
 30451  		if r1.AuxInt != 8 {
 30452  			break
 30453  		}
 30454  		x1 := r1.Args[0]
 30455  		if x1.Op != OpAMD64MOVWloadidx1 {
 30456  			break
 30457  		}
 30458  		i1 := x1.AuxInt
 30459  		s := x1.Aux
 30460  		_ = x1.Args[2]
 30461  		p := x1.Args[0]
 30462  		idx := x1.Args[1]
 30463  		mem := x1.Args[2]
 30464  		sh := v.Args[1]
 30465  		if sh.Op != OpAMD64SHLQconst {
 30466  			break
 30467  		}
 30468  		if sh.AuxInt != 16 {
 30469  			break
 30470  		}
 30471  		r0 := sh.Args[0]
 30472  		if r0.Op != OpAMD64ROLWconst {
 30473  			break
 30474  		}
 30475  		if r0.AuxInt != 8 {
 30476  			break
 30477  		}
 30478  		x0 := r0.Args[0]
 30479  		if x0.Op != OpAMD64MOVWloadidx1 {
 30480  			break
 30481  		}
 30482  		i0 := x0.AuxInt
 30483  		if x0.Aux != s {
 30484  			break
 30485  		}
 30486  		_ = x0.Args[2]
 30487  		if p != x0.Args[0] {
 30488  			break
 30489  		}
 30490  		if idx != x0.Args[1] {
 30491  			break
 30492  		}
 30493  		if mem != x0.Args[2] {
 30494  			break
 30495  		}
 30496  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30497  			break
 30498  		}
 30499  		b = mergePoint(b, x0, x1)
 30500  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30501  		v.reset(OpCopy)
 30502  		v.AddArg(v0)
 30503  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30504  		v1.AuxInt = i0
 30505  		v1.Aux = s
 30506  		v1.AddArg(p)
 30507  		v1.AddArg(idx)
 30508  		v1.AddArg(mem)
 30509  		v0.AddArg(v1)
 30510  		return true
 30511  	}
 30512  	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 30513  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30514  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30515  	for {
 30516  		_ = v.Args[1]
 30517  		r1 := v.Args[0]
 30518  		if r1.Op != OpAMD64ROLWconst {
 30519  			break
 30520  		}
 30521  		if r1.AuxInt != 8 {
 30522  			break
 30523  		}
 30524  		x1 := r1.Args[0]
 30525  		if x1.Op != OpAMD64MOVWloadidx1 {
 30526  			break
 30527  		}
 30528  		i1 := x1.AuxInt
 30529  		s := x1.Aux
 30530  		_ = x1.Args[2]
 30531  		idx := x1.Args[0]
 30532  		p := x1.Args[1]
 30533  		mem := x1.Args[2]
 30534  		sh := v.Args[1]
 30535  		if sh.Op != OpAMD64SHLQconst {
 30536  			break
 30537  		}
 30538  		if sh.AuxInt != 16 {
 30539  			break
 30540  		}
 30541  		r0 := sh.Args[0]
 30542  		if r0.Op != OpAMD64ROLWconst {
 30543  			break
 30544  		}
 30545  		if r0.AuxInt != 8 {
 30546  			break
 30547  		}
 30548  		x0 := r0.Args[0]
 30549  		if x0.Op != OpAMD64MOVWloadidx1 {
 30550  			break
 30551  		}
 30552  		i0 := x0.AuxInt
 30553  		if x0.Aux != s {
 30554  			break
 30555  		}
 30556  		_ = x0.Args[2]
 30557  		if p != x0.Args[0] {
 30558  			break
 30559  		}
 30560  		if idx != x0.Args[1] {
 30561  			break
 30562  		}
 30563  		if mem != x0.Args[2] {
 30564  			break
 30565  		}
 30566  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30567  			break
 30568  		}
 30569  		b = mergePoint(b, x0, x1)
 30570  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30571  		v.reset(OpCopy)
 30572  		v.AddArg(v0)
 30573  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30574  		v1.AuxInt = i0
 30575  		v1.Aux = s
 30576  		v1.AddArg(p)
 30577  		v1.AddArg(idx)
 30578  		v1.AddArg(mem)
 30579  		v0.AddArg(v1)
 30580  		return true
 30581  	}
 30582  	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 30583  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30584  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30585  	for {
 30586  		_ = v.Args[1]
 30587  		r1 := v.Args[0]
 30588  		if r1.Op != OpAMD64ROLWconst {
 30589  			break
 30590  		}
 30591  		if r1.AuxInt != 8 {
 30592  			break
 30593  		}
 30594  		x1 := r1.Args[0]
 30595  		if x1.Op != OpAMD64MOVWloadidx1 {
 30596  			break
 30597  		}
 30598  		i1 := x1.AuxInt
 30599  		s := x1.Aux
 30600  		_ = x1.Args[2]
 30601  		p := x1.Args[0]
 30602  		idx := x1.Args[1]
 30603  		mem := x1.Args[2]
 30604  		sh := v.Args[1]
 30605  		if sh.Op != OpAMD64SHLQconst {
 30606  			break
 30607  		}
 30608  		if sh.AuxInt != 16 {
 30609  			break
 30610  		}
 30611  		r0 := sh.Args[0]
 30612  		if r0.Op != OpAMD64ROLWconst {
 30613  			break
 30614  		}
 30615  		if r0.AuxInt != 8 {
 30616  			break
 30617  		}
 30618  		x0 := r0.Args[0]
 30619  		if x0.Op != OpAMD64MOVWloadidx1 {
 30620  			break
 30621  		}
 30622  		i0 := x0.AuxInt
 30623  		if x0.Aux != s {
 30624  			break
 30625  		}
 30626  		_ = x0.Args[2]
 30627  		if idx != x0.Args[0] {
 30628  			break
 30629  		}
 30630  		if p != x0.Args[1] {
 30631  			break
 30632  		}
 30633  		if mem != x0.Args[2] {
 30634  			break
 30635  		}
 30636  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30637  			break
 30638  		}
 30639  		b = mergePoint(b, x0, x1)
 30640  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30641  		v.reset(OpCopy)
 30642  		v.AddArg(v0)
 30643  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30644  		v1.AuxInt = i0
 30645  		v1.Aux = s
 30646  		v1.AddArg(p)
 30647  		v1.AddArg(idx)
 30648  		v1.AddArg(mem)
 30649  		v0.AddArg(v1)
 30650  		return true
 30651  	}
 30652  	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 30653  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30654  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30655  	for {
 30656  		_ = v.Args[1]
 30657  		r1 := v.Args[0]
 30658  		if r1.Op != OpAMD64ROLWconst {
 30659  			break
 30660  		}
 30661  		if r1.AuxInt != 8 {
 30662  			break
 30663  		}
 30664  		x1 := r1.Args[0]
 30665  		if x1.Op != OpAMD64MOVWloadidx1 {
 30666  			break
 30667  		}
 30668  		i1 := x1.AuxInt
 30669  		s := x1.Aux
 30670  		_ = x1.Args[2]
 30671  		idx := x1.Args[0]
 30672  		p := x1.Args[1]
 30673  		mem := x1.Args[2]
 30674  		sh := v.Args[1]
 30675  		if sh.Op != OpAMD64SHLQconst {
 30676  			break
 30677  		}
 30678  		if sh.AuxInt != 16 {
 30679  			break
 30680  		}
 30681  		r0 := sh.Args[0]
 30682  		if r0.Op != OpAMD64ROLWconst {
 30683  			break
 30684  		}
 30685  		if r0.AuxInt != 8 {
 30686  			break
 30687  		}
 30688  		x0 := r0.Args[0]
 30689  		if x0.Op != OpAMD64MOVWloadidx1 {
 30690  			break
 30691  		}
 30692  		i0 := x0.AuxInt
 30693  		if x0.Aux != s {
 30694  			break
 30695  		}
 30696  		_ = x0.Args[2]
 30697  		if idx != x0.Args[0] {
 30698  			break
 30699  		}
 30700  		if p != x0.Args[1] {
 30701  			break
 30702  		}
 30703  		if mem != x0.Args[2] {
 30704  			break
 30705  		}
 30706  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30707  			break
 30708  		}
 30709  		b = mergePoint(b, x0, x1)
 30710  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30711  		v.reset(OpCopy)
 30712  		v.AddArg(v0)
 30713  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30714  		v1.AuxInt = i0
 30715  		v1.Aux = s
 30716  		v1.AddArg(p)
 30717  		v1.AddArg(idx)
 30718  		v1.AddArg(mem)
 30719  		v0.AddArg(v1)
 30720  		return true
 30721  	}
	// Remaining rules: same word-pair fusion with the shifted half as the first
	// ORQ operand (commuted form of the rules above).
 30722  	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 30723  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30724  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30725  	for {
 30726  		_ = v.Args[1]
 30727  		sh := v.Args[0]
 30728  		if sh.Op != OpAMD64SHLQconst {
 30729  			break
 30730  		}
 30731  		if sh.AuxInt != 16 {
 30732  			break
 30733  		}
 30734  		r0 := sh.Args[0]
 30735  		if r0.Op != OpAMD64ROLWconst {
 30736  			break
 30737  		}
 30738  		if r0.AuxInt != 8 {
 30739  			break
 30740  		}
 30741  		x0 := r0.Args[0]
 30742  		if x0.Op != OpAMD64MOVWloadidx1 {
 30743  			break
 30744  		}
 30745  		i0 := x0.AuxInt
 30746  		s := x0.Aux
 30747  		_ = x0.Args[2]
 30748  		p := x0.Args[0]
 30749  		idx := x0.Args[1]
 30750  		mem := x0.Args[2]
 30751  		r1 := v.Args[1]
 30752  		if r1.Op != OpAMD64ROLWconst {
 30753  			break
 30754  		}
 30755  		if r1.AuxInt != 8 {
 30756  			break
 30757  		}
 30758  		x1 := r1.Args[0]
 30759  		if x1.Op != OpAMD64MOVWloadidx1 {
 30760  			break
 30761  		}
 30762  		i1 := x1.AuxInt
 30763  		if x1.Aux != s {
 30764  			break
 30765  		}
 30766  		_ = x1.Args[2]
 30767  		if p != x1.Args[0] {
 30768  			break
 30769  		}
 30770  		if idx != x1.Args[1] {
 30771  			break
 30772  		}
 30773  		if mem != x1.Args[2] {
 30774  			break
 30775  		}
 30776  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30777  			break
 30778  		}
 30779  		b = mergePoint(b, x0, x1)
 30780  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30781  		v.reset(OpCopy)
 30782  		v.AddArg(v0)
 30783  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30784  		v1.AuxInt = i0
 30785  		v1.Aux = s
 30786  		v1.AddArg(p)
 30787  		v1.AddArg(idx)
 30788  		v1.AddArg(mem)
 30789  		v0.AddArg(v1)
 30790  		return true
 30791  	}
 30792  	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
 30793  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30794  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30795  	for {
 30796  		_ = v.Args[1]
 30797  		sh := v.Args[0]
 30798  		if sh.Op != OpAMD64SHLQconst {
 30799  			break
 30800  		}
 30801  		if sh.AuxInt != 16 {
 30802  			break
 30803  		}
 30804  		r0 := sh.Args[0]
 30805  		if r0.Op != OpAMD64ROLWconst {
 30806  			break
 30807  		}
 30808  		if r0.AuxInt != 8 {
 30809  			break
 30810  		}
 30811  		x0 := r0.Args[0]
 30812  		if x0.Op != OpAMD64MOVWloadidx1 {
 30813  			break
 30814  		}
 30815  		i0 := x0.AuxInt
 30816  		s := x0.Aux
 30817  		_ = x0.Args[2]
 30818  		idx := x0.Args[0]
 30819  		p := x0.Args[1]
 30820  		mem := x0.Args[2]
 30821  		r1 := v.Args[1]
 30822  		if r1.Op != OpAMD64ROLWconst {
 30823  			break
 30824  		}
 30825  		if r1.AuxInt != 8 {
 30826  			break
 30827  		}
 30828  		x1 := r1.Args[0]
 30829  		if x1.Op != OpAMD64MOVWloadidx1 {
 30830  			break
 30831  		}
 30832  		i1 := x1.AuxInt
 30833  		if x1.Aux != s {
 30834  			break
 30835  		}
 30836  		_ = x1.Args[2]
 30837  		if p != x1.Args[0] {
 30838  			break
 30839  		}
 30840  		if idx != x1.Args[1] {
 30841  			break
 30842  		}
 30843  		if mem != x1.Args[2] {
 30844  			break
 30845  		}
 30846  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30847  			break
 30848  		}
 30849  		b = mergePoint(b, x0, x1)
 30850  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30851  		v.reset(OpCopy)
 30852  		v.AddArg(v0)
 30853  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30854  		v1.AuxInt = i0
 30855  		v1.Aux = s
 30856  		v1.AddArg(p)
 30857  		v1.AddArg(idx)
 30858  		v1.AddArg(mem)
 30859  		v0.AddArg(v1)
 30860  		return true
 30861  	}
 30862  	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
 30863  	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
 30864  	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
 30865  	for {
 30866  		_ = v.Args[1]
 30867  		sh := v.Args[0]
 30868  		if sh.Op != OpAMD64SHLQconst {
 30869  			break
 30870  		}
 30871  		if sh.AuxInt != 16 {
 30872  			break
 30873  		}
 30874  		r0 := sh.Args[0]
 30875  		if r0.Op != OpAMD64ROLWconst {
 30876  			break
 30877  		}
 30878  		if r0.AuxInt != 8 {
 30879  			break
 30880  		}
 30881  		x0 := r0.Args[0]
 30882  		if x0.Op != OpAMD64MOVWloadidx1 {
 30883  			break
 30884  		}
 30885  		i0 := x0.AuxInt
 30886  		s := x0.Aux
 30887  		_ = x0.Args[2]
 30888  		p := x0.Args[0]
 30889  		idx := x0.Args[1]
 30890  		mem := x0.Args[2]
 30891  		r1 := v.Args[1]
 30892  		if r1.Op != OpAMD64ROLWconst {
 30893  			break
 30894  		}
 30895  		if r1.AuxInt != 8 {
 30896  			break
 30897  		}
 30898  		x1 := r1.Args[0]
 30899  		if x1.Op != OpAMD64MOVWloadidx1 {
 30900  			break
 30901  		}
 30902  		i1 := x1.AuxInt
 30903  		if x1.Aux != s {
 30904  			break
 30905  		}
 30906  		_ = x1.Args[2]
 30907  		if idx != x1.Args[0] {
 30908  			break
 30909  		}
 30910  		if p != x1.Args[1] {
 30911  			break
 30912  		}
 30913  		if mem != x1.Args[2] {
 30914  			break
 30915  		}
 30916  		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
 30917  			break
 30918  		}
 30919  		b = mergePoint(b, x0, x1)
 30920  		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
 30921  		v.reset(OpCopy)
 30922  		v.AddArg(v0)
 30923  		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 30924  		v1.AuxInt = i0
 30925  		v1.Aux = s
 30926  		v1.AddArg(p)
 30927  		v1.AddArg(idx)
 30928  		v1.AddArg(mem)
 30929  		v0.AddArg(v1)
 30930  		return true
 30931  	}
 30932  	return false
 30933  }
// rewriteValueAMD64_OpAMD64ORQ_120 is one link in the generated, chained
// rewrite for OpAMD64ORQ (dispatched from rewriteValueAMD64): it tries the
// next batch of ORQ rules and reports whether any of them fired.
//
// Each "for { ... }" below attempts exactly one rule. It structurally matches
// the value tree described in the preceding // match: comment, checks the
// side conditions from the // cond: comment, and on success rewrites v into
// the // result: form and returns true. Any failed check breaks out of the
// loop and falls through to the next rule; if no rule matches, the function
// returns false so the next chained _130 function can run.
//
// The rules here fuse pairs of adjacent byte-swapped indexed loads — 16-bit
// loads wrapped in ROLWconst [8], or 32-bit loads wrapped in BSWAPL, combined
// via SHLQconst and ORQ — into a single wider byte-swapped load
// (BSWAPL(MOVLloadidx1) or BSWAPQ(MOVQloadidx1)). Most rules are
// permutations of one pattern: p/idx commuted inside either load, and the
// shifted half appearing on either side of the ORQ. The final rule folds two
// byte loads buried in a larger OR chain into one ROLWconst(MOVWloadidx1)
// term while preserving the rest of the chain (y).
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// Two byte-swapped 16-bit loads at i0 and i0+2 -> one BSWAPL of a 32-bit
	// load (p/idx commuted in both loads, shifted half on the left).
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		// Touching v.Args[1] up front documents (and bounds-checks) that v
		// has both operands before the rule starts picking them apart.
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		// The second load must use the same symbol, base, index and memory
		// state as the first; only the constant offset may differ.
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		// Build the replacement at the merge point of the two loads; v itself
		// becomes a copy of the new value.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// Two BSWAPL'd 32-bit loads at i0 and i0+4 -> one BSWAPQ of a 64-bit
	// load (shifted half on the right, p idx order in both loads).
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// Same fusion as above with p/idx commuted in the low (x1) load.
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// Same fusion with p/idx commuted in the high (x0) load.
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// Same fusion with p/idx commuted in both loads.
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// Same BSWAPQ fusion with the shifted half on the left of the ORQ.
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// As above, with p/idx commuted in the high (x0) load.
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// As above, with p/idx commuted in the low (x1) load.
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// As above, with p/idx commuted in both loads.
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// Inside a wider OR chain: two shifted byte loads at i0 and i0+1 fold
	// into one byte-swapped 16-bit load term; the rest of the chain (y) is
	// re-attached to a fresh ORQ.
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// No rule in this batch matched; the caller tries the next chained batch.
	return false
}
 31595  func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
 31596  	b := v.Block
 31597  	_ = b
 31598  	typ := &b.Func.Config.Types
 31599  	_ = typ
 31600  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
 31601  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 31602  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 31603  	for {
 31604  		_ = v.Args[1]
 31605  		s0 := v.Args[0]
 31606  		if s0.Op != OpAMD64SHLQconst {
 31607  			break
 31608  		}
 31609  		j0 := s0.AuxInt
 31610  		x0 := s0.Args[0]
 31611  		if x0.Op != OpAMD64MOVBloadidx1 {
 31612  			break
 31613  		}
 31614  		i0 := x0.AuxInt
 31615  		s := x0.Aux
 31616  		_ = x0.Args[2]
 31617  		idx := x0.Args[0]
 31618  		p := x0.Args[1]
 31619  		mem := x0.Args[2]
 31620  		or := v.Args[1]
 31621  		if or.Op != OpAMD64ORQ {
 31622  			break
 31623  		}
 31624  		_ = or.Args[1]
 31625  		s1 := or.Args[0]
 31626  		if s1.Op != OpAMD64SHLQconst {
 31627  			break
 31628  		}
 31629  		j1 := s1.AuxInt
 31630  		x1 := s1.Args[0]
 31631  		if x1.Op != OpAMD64MOVBloadidx1 {
 31632  			break
 31633  		}
 31634  		i1 := x1.AuxInt
 31635  		if x1.Aux != s {
 31636  			break
 31637  		}
 31638  		_ = x1.Args[2]
 31639  		if p != x1.Args[0] {
 31640  			break
 31641  		}
 31642  		if idx != x1.Args[1] {
 31643  			break
 31644  		}
 31645  		if mem != x1.Args[2] {
 31646  			break
 31647  		}
 31648  		y := or.Args[1]
 31649  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 31650  			break
 31651  		}
 31652  		b = mergePoint(b, x0, x1)
 31653  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 31654  		v.reset(OpCopy)
 31655  		v.AddArg(v0)
 31656  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 31657  		v1.AuxInt = j1
 31658  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 31659  		v2.AuxInt = 8
 31660  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 31661  		v3.AuxInt = i0
 31662  		v3.Aux = s
 31663  		v3.AddArg(p)
 31664  		v3.AddArg(idx)
 31665  		v3.AddArg(mem)
 31666  		v2.AddArg(v3)
 31667  		v1.AddArg(v2)
 31668  		v0.AddArg(v1)
 31669  		v0.AddArg(y)
 31670  		return true
 31671  	}
 31672  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
 31673  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 31674  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 31675  	for {
 31676  		_ = v.Args[1]
 31677  		s0 := v.Args[0]
 31678  		if s0.Op != OpAMD64SHLQconst {
 31679  			break
 31680  		}
 31681  		j0 := s0.AuxInt
 31682  		x0 := s0.Args[0]
 31683  		if x0.Op != OpAMD64MOVBloadidx1 {
 31684  			break
 31685  		}
 31686  		i0 := x0.AuxInt
 31687  		s := x0.Aux
 31688  		_ = x0.Args[2]
 31689  		p := x0.Args[0]
 31690  		idx := x0.Args[1]
 31691  		mem := x0.Args[2]
 31692  		or := v.Args[1]
 31693  		if or.Op != OpAMD64ORQ {
 31694  			break
 31695  		}
 31696  		_ = or.Args[1]
 31697  		s1 := or.Args[0]
 31698  		if s1.Op != OpAMD64SHLQconst {
 31699  			break
 31700  		}
 31701  		j1 := s1.AuxInt
 31702  		x1 := s1.Args[0]
 31703  		if x1.Op != OpAMD64MOVBloadidx1 {
 31704  			break
 31705  		}
 31706  		i1 := x1.AuxInt
 31707  		if x1.Aux != s {
 31708  			break
 31709  		}
 31710  		_ = x1.Args[2]
 31711  		if idx != x1.Args[0] {
 31712  			break
 31713  		}
 31714  		if p != x1.Args[1] {
 31715  			break
 31716  		}
 31717  		if mem != x1.Args[2] {
 31718  			break
 31719  		}
 31720  		y := or.Args[1]
 31721  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 31722  			break
 31723  		}
 31724  		b = mergePoint(b, x0, x1)
 31725  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 31726  		v.reset(OpCopy)
 31727  		v.AddArg(v0)
 31728  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 31729  		v1.AuxInt = j1
 31730  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 31731  		v2.AuxInt = 8
 31732  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 31733  		v3.AuxInt = i0
 31734  		v3.Aux = s
 31735  		v3.AddArg(p)
 31736  		v3.AddArg(idx)
 31737  		v3.AddArg(mem)
 31738  		v2.AddArg(v3)
 31739  		v1.AddArg(v2)
 31740  		v0.AddArg(v1)
 31741  		v0.AddArg(y)
 31742  		return true
 31743  	}
 31744  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
 31745  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 31746  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 31747  	for {
 31748  		_ = v.Args[1]
 31749  		s0 := v.Args[0]
 31750  		if s0.Op != OpAMD64SHLQconst {
 31751  			break
 31752  		}
 31753  		j0 := s0.AuxInt
 31754  		x0 := s0.Args[0]
 31755  		if x0.Op != OpAMD64MOVBloadidx1 {
 31756  			break
 31757  		}
 31758  		i0 := x0.AuxInt
 31759  		s := x0.Aux
 31760  		_ = x0.Args[2]
 31761  		idx := x0.Args[0]
 31762  		p := x0.Args[1]
 31763  		mem := x0.Args[2]
 31764  		or := v.Args[1]
 31765  		if or.Op != OpAMD64ORQ {
 31766  			break
 31767  		}
 31768  		_ = or.Args[1]
 31769  		s1 := or.Args[0]
 31770  		if s1.Op != OpAMD64SHLQconst {
 31771  			break
 31772  		}
 31773  		j1 := s1.AuxInt
 31774  		x1 := s1.Args[0]
 31775  		if x1.Op != OpAMD64MOVBloadidx1 {
 31776  			break
 31777  		}
 31778  		i1 := x1.AuxInt
 31779  		if x1.Aux != s {
 31780  			break
 31781  		}
 31782  		_ = x1.Args[2]
 31783  		if idx != x1.Args[0] {
 31784  			break
 31785  		}
 31786  		if p != x1.Args[1] {
 31787  			break
 31788  		}
 31789  		if mem != x1.Args[2] {
 31790  			break
 31791  		}
 31792  		y := or.Args[1]
 31793  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 31794  			break
 31795  		}
 31796  		b = mergePoint(b, x0, x1)
 31797  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 31798  		v.reset(OpCopy)
 31799  		v.AddArg(v0)
 31800  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 31801  		v1.AuxInt = j1
 31802  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 31803  		v2.AuxInt = 8
 31804  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 31805  		v3.AuxInt = i0
 31806  		v3.Aux = s
 31807  		v3.AddArg(p)
 31808  		v3.AddArg(idx)
 31809  		v3.AddArg(mem)
 31810  		v2.AddArg(v3)
 31811  		v1.AddArg(v2)
 31812  		v0.AddArg(v1)
 31813  		v0.AddArg(y)
 31814  		return true
 31815  	}
 31816  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
 31817  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 31818  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 31819  	for {
 31820  		_ = v.Args[1]
 31821  		s0 := v.Args[0]
 31822  		if s0.Op != OpAMD64SHLQconst {
 31823  			break
 31824  		}
 31825  		j0 := s0.AuxInt
 31826  		x0 := s0.Args[0]
 31827  		if x0.Op != OpAMD64MOVBloadidx1 {
 31828  			break
 31829  		}
 31830  		i0 := x0.AuxInt
 31831  		s := x0.Aux
 31832  		_ = x0.Args[2]
 31833  		p := x0.Args[0]
 31834  		idx := x0.Args[1]
 31835  		mem := x0.Args[2]
 31836  		or := v.Args[1]
 31837  		if or.Op != OpAMD64ORQ {
 31838  			break
 31839  		}
 31840  		_ = or.Args[1]
 31841  		y := or.Args[0]
 31842  		s1 := or.Args[1]
 31843  		if s1.Op != OpAMD64SHLQconst {
 31844  			break
 31845  		}
 31846  		j1 := s1.AuxInt
 31847  		x1 := s1.Args[0]
 31848  		if x1.Op != OpAMD64MOVBloadidx1 {
 31849  			break
 31850  		}
 31851  		i1 := x1.AuxInt
 31852  		if x1.Aux != s {
 31853  			break
 31854  		}
 31855  		_ = x1.Args[2]
 31856  		if p != x1.Args[0] {
 31857  			break
 31858  		}
 31859  		if idx != x1.Args[1] {
 31860  			break
 31861  		}
 31862  		if mem != x1.Args[2] {
 31863  			break
 31864  		}
 31865  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 31866  			break
 31867  		}
 31868  		b = mergePoint(b, x0, x1)
 31869  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 31870  		v.reset(OpCopy)
 31871  		v.AddArg(v0)
 31872  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 31873  		v1.AuxInt = j1
 31874  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 31875  		v2.AuxInt = 8
 31876  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 31877  		v3.AuxInt = i0
 31878  		v3.Aux = s
 31879  		v3.AddArg(p)
 31880  		v3.AddArg(idx)
 31881  		v3.AddArg(mem)
 31882  		v2.AddArg(v3)
 31883  		v1.AddArg(v2)
 31884  		v0.AddArg(v1)
 31885  		v0.AddArg(y)
 31886  		return true
 31887  	}
 31888  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
 31889  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 31890  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 31891  	for {
 31892  		_ = v.Args[1]
 31893  		s0 := v.Args[0]
 31894  		if s0.Op != OpAMD64SHLQconst {
 31895  			break
 31896  		}
 31897  		j0 := s0.AuxInt
 31898  		x0 := s0.Args[0]
 31899  		if x0.Op != OpAMD64MOVBloadidx1 {
 31900  			break
 31901  		}
 31902  		i0 := x0.AuxInt
 31903  		s := x0.Aux
 31904  		_ = x0.Args[2]
 31905  		idx := x0.Args[0]
 31906  		p := x0.Args[1]
 31907  		mem := x0.Args[2]
 31908  		or := v.Args[1]
 31909  		if or.Op != OpAMD64ORQ {
 31910  			break
 31911  		}
 31912  		_ = or.Args[1]
 31913  		y := or.Args[0]
 31914  		s1 := or.Args[1]
 31915  		if s1.Op != OpAMD64SHLQconst {
 31916  			break
 31917  		}
 31918  		j1 := s1.AuxInt
 31919  		x1 := s1.Args[0]
 31920  		if x1.Op != OpAMD64MOVBloadidx1 {
 31921  			break
 31922  		}
 31923  		i1 := x1.AuxInt
 31924  		if x1.Aux != s {
 31925  			break
 31926  		}
 31927  		_ = x1.Args[2]
 31928  		if p != x1.Args[0] {
 31929  			break
 31930  		}
 31931  		if idx != x1.Args[1] {
 31932  			break
 31933  		}
 31934  		if mem != x1.Args[2] {
 31935  			break
 31936  		}
 31937  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 31938  			break
 31939  		}
 31940  		b = mergePoint(b, x0, x1)
 31941  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 31942  		v.reset(OpCopy)
 31943  		v.AddArg(v0)
 31944  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 31945  		v1.AuxInt = j1
 31946  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 31947  		v2.AuxInt = 8
 31948  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 31949  		v3.AuxInt = i0
 31950  		v3.Aux = s
 31951  		v3.AddArg(p)
 31952  		v3.AddArg(idx)
 31953  		v3.AddArg(mem)
 31954  		v2.AddArg(v3)
 31955  		v1.AddArg(v2)
 31956  		v0.AddArg(v1)
 31957  		v0.AddArg(y)
 31958  		return true
 31959  	}
 31960  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
 31961  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 31962  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 31963  	for {
 31964  		_ = v.Args[1]
 31965  		s0 := v.Args[0]
 31966  		if s0.Op != OpAMD64SHLQconst {
 31967  			break
 31968  		}
 31969  		j0 := s0.AuxInt
 31970  		x0 := s0.Args[0]
 31971  		if x0.Op != OpAMD64MOVBloadidx1 {
 31972  			break
 31973  		}
 31974  		i0 := x0.AuxInt
 31975  		s := x0.Aux
 31976  		_ = x0.Args[2]
 31977  		p := x0.Args[0]
 31978  		idx := x0.Args[1]
 31979  		mem := x0.Args[2]
 31980  		or := v.Args[1]
 31981  		if or.Op != OpAMD64ORQ {
 31982  			break
 31983  		}
 31984  		_ = or.Args[1]
 31985  		y := or.Args[0]
 31986  		s1 := or.Args[1]
 31987  		if s1.Op != OpAMD64SHLQconst {
 31988  			break
 31989  		}
 31990  		j1 := s1.AuxInt
 31991  		x1 := s1.Args[0]
 31992  		if x1.Op != OpAMD64MOVBloadidx1 {
 31993  			break
 31994  		}
 31995  		i1 := x1.AuxInt
 31996  		if x1.Aux != s {
 31997  			break
 31998  		}
 31999  		_ = x1.Args[2]
 32000  		if idx != x1.Args[0] {
 32001  			break
 32002  		}
 32003  		if p != x1.Args[1] {
 32004  			break
 32005  		}
 32006  		if mem != x1.Args[2] {
 32007  			break
 32008  		}
 32009  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32010  			break
 32011  		}
 32012  		b = mergePoint(b, x0, x1)
 32013  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32014  		v.reset(OpCopy)
 32015  		v.AddArg(v0)
 32016  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32017  		v1.AuxInt = j1
 32018  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32019  		v2.AuxInt = 8
 32020  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32021  		v3.AuxInt = i0
 32022  		v3.Aux = s
 32023  		v3.AddArg(p)
 32024  		v3.AddArg(idx)
 32025  		v3.AddArg(mem)
 32026  		v2.AddArg(v3)
 32027  		v1.AddArg(v2)
 32028  		v0.AddArg(v1)
 32029  		v0.AddArg(y)
 32030  		return true
 32031  	}
 32032  	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
 32033  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32034  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32035  	for {
 32036  		_ = v.Args[1]
 32037  		s0 := v.Args[0]
 32038  		if s0.Op != OpAMD64SHLQconst {
 32039  			break
 32040  		}
 32041  		j0 := s0.AuxInt
 32042  		x0 := s0.Args[0]
 32043  		if x0.Op != OpAMD64MOVBloadidx1 {
 32044  			break
 32045  		}
 32046  		i0 := x0.AuxInt
 32047  		s := x0.Aux
 32048  		_ = x0.Args[2]
 32049  		idx := x0.Args[0]
 32050  		p := x0.Args[1]
 32051  		mem := x0.Args[2]
 32052  		or := v.Args[1]
 32053  		if or.Op != OpAMD64ORQ {
 32054  			break
 32055  		}
 32056  		_ = or.Args[1]
 32057  		y := or.Args[0]
 32058  		s1 := or.Args[1]
 32059  		if s1.Op != OpAMD64SHLQconst {
 32060  			break
 32061  		}
 32062  		j1 := s1.AuxInt
 32063  		x1 := s1.Args[0]
 32064  		if x1.Op != OpAMD64MOVBloadidx1 {
 32065  			break
 32066  		}
 32067  		i1 := x1.AuxInt
 32068  		if x1.Aux != s {
 32069  			break
 32070  		}
 32071  		_ = x1.Args[2]
 32072  		if idx != x1.Args[0] {
 32073  			break
 32074  		}
 32075  		if p != x1.Args[1] {
 32076  			break
 32077  		}
 32078  		if mem != x1.Args[2] {
 32079  			break
 32080  		}
 32081  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32082  			break
 32083  		}
 32084  		b = mergePoint(b, x0, x1)
 32085  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32086  		v.reset(OpCopy)
 32087  		v.AddArg(v0)
 32088  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32089  		v1.AuxInt = j1
 32090  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32091  		v2.AuxInt = 8
 32092  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32093  		v3.AuxInt = i0
 32094  		v3.Aux = s
 32095  		v3.AddArg(p)
 32096  		v3.AddArg(idx)
 32097  		v3.AddArg(mem)
 32098  		v2.AddArg(v3)
 32099  		v1.AddArg(v2)
 32100  		v0.AddArg(v1)
 32101  		v0.AddArg(y)
 32102  		return true
 32103  	}
 32104  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 32105  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32106  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32107  	for {
 32108  		_ = v.Args[1]
 32109  		or := v.Args[0]
 32110  		if or.Op != OpAMD64ORQ {
 32111  			break
 32112  		}
 32113  		_ = or.Args[1]
 32114  		s1 := or.Args[0]
 32115  		if s1.Op != OpAMD64SHLQconst {
 32116  			break
 32117  		}
 32118  		j1 := s1.AuxInt
 32119  		x1 := s1.Args[0]
 32120  		if x1.Op != OpAMD64MOVBloadidx1 {
 32121  			break
 32122  		}
 32123  		i1 := x1.AuxInt
 32124  		s := x1.Aux
 32125  		_ = x1.Args[2]
 32126  		p := x1.Args[0]
 32127  		idx := x1.Args[1]
 32128  		mem := x1.Args[2]
 32129  		y := or.Args[1]
 32130  		s0 := v.Args[1]
 32131  		if s0.Op != OpAMD64SHLQconst {
 32132  			break
 32133  		}
 32134  		j0 := s0.AuxInt
 32135  		x0 := s0.Args[0]
 32136  		if x0.Op != OpAMD64MOVBloadidx1 {
 32137  			break
 32138  		}
 32139  		i0 := x0.AuxInt
 32140  		if x0.Aux != s {
 32141  			break
 32142  		}
 32143  		_ = x0.Args[2]
 32144  		if p != x0.Args[0] {
 32145  			break
 32146  		}
 32147  		if idx != x0.Args[1] {
 32148  			break
 32149  		}
 32150  		if mem != x0.Args[2] {
 32151  			break
 32152  		}
 32153  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32154  			break
 32155  		}
 32156  		b = mergePoint(b, x0, x1)
 32157  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32158  		v.reset(OpCopy)
 32159  		v.AddArg(v0)
 32160  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32161  		v1.AuxInt = j1
 32162  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32163  		v2.AuxInt = 8
 32164  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32165  		v3.AuxInt = i0
 32166  		v3.Aux = s
 32167  		v3.AddArg(p)
 32168  		v3.AddArg(idx)
 32169  		v3.AddArg(mem)
 32170  		v2.AddArg(v3)
 32171  		v1.AddArg(v2)
 32172  		v0.AddArg(v1)
 32173  		v0.AddArg(y)
 32174  		return true
 32175  	}
 32176  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 32177  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32178  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32179  	for {
 32180  		_ = v.Args[1]
 32181  		or := v.Args[0]
 32182  		if or.Op != OpAMD64ORQ {
 32183  			break
 32184  		}
 32185  		_ = or.Args[1]
 32186  		s1 := or.Args[0]
 32187  		if s1.Op != OpAMD64SHLQconst {
 32188  			break
 32189  		}
 32190  		j1 := s1.AuxInt
 32191  		x1 := s1.Args[0]
 32192  		if x1.Op != OpAMD64MOVBloadidx1 {
 32193  			break
 32194  		}
 32195  		i1 := x1.AuxInt
 32196  		s := x1.Aux
 32197  		_ = x1.Args[2]
 32198  		idx := x1.Args[0]
 32199  		p := x1.Args[1]
 32200  		mem := x1.Args[2]
 32201  		y := or.Args[1]
 32202  		s0 := v.Args[1]
 32203  		if s0.Op != OpAMD64SHLQconst {
 32204  			break
 32205  		}
 32206  		j0 := s0.AuxInt
 32207  		x0 := s0.Args[0]
 32208  		if x0.Op != OpAMD64MOVBloadidx1 {
 32209  			break
 32210  		}
 32211  		i0 := x0.AuxInt
 32212  		if x0.Aux != s {
 32213  			break
 32214  		}
 32215  		_ = x0.Args[2]
 32216  		if p != x0.Args[0] {
 32217  			break
 32218  		}
 32219  		if idx != x0.Args[1] {
 32220  			break
 32221  		}
 32222  		if mem != x0.Args[2] {
 32223  			break
 32224  		}
 32225  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32226  			break
 32227  		}
 32228  		b = mergePoint(b, x0, x1)
 32229  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32230  		v.reset(OpCopy)
 32231  		v.AddArg(v0)
 32232  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32233  		v1.AuxInt = j1
 32234  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32235  		v2.AuxInt = 8
 32236  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32237  		v3.AuxInt = i0
 32238  		v3.Aux = s
 32239  		v3.AddArg(p)
 32240  		v3.AddArg(idx)
 32241  		v3.AddArg(mem)
 32242  		v2.AddArg(v3)
 32243  		v1.AddArg(v2)
 32244  		v0.AddArg(v1)
 32245  		v0.AddArg(y)
 32246  		return true
 32247  	}
 32248  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 32249  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32250  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32251  	for {
 32252  		_ = v.Args[1]
 32253  		or := v.Args[0]
 32254  		if or.Op != OpAMD64ORQ {
 32255  			break
 32256  		}
 32257  		_ = or.Args[1]
 32258  		y := or.Args[0]
 32259  		s1 := or.Args[1]
 32260  		if s1.Op != OpAMD64SHLQconst {
 32261  			break
 32262  		}
 32263  		j1 := s1.AuxInt
 32264  		x1 := s1.Args[0]
 32265  		if x1.Op != OpAMD64MOVBloadidx1 {
 32266  			break
 32267  		}
 32268  		i1 := x1.AuxInt
 32269  		s := x1.Aux
 32270  		_ = x1.Args[2]
 32271  		p := x1.Args[0]
 32272  		idx := x1.Args[1]
 32273  		mem := x1.Args[2]
 32274  		s0 := v.Args[1]
 32275  		if s0.Op != OpAMD64SHLQconst {
 32276  			break
 32277  		}
 32278  		j0 := s0.AuxInt
 32279  		x0 := s0.Args[0]
 32280  		if x0.Op != OpAMD64MOVBloadidx1 {
 32281  			break
 32282  		}
 32283  		i0 := x0.AuxInt
 32284  		if x0.Aux != s {
 32285  			break
 32286  		}
 32287  		_ = x0.Args[2]
 32288  		if p != x0.Args[0] {
 32289  			break
 32290  		}
 32291  		if idx != x0.Args[1] {
 32292  			break
 32293  		}
 32294  		if mem != x0.Args[2] {
 32295  			break
 32296  		}
 32297  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32298  			break
 32299  		}
 32300  		b = mergePoint(b, x0, x1)
 32301  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32302  		v.reset(OpCopy)
 32303  		v.AddArg(v0)
 32304  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32305  		v1.AuxInt = j1
 32306  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32307  		v2.AuxInt = 8
 32308  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32309  		v3.AuxInt = i0
 32310  		v3.Aux = s
 32311  		v3.AddArg(p)
 32312  		v3.AddArg(idx)
 32313  		v3.AddArg(mem)
 32314  		v2.AddArg(v3)
 32315  		v1.AddArg(v2)
 32316  		v0.AddArg(v1)
 32317  		v0.AddArg(y)
 32318  		return true
 32319  	}
 32320  	return false
 32321  }
 32322  func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
 32323  	b := v.Block
 32324  	_ = b
 32325  	typ := &b.Func.Config.Types
 32326  	_ = typ
 32327  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
 32328  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32329  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32330  	for {
 32331  		_ = v.Args[1]
 32332  		or := v.Args[0]
 32333  		if or.Op != OpAMD64ORQ {
 32334  			break
 32335  		}
 32336  		_ = or.Args[1]
 32337  		y := or.Args[0]
 32338  		s1 := or.Args[1]
 32339  		if s1.Op != OpAMD64SHLQconst {
 32340  			break
 32341  		}
 32342  		j1 := s1.AuxInt
 32343  		x1 := s1.Args[0]
 32344  		if x1.Op != OpAMD64MOVBloadidx1 {
 32345  			break
 32346  		}
 32347  		i1 := x1.AuxInt
 32348  		s := x1.Aux
 32349  		_ = x1.Args[2]
 32350  		idx := x1.Args[0]
 32351  		p := x1.Args[1]
 32352  		mem := x1.Args[2]
 32353  		s0 := v.Args[1]
 32354  		if s0.Op != OpAMD64SHLQconst {
 32355  			break
 32356  		}
 32357  		j0 := s0.AuxInt
 32358  		x0 := s0.Args[0]
 32359  		if x0.Op != OpAMD64MOVBloadidx1 {
 32360  			break
 32361  		}
 32362  		i0 := x0.AuxInt
 32363  		if x0.Aux != s {
 32364  			break
 32365  		}
 32366  		_ = x0.Args[2]
 32367  		if p != x0.Args[0] {
 32368  			break
 32369  		}
 32370  		if idx != x0.Args[1] {
 32371  			break
 32372  		}
 32373  		if mem != x0.Args[2] {
 32374  			break
 32375  		}
 32376  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32377  			break
 32378  		}
 32379  		b = mergePoint(b, x0, x1)
 32380  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32381  		v.reset(OpCopy)
 32382  		v.AddArg(v0)
 32383  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32384  		v1.AuxInt = j1
 32385  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32386  		v2.AuxInt = 8
 32387  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32388  		v3.AuxInt = i0
 32389  		v3.Aux = s
 32390  		v3.AddArg(p)
 32391  		v3.AddArg(idx)
 32392  		v3.AddArg(mem)
 32393  		v2.AddArg(v3)
 32394  		v1.AddArg(v2)
 32395  		v0.AddArg(v1)
 32396  		v0.AddArg(y)
 32397  		return true
 32398  	}
 32399  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 32400  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32401  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32402  	for {
 32403  		_ = v.Args[1]
 32404  		or := v.Args[0]
 32405  		if or.Op != OpAMD64ORQ {
 32406  			break
 32407  		}
 32408  		_ = or.Args[1]
 32409  		s1 := or.Args[0]
 32410  		if s1.Op != OpAMD64SHLQconst {
 32411  			break
 32412  		}
 32413  		j1 := s1.AuxInt
 32414  		x1 := s1.Args[0]
 32415  		if x1.Op != OpAMD64MOVBloadidx1 {
 32416  			break
 32417  		}
 32418  		i1 := x1.AuxInt
 32419  		s := x1.Aux
 32420  		_ = x1.Args[2]
 32421  		p := x1.Args[0]
 32422  		idx := x1.Args[1]
 32423  		mem := x1.Args[2]
 32424  		y := or.Args[1]
 32425  		s0 := v.Args[1]
 32426  		if s0.Op != OpAMD64SHLQconst {
 32427  			break
 32428  		}
 32429  		j0 := s0.AuxInt
 32430  		x0 := s0.Args[0]
 32431  		if x0.Op != OpAMD64MOVBloadidx1 {
 32432  			break
 32433  		}
 32434  		i0 := x0.AuxInt
 32435  		if x0.Aux != s {
 32436  			break
 32437  		}
 32438  		_ = x0.Args[2]
 32439  		if idx != x0.Args[0] {
 32440  			break
 32441  		}
 32442  		if p != x0.Args[1] {
 32443  			break
 32444  		}
 32445  		if mem != x0.Args[2] {
 32446  			break
 32447  		}
 32448  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32449  			break
 32450  		}
 32451  		b = mergePoint(b, x0, x1)
 32452  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32453  		v.reset(OpCopy)
 32454  		v.AddArg(v0)
 32455  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32456  		v1.AuxInt = j1
 32457  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32458  		v2.AuxInt = 8
 32459  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32460  		v3.AuxInt = i0
 32461  		v3.Aux = s
 32462  		v3.AddArg(p)
 32463  		v3.AddArg(idx)
 32464  		v3.AddArg(mem)
 32465  		v2.AddArg(v3)
 32466  		v1.AddArg(v2)
 32467  		v0.AddArg(v1)
 32468  		v0.AddArg(y)
 32469  		return true
 32470  	}
 32471  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 32472  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32473  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32474  	for {
 32475  		_ = v.Args[1]
 32476  		or := v.Args[0]
 32477  		if or.Op != OpAMD64ORQ {
 32478  			break
 32479  		}
 32480  		_ = or.Args[1]
 32481  		s1 := or.Args[0]
 32482  		if s1.Op != OpAMD64SHLQconst {
 32483  			break
 32484  		}
 32485  		j1 := s1.AuxInt
 32486  		x1 := s1.Args[0]
 32487  		if x1.Op != OpAMD64MOVBloadidx1 {
 32488  			break
 32489  		}
 32490  		i1 := x1.AuxInt
 32491  		s := x1.Aux
 32492  		_ = x1.Args[2]
 32493  		idx := x1.Args[0]
 32494  		p := x1.Args[1]
 32495  		mem := x1.Args[2]
 32496  		y := or.Args[1]
 32497  		s0 := v.Args[1]
 32498  		if s0.Op != OpAMD64SHLQconst {
 32499  			break
 32500  		}
 32501  		j0 := s0.AuxInt
 32502  		x0 := s0.Args[0]
 32503  		if x0.Op != OpAMD64MOVBloadidx1 {
 32504  			break
 32505  		}
 32506  		i0 := x0.AuxInt
 32507  		if x0.Aux != s {
 32508  			break
 32509  		}
 32510  		_ = x0.Args[2]
 32511  		if idx != x0.Args[0] {
 32512  			break
 32513  		}
 32514  		if p != x0.Args[1] {
 32515  			break
 32516  		}
 32517  		if mem != x0.Args[2] {
 32518  			break
 32519  		}
 32520  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32521  			break
 32522  		}
 32523  		b = mergePoint(b, x0, x1)
 32524  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32525  		v.reset(OpCopy)
 32526  		v.AddArg(v0)
 32527  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32528  		v1.AuxInt = j1
 32529  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32530  		v2.AuxInt = 8
 32531  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32532  		v3.AuxInt = i0
 32533  		v3.Aux = s
 32534  		v3.AddArg(p)
 32535  		v3.AddArg(idx)
 32536  		v3.AddArg(mem)
 32537  		v2.AddArg(v3)
 32538  		v1.AddArg(v2)
 32539  		v0.AddArg(v1)
 32540  		v0.AddArg(y)
 32541  		return true
 32542  	}
 32543  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 32544  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32545  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32546  	for {
 32547  		_ = v.Args[1]
 32548  		or := v.Args[0]
 32549  		if or.Op != OpAMD64ORQ {
 32550  			break
 32551  		}
 32552  		_ = or.Args[1]
 32553  		y := or.Args[0]
 32554  		s1 := or.Args[1]
 32555  		if s1.Op != OpAMD64SHLQconst {
 32556  			break
 32557  		}
 32558  		j1 := s1.AuxInt
 32559  		x1 := s1.Args[0]
 32560  		if x1.Op != OpAMD64MOVBloadidx1 {
 32561  			break
 32562  		}
 32563  		i1 := x1.AuxInt
 32564  		s := x1.Aux
 32565  		_ = x1.Args[2]
 32566  		p := x1.Args[0]
 32567  		idx := x1.Args[1]
 32568  		mem := x1.Args[2]
 32569  		s0 := v.Args[1]
 32570  		if s0.Op != OpAMD64SHLQconst {
 32571  			break
 32572  		}
 32573  		j0 := s0.AuxInt
 32574  		x0 := s0.Args[0]
 32575  		if x0.Op != OpAMD64MOVBloadidx1 {
 32576  			break
 32577  		}
 32578  		i0 := x0.AuxInt
 32579  		if x0.Aux != s {
 32580  			break
 32581  		}
 32582  		_ = x0.Args[2]
 32583  		if idx != x0.Args[0] {
 32584  			break
 32585  		}
 32586  		if p != x0.Args[1] {
 32587  			break
 32588  		}
 32589  		if mem != x0.Args[2] {
 32590  			break
 32591  		}
 32592  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32593  			break
 32594  		}
 32595  		b = mergePoint(b, x0, x1)
 32596  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32597  		v.reset(OpCopy)
 32598  		v.AddArg(v0)
 32599  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32600  		v1.AuxInt = j1
 32601  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32602  		v2.AuxInt = 8
 32603  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32604  		v3.AuxInt = i0
 32605  		v3.Aux = s
 32606  		v3.AddArg(p)
 32607  		v3.AddArg(idx)
 32608  		v3.AddArg(mem)
 32609  		v2.AddArg(v3)
 32610  		v1.AddArg(v2)
 32611  		v0.AddArg(v1)
 32612  		v0.AddArg(y)
 32613  		return true
 32614  	}
 32615  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
 32616  	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
 32617  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
 32618  	for {
 32619  		_ = v.Args[1]
 32620  		or := v.Args[0]
 32621  		if or.Op != OpAMD64ORQ {
 32622  			break
 32623  		}
 32624  		_ = or.Args[1]
 32625  		y := or.Args[0]
 32626  		s1 := or.Args[1]
 32627  		if s1.Op != OpAMD64SHLQconst {
 32628  			break
 32629  		}
 32630  		j1 := s1.AuxInt
 32631  		x1 := s1.Args[0]
 32632  		if x1.Op != OpAMD64MOVBloadidx1 {
 32633  			break
 32634  		}
 32635  		i1 := x1.AuxInt
 32636  		s := x1.Aux
 32637  		_ = x1.Args[2]
 32638  		idx := x1.Args[0]
 32639  		p := x1.Args[1]
 32640  		mem := x1.Args[2]
 32641  		s0 := v.Args[1]
 32642  		if s0.Op != OpAMD64SHLQconst {
 32643  			break
 32644  		}
 32645  		j0 := s0.AuxInt
 32646  		x0 := s0.Args[0]
 32647  		if x0.Op != OpAMD64MOVBloadidx1 {
 32648  			break
 32649  		}
 32650  		i0 := x0.AuxInt
 32651  		if x0.Aux != s {
 32652  			break
 32653  		}
 32654  		_ = x0.Args[2]
 32655  		if idx != x0.Args[0] {
 32656  			break
 32657  		}
 32658  		if p != x0.Args[1] {
 32659  			break
 32660  		}
 32661  		if mem != x0.Args[2] {
 32662  			break
 32663  		}
 32664  		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32665  			break
 32666  		}
 32667  		b = mergePoint(b, x0, x1)
 32668  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32669  		v.reset(OpCopy)
 32670  		v.AddArg(v0)
 32671  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32672  		v1.AuxInt = j1
 32673  		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
 32674  		v2.AuxInt = 8
 32675  		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
 32676  		v3.AuxInt = i0
 32677  		v3.Aux = s
 32678  		v3.AddArg(p)
 32679  		v3.AddArg(idx)
 32680  		v3.AddArg(mem)
 32681  		v2.AddArg(v3)
 32682  		v1.AddArg(v2)
 32683  		v0.AddArg(v1)
 32684  		v0.AddArg(y)
 32685  		return true
 32686  	}
 32687  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
 32688  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 32689  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 32690  	for {
 32691  		_ = v.Args[1]
 32692  		s0 := v.Args[0]
 32693  		if s0.Op != OpAMD64SHLQconst {
 32694  			break
 32695  		}
 32696  		j0 := s0.AuxInt
 32697  		r0 := s0.Args[0]
 32698  		if r0.Op != OpAMD64ROLWconst {
 32699  			break
 32700  		}
 32701  		if r0.AuxInt != 8 {
 32702  			break
 32703  		}
 32704  		x0 := r0.Args[0]
 32705  		if x0.Op != OpAMD64MOVWloadidx1 {
 32706  			break
 32707  		}
 32708  		i0 := x0.AuxInt
 32709  		s := x0.Aux
 32710  		_ = x0.Args[2]
 32711  		p := x0.Args[0]
 32712  		idx := x0.Args[1]
 32713  		mem := x0.Args[2]
 32714  		or := v.Args[1]
 32715  		if or.Op != OpAMD64ORQ {
 32716  			break
 32717  		}
 32718  		_ = or.Args[1]
 32719  		s1 := or.Args[0]
 32720  		if s1.Op != OpAMD64SHLQconst {
 32721  			break
 32722  		}
 32723  		j1 := s1.AuxInt
 32724  		r1 := s1.Args[0]
 32725  		if r1.Op != OpAMD64ROLWconst {
 32726  			break
 32727  		}
 32728  		if r1.AuxInt != 8 {
 32729  			break
 32730  		}
 32731  		x1 := r1.Args[0]
 32732  		if x1.Op != OpAMD64MOVWloadidx1 {
 32733  			break
 32734  		}
 32735  		i1 := x1.AuxInt
 32736  		if x1.Aux != s {
 32737  			break
 32738  		}
 32739  		_ = x1.Args[2]
 32740  		if p != x1.Args[0] {
 32741  			break
 32742  		}
 32743  		if idx != x1.Args[1] {
 32744  			break
 32745  		}
 32746  		if mem != x1.Args[2] {
 32747  			break
 32748  		}
 32749  		y := or.Args[1]
 32750  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32751  			break
 32752  		}
 32753  		b = mergePoint(b, x0, x1)
 32754  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32755  		v.reset(OpCopy)
 32756  		v.AddArg(v0)
 32757  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32758  		v1.AuxInt = j1
 32759  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 32760  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 32761  		v3.AuxInt = i0
 32762  		v3.Aux = s
 32763  		v3.AddArg(p)
 32764  		v3.AddArg(idx)
 32765  		v3.AddArg(mem)
 32766  		v2.AddArg(v3)
 32767  		v1.AddArg(v2)
 32768  		v0.AddArg(v1)
 32769  		v0.AddArg(y)
 32770  		return true
 32771  	}
 32772  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
 32773  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 32774  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 32775  	for {
 32776  		_ = v.Args[1]
 32777  		s0 := v.Args[0]
 32778  		if s0.Op != OpAMD64SHLQconst {
 32779  			break
 32780  		}
 32781  		j0 := s0.AuxInt
 32782  		r0 := s0.Args[0]
 32783  		if r0.Op != OpAMD64ROLWconst {
 32784  			break
 32785  		}
 32786  		if r0.AuxInt != 8 {
 32787  			break
 32788  		}
 32789  		x0 := r0.Args[0]
 32790  		if x0.Op != OpAMD64MOVWloadidx1 {
 32791  			break
 32792  		}
 32793  		i0 := x0.AuxInt
 32794  		s := x0.Aux
 32795  		_ = x0.Args[2]
 32796  		idx := x0.Args[0]
 32797  		p := x0.Args[1]
 32798  		mem := x0.Args[2]
 32799  		or := v.Args[1]
 32800  		if or.Op != OpAMD64ORQ {
 32801  			break
 32802  		}
 32803  		_ = or.Args[1]
 32804  		s1 := or.Args[0]
 32805  		if s1.Op != OpAMD64SHLQconst {
 32806  			break
 32807  		}
 32808  		j1 := s1.AuxInt
 32809  		r1 := s1.Args[0]
 32810  		if r1.Op != OpAMD64ROLWconst {
 32811  			break
 32812  		}
 32813  		if r1.AuxInt != 8 {
 32814  			break
 32815  		}
 32816  		x1 := r1.Args[0]
 32817  		if x1.Op != OpAMD64MOVWloadidx1 {
 32818  			break
 32819  		}
 32820  		i1 := x1.AuxInt
 32821  		if x1.Aux != s {
 32822  			break
 32823  		}
 32824  		_ = x1.Args[2]
 32825  		if p != x1.Args[0] {
 32826  			break
 32827  		}
 32828  		if idx != x1.Args[1] {
 32829  			break
 32830  		}
 32831  		if mem != x1.Args[2] {
 32832  			break
 32833  		}
 32834  		y := or.Args[1]
 32835  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32836  			break
 32837  		}
 32838  		b = mergePoint(b, x0, x1)
 32839  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32840  		v.reset(OpCopy)
 32841  		v.AddArg(v0)
 32842  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32843  		v1.AuxInt = j1
 32844  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 32845  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 32846  		v3.AuxInt = i0
 32847  		v3.Aux = s
 32848  		v3.AddArg(p)
 32849  		v3.AddArg(idx)
 32850  		v3.AddArg(mem)
 32851  		v2.AddArg(v3)
 32852  		v1.AddArg(v2)
 32853  		v0.AddArg(v1)
 32854  		v0.AddArg(y)
 32855  		return true
 32856  	}
 32857  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
 32858  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 32859  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 32860  	for {
 32861  		_ = v.Args[1]
 32862  		s0 := v.Args[0]
 32863  		if s0.Op != OpAMD64SHLQconst {
 32864  			break
 32865  		}
 32866  		j0 := s0.AuxInt
 32867  		r0 := s0.Args[0]
 32868  		if r0.Op != OpAMD64ROLWconst {
 32869  			break
 32870  		}
 32871  		if r0.AuxInt != 8 {
 32872  			break
 32873  		}
 32874  		x0 := r0.Args[0]
 32875  		if x0.Op != OpAMD64MOVWloadidx1 {
 32876  			break
 32877  		}
 32878  		i0 := x0.AuxInt
 32879  		s := x0.Aux
 32880  		_ = x0.Args[2]
 32881  		p := x0.Args[0]
 32882  		idx := x0.Args[1]
 32883  		mem := x0.Args[2]
 32884  		or := v.Args[1]
 32885  		if or.Op != OpAMD64ORQ {
 32886  			break
 32887  		}
 32888  		_ = or.Args[1]
 32889  		s1 := or.Args[0]
 32890  		if s1.Op != OpAMD64SHLQconst {
 32891  			break
 32892  		}
 32893  		j1 := s1.AuxInt
 32894  		r1 := s1.Args[0]
 32895  		if r1.Op != OpAMD64ROLWconst {
 32896  			break
 32897  		}
 32898  		if r1.AuxInt != 8 {
 32899  			break
 32900  		}
 32901  		x1 := r1.Args[0]
 32902  		if x1.Op != OpAMD64MOVWloadidx1 {
 32903  			break
 32904  		}
 32905  		i1 := x1.AuxInt
 32906  		if x1.Aux != s {
 32907  			break
 32908  		}
 32909  		_ = x1.Args[2]
 32910  		if idx != x1.Args[0] {
 32911  			break
 32912  		}
 32913  		if p != x1.Args[1] {
 32914  			break
 32915  		}
 32916  		if mem != x1.Args[2] {
 32917  			break
 32918  		}
 32919  		y := or.Args[1]
 32920  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 32921  			break
 32922  		}
 32923  		b = mergePoint(b, x0, x1)
 32924  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 32925  		v.reset(OpCopy)
 32926  		v.AddArg(v0)
 32927  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 32928  		v1.AuxInt = j1
 32929  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 32930  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 32931  		v3.AuxInt = i0
 32932  		v3.Aux = s
 32933  		v3.AddArg(p)
 32934  		v3.AddArg(idx)
 32935  		v3.AddArg(mem)
 32936  		v2.AddArg(v3)
 32937  		v1.AddArg(v2)
 32938  		v0.AddArg(v1)
 32939  		v0.AddArg(y)
 32940  		return true
 32941  	}
 32942  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
 32943  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 32944  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 32945  	for {
 32946  		_ = v.Args[1]
 32947  		s0 := v.Args[0]
 32948  		if s0.Op != OpAMD64SHLQconst {
 32949  			break
 32950  		}
 32951  		j0 := s0.AuxInt
 32952  		r0 := s0.Args[0]
 32953  		if r0.Op != OpAMD64ROLWconst {
 32954  			break
 32955  		}
 32956  		if r0.AuxInt != 8 {
 32957  			break
 32958  		}
 32959  		x0 := r0.Args[0]
 32960  		if x0.Op != OpAMD64MOVWloadidx1 {
 32961  			break
 32962  		}
 32963  		i0 := x0.AuxInt
 32964  		s := x0.Aux
 32965  		_ = x0.Args[2]
 32966  		idx := x0.Args[0]
 32967  		p := x0.Args[1]
 32968  		mem := x0.Args[2]
 32969  		or := v.Args[1]
 32970  		if or.Op != OpAMD64ORQ {
 32971  			break
 32972  		}
 32973  		_ = or.Args[1]
 32974  		s1 := or.Args[0]
 32975  		if s1.Op != OpAMD64SHLQconst {
 32976  			break
 32977  		}
 32978  		j1 := s1.AuxInt
 32979  		r1 := s1.Args[0]
 32980  		if r1.Op != OpAMD64ROLWconst {
 32981  			break
 32982  		}
 32983  		if r1.AuxInt != 8 {
 32984  			break
 32985  		}
 32986  		x1 := r1.Args[0]
 32987  		if x1.Op != OpAMD64MOVWloadidx1 {
 32988  			break
 32989  		}
 32990  		i1 := x1.AuxInt
 32991  		if x1.Aux != s {
 32992  			break
 32993  		}
 32994  		_ = x1.Args[2]
 32995  		if idx != x1.Args[0] {
 32996  			break
 32997  		}
 32998  		if p != x1.Args[1] {
 32999  			break
 33000  		}
 33001  		if mem != x1.Args[2] {
 33002  			break
 33003  		}
 33004  		y := or.Args[1]
 33005  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33006  			break
 33007  		}
 33008  		b = mergePoint(b, x0, x1)
 33009  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33010  		v.reset(OpCopy)
 33011  		v.AddArg(v0)
 33012  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33013  		v1.AuxInt = j1
 33014  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33015  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33016  		v3.AuxInt = i0
 33017  		v3.Aux = s
 33018  		v3.AddArg(p)
 33019  		v3.AddArg(idx)
 33020  		v3.AddArg(mem)
 33021  		v2.AddArg(v3)
 33022  		v1.AddArg(v2)
 33023  		v0.AddArg(v1)
 33024  		v0.AddArg(y)
 33025  		return true
 33026  	}
 33027  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
 33028  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33029  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33030  	for {
 33031  		_ = v.Args[1]
 33032  		s0 := v.Args[0]
 33033  		if s0.Op != OpAMD64SHLQconst {
 33034  			break
 33035  		}
 33036  		j0 := s0.AuxInt
 33037  		r0 := s0.Args[0]
 33038  		if r0.Op != OpAMD64ROLWconst {
 33039  			break
 33040  		}
 33041  		if r0.AuxInt != 8 {
 33042  			break
 33043  		}
 33044  		x0 := r0.Args[0]
 33045  		if x0.Op != OpAMD64MOVWloadidx1 {
 33046  			break
 33047  		}
 33048  		i0 := x0.AuxInt
 33049  		s := x0.Aux
 33050  		_ = x0.Args[2]
 33051  		p := x0.Args[0]
 33052  		idx := x0.Args[1]
 33053  		mem := x0.Args[2]
 33054  		or := v.Args[1]
 33055  		if or.Op != OpAMD64ORQ {
 33056  			break
 33057  		}
 33058  		_ = or.Args[1]
 33059  		y := or.Args[0]
 33060  		s1 := or.Args[1]
 33061  		if s1.Op != OpAMD64SHLQconst {
 33062  			break
 33063  		}
 33064  		j1 := s1.AuxInt
 33065  		r1 := s1.Args[0]
 33066  		if r1.Op != OpAMD64ROLWconst {
 33067  			break
 33068  		}
 33069  		if r1.AuxInt != 8 {
 33070  			break
 33071  		}
 33072  		x1 := r1.Args[0]
 33073  		if x1.Op != OpAMD64MOVWloadidx1 {
 33074  			break
 33075  		}
 33076  		i1 := x1.AuxInt
 33077  		if x1.Aux != s {
 33078  			break
 33079  		}
 33080  		_ = x1.Args[2]
 33081  		if p != x1.Args[0] {
 33082  			break
 33083  		}
 33084  		if idx != x1.Args[1] {
 33085  			break
 33086  		}
 33087  		if mem != x1.Args[2] {
 33088  			break
 33089  		}
 33090  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33091  			break
 33092  		}
 33093  		b = mergePoint(b, x0, x1)
 33094  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33095  		v.reset(OpCopy)
 33096  		v.AddArg(v0)
 33097  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33098  		v1.AuxInt = j1
 33099  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33100  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33101  		v3.AuxInt = i0
 33102  		v3.Aux = s
 33103  		v3.AddArg(p)
 33104  		v3.AddArg(idx)
 33105  		v3.AddArg(mem)
 33106  		v2.AddArg(v3)
 33107  		v1.AddArg(v2)
 33108  		v0.AddArg(v1)
 33109  		v0.AddArg(y)
 33110  		return true
 33111  	}
 33112  	return false
 33113  }
 33114  func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
 33115  	b := v.Block
 33116  	_ = b
 33117  	typ := &b.Func.Config.Types
 33118  	_ = typ
 33119  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
 33120  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33121  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33122  	for {
 33123  		_ = v.Args[1]
 33124  		s0 := v.Args[0]
 33125  		if s0.Op != OpAMD64SHLQconst {
 33126  			break
 33127  		}
 33128  		j0 := s0.AuxInt
 33129  		r0 := s0.Args[0]
 33130  		if r0.Op != OpAMD64ROLWconst {
 33131  			break
 33132  		}
 33133  		if r0.AuxInt != 8 {
 33134  			break
 33135  		}
 33136  		x0 := r0.Args[0]
 33137  		if x0.Op != OpAMD64MOVWloadidx1 {
 33138  			break
 33139  		}
 33140  		i0 := x0.AuxInt
 33141  		s := x0.Aux
 33142  		_ = x0.Args[2]
 33143  		idx := x0.Args[0]
 33144  		p := x0.Args[1]
 33145  		mem := x0.Args[2]
 33146  		or := v.Args[1]
 33147  		if or.Op != OpAMD64ORQ {
 33148  			break
 33149  		}
 33150  		_ = or.Args[1]
 33151  		y := or.Args[0]
 33152  		s1 := or.Args[1]
 33153  		if s1.Op != OpAMD64SHLQconst {
 33154  			break
 33155  		}
 33156  		j1 := s1.AuxInt
 33157  		r1 := s1.Args[0]
 33158  		if r1.Op != OpAMD64ROLWconst {
 33159  			break
 33160  		}
 33161  		if r1.AuxInt != 8 {
 33162  			break
 33163  		}
 33164  		x1 := r1.Args[0]
 33165  		if x1.Op != OpAMD64MOVWloadidx1 {
 33166  			break
 33167  		}
 33168  		i1 := x1.AuxInt
 33169  		if x1.Aux != s {
 33170  			break
 33171  		}
 33172  		_ = x1.Args[2]
 33173  		if p != x1.Args[0] {
 33174  			break
 33175  		}
 33176  		if idx != x1.Args[1] {
 33177  			break
 33178  		}
 33179  		if mem != x1.Args[2] {
 33180  			break
 33181  		}
 33182  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33183  			break
 33184  		}
 33185  		b = mergePoint(b, x0, x1)
 33186  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33187  		v.reset(OpCopy)
 33188  		v.AddArg(v0)
 33189  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33190  		v1.AuxInt = j1
 33191  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33192  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33193  		v3.AuxInt = i0
 33194  		v3.Aux = s
 33195  		v3.AddArg(p)
 33196  		v3.AddArg(idx)
 33197  		v3.AddArg(mem)
 33198  		v2.AddArg(v3)
 33199  		v1.AddArg(v2)
 33200  		v0.AddArg(v1)
 33201  		v0.AddArg(y)
 33202  		return true
 33203  	}
 33204  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
 33205  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33206  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33207  	for {
 33208  		_ = v.Args[1]
 33209  		s0 := v.Args[0]
 33210  		if s0.Op != OpAMD64SHLQconst {
 33211  			break
 33212  		}
 33213  		j0 := s0.AuxInt
 33214  		r0 := s0.Args[0]
 33215  		if r0.Op != OpAMD64ROLWconst {
 33216  			break
 33217  		}
 33218  		if r0.AuxInt != 8 {
 33219  			break
 33220  		}
 33221  		x0 := r0.Args[0]
 33222  		if x0.Op != OpAMD64MOVWloadidx1 {
 33223  			break
 33224  		}
 33225  		i0 := x0.AuxInt
 33226  		s := x0.Aux
 33227  		_ = x0.Args[2]
 33228  		p := x0.Args[0]
 33229  		idx := x0.Args[1]
 33230  		mem := x0.Args[2]
 33231  		or := v.Args[1]
 33232  		if or.Op != OpAMD64ORQ {
 33233  			break
 33234  		}
 33235  		_ = or.Args[1]
 33236  		y := or.Args[0]
 33237  		s1 := or.Args[1]
 33238  		if s1.Op != OpAMD64SHLQconst {
 33239  			break
 33240  		}
 33241  		j1 := s1.AuxInt
 33242  		r1 := s1.Args[0]
 33243  		if r1.Op != OpAMD64ROLWconst {
 33244  			break
 33245  		}
 33246  		if r1.AuxInt != 8 {
 33247  			break
 33248  		}
 33249  		x1 := r1.Args[0]
 33250  		if x1.Op != OpAMD64MOVWloadidx1 {
 33251  			break
 33252  		}
 33253  		i1 := x1.AuxInt
 33254  		if x1.Aux != s {
 33255  			break
 33256  		}
 33257  		_ = x1.Args[2]
 33258  		if idx != x1.Args[0] {
 33259  			break
 33260  		}
 33261  		if p != x1.Args[1] {
 33262  			break
 33263  		}
 33264  		if mem != x1.Args[2] {
 33265  			break
 33266  		}
 33267  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33268  			break
 33269  		}
 33270  		b = mergePoint(b, x0, x1)
 33271  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33272  		v.reset(OpCopy)
 33273  		v.AddArg(v0)
 33274  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33275  		v1.AuxInt = j1
 33276  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33277  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33278  		v3.AuxInt = i0
 33279  		v3.Aux = s
 33280  		v3.AddArg(p)
 33281  		v3.AddArg(idx)
 33282  		v3.AddArg(mem)
 33283  		v2.AddArg(v3)
 33284  		v1.AddArg(v2)
 33285  		v0.AddArg(v1)
 33286  		v0.AddArg(y)
 33287  		return true
 33288  	}
 33289  	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
 33290  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33291  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33292  	for {
 33293  		_ = v.Args[1]
 33294  		s0 := v.Args[0]
 33295  		if s0.Op != OpAMD64SHLQconst {
 33296  			break
 33297  		}
 33298  		j0 := s0.AuxInt
 33299  		r0 := s0.Args[0]
 33300  		if r0.Op != OpAMD64ROLWconst {
 33301  			break
 33302  		}
 33303  		if r0.AuxInt != 8 {
 33304  			break
 33305  		}
 33306  		x0 := r0.Args[0]
 33307  		if x0.Op != OpAMD64MOVWloadidx1 {
 33308  			break
 33309  		}
 33310  		i0 := x0.AuxInt
 33311  		s := x0.Aux
 33312  		_ = x0.Args[2]
 33313  		idx := x0.Args[0]
 33314  		p := x0.Args[1]
 33315  		mem := x0.Args[2]
 33316  		or := v.Args[1]
 33317  		if or.Op != OpAMD64ORQ {
 33318  			break
 33319  		}
 33320  		_ = or.Args[1]
 33321  		y := or.Args[0]
 33322  		s1 := or.Args[1]
 33323  		if s1.Op != OpAMD64SHLQconst {
 33324  			break
 33325  		}
 33326  		j1 := s1.AuxInt
 33327  		r1 := s1.Args[0]
 33328  		if r1.Op != OpAMD64ROLWconst {
 33329  			break
 33330  		}
 33331  		if r1.AuxInt != 8 {
 33332  			break
 33333  		}
 33334  		x1 := r1.Args[0]
 33335  		if x1.Op != OpAMD64MOVWloadidx1 {
 33336  			break
 33337  		}
 33338  		i1 := x1.AuxInt
 33339  		if x1.Aux != s {
 33340  			break
 33341  		}
 33342  		_ = x1.Args[2]
 33343  		if idx != x1.Args[0] {
 33344  			break
 33345  		}
 33346  		if p != x1.Args[1] {
 33347  			break
 33348  		}
 33349  		if mem != x1.Args[2] {
 33350  			break
 33351  		}
 33352  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33353  			break
 33354  		}
 33355  		b = mergePoint(b, x0, x1)
 33356  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33357  		v.reset(OpCopy)
 33358  		v.AddArg(v0)
 33359  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33360  		v1.AuxInt = j1
 33361  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33362  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33363  		v3.AuxInt = i0
 33364  		v3.Aux = s
 33365  		v3.AddArg(p)
 33366  		v3.AddArg(idx)
 33367  		v3.AddArg(mem)
 33368  		v2.AddArg(v3)
 33369  		v1.AddArg(v2)
 33370  		v0.AddArg(v1)
 33371  		v0.AddArg(y)
 33372  		return true
 33373  	}
 33374  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 33375  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33376  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33377  	for {
 33378  		_ = v.Args[1]
 33379  		or := v.Args[0]
 33380  		if or.Op != OpAMD64ORQ {
 33381  			break
 33382  		}
 33383  		_ = or.Args[1]
 33384  		s1 := or.Args[0]
 33385  		if s1.Op != OpAMD64SHLQconst {
 33386  			break
 33387  		}
 33388  		j1 := s1.AuxInt
 33389  		r1 := s1.Args[0]
 33390  		if r1.Op != OpAMD64ROLWconst {
 33391  			break
 33392  		}
 33393  		if r1.AuxInt != 8 {
 33394  			break
 33395  		}
 33396  		x1 := r1.Args[0]
 33397  		if x1.Op != OpAMD64MOVWloadidx1 {
 33398  			break
 33399  		}
 33400  		i1 := x1.AuxInt
 33401  		s := x1.Aux
 33402  		_ = x1.Args[2]
 33403  		p := x1.Args[0]
 33404  		idx := x1.Args[1]
 33405  		mem := x1.Args[2]
 33406  		y := or.Args[1]
 33407  		s0 := v.Args[1]
 33408  		if s0.Op != OpAMD64SHLQconst {
 33409  			break
 33410  		}
 33411  		j0 := s0.AuxInt
 33412  		r0 := s0.Args[0]
 33413  		if r0.Op != OpAMD64ROLWconst {
 33414  			break
 33415  		}
 33416  		if r0.AuxInt != 8 {
 33417  			break
 33418  		}
 33419  		x0 := r0.Args[0]
 33420  		if x0.Op != OpAMD64MOVWloadidx1 {
 33421  			break
 33422  		}
 33423  		i0 := x0.AuxInt
 33424  		if x0.Aux != s {
 33425  			break
 33426  		}
 33427  		_ = x0.Args[2]
 33428  		if p != x0.Args[0] {
 33429  			break
 33430  		}
 33431  		if idx != x0.Args[1] {
 33432  			break
 33433  		}
 33434  		if mem != x0.Args[2] {
 33435  			break
 33436  		}
 33437  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33438  			break
 33439  		}
 33440  		b = mergePoint(b, x0, x1)
 33441  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33442  		v.reset(OpCopy)
 33443  		v.AddArg(v0)
 33444  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33445  		v1.AuxInt = j1
 33446  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33447  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33448  		v3.AuxInt = i0
 33449  		v3.Aux = s
 33450  		v3.AddArg(p)
 33451  		v3.AddArg(idx)
 33452  		v3.AddArg(mem)
 33453  		v2.AddArg(v3)
 33454  		v1.AddArg(v2)
 33455  		v0.AddArg(v1)
 33456  		v0.AddArg(y)
 33457  		return true
 33458  	}
 33459  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 33460  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33461  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33462  	for {
 33463  		_ = v.Args[1]
 33464  		or := v.Args[0]
 33465  		if or.Op != OpAMD64ORQ {
 33466  			break
 33467  		}
 33468  		_ = or.Args[1]
 33469  		s1 := or.Args[0]
 33470  		if s1.Op != OpAMD64SHLQconst {
 33471  			break
 33472  		}
 33473  		j1 := s1.AuxInt
 33474  		r1 := s1.Args[0]
 33475  		if r1.Op != OpAMD64ROLWconst {
 33476  			break
 33477  		}
 33478  		if r1.AuxInt != 8 {
 33479  			break
 33480  		}
 33481  		x1 := r1.Args[0]
 33482  		if x1.Op != OpAMD64MOVWloadidx1 {
 33483  			break
 33484  		}
 33485  		i1 := x1.AuxInt
 33486  		s := x1.Aux
 33487  		_ = x1.Args[2]
 33488  		idx := x1.Args[0]
 33489  		p := x1.Args[1]
 33490  		mem := x1.Args[2]
 33491  		y := or.Args[1]
 33492  		s0 := v.Args[1]
 33493  		if s0.Op != OpAMD64SHLQconst {
 33494  			break
 33495  		}
 33496  		j0 := s0.AuxInt
 33497  		r0 := s0.Args[0]
 33498  		if r0.Op != OpAMD64ROLWconst {
 33499  			break
 33500  		}
 33501  		if r0.AuxInt != 8 {
 33502  			break
 33503  		}
 33504  		x0 := r0.Args[0]
 33505  		if x0.Op != OpAMD64MOVWloadidx1 {
 33506  			break
 33507  		}
 33508  		i0 := x0.AuxInt
 33509  		if x0.Aux != s {
 33510  			break
 33511  		}
 33512  		_ = x0.Args[2]
 33513  		if p != x0.Args[0] {
 33514  			break
 33515  		}
 33516  		if idx != x0.Args[1] {
 33517  			break
 33518  		}
 33519  		if mem != x0.Args[2] {
 33520  			break
 33521  		}
 33522  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33523  			break
 33524  		}
 33525  		b = mergePoint(b, x0, x1)
 33526  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33527  		v.reset(OpCopy)
 33528  		v.AddArg(v0)
 33529  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33530  		v1.AuxInt = j1
 33531  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33532  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33533  		v3.AuxInt = i0
 33534  		v3.Aux = s
 33535  		v3.AddArg(p)
 33536  		v3.AddArg(idx)
 33537  		v3.AddArg(mem)
 33538  		v2.AddArg(v3)
 33539  		v1.AddArg(v2)
 33540  		v0.AddArg(v1)
 33541  		v0.AddArg(y)
 33542  		return true
 33543  	}
 33544  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 33545  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33546  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33547  	for {
 33548  		_ = v.Args[1]
 33549  		or := v.Args[0]
 33550  		if or.Op != OpAMD64ORQ {
 33551  			break
 33552  		}
 33553  		_ = or.Args[1]
 33554  		y := or.Args[0]
 33555  		s1 := or.Args[1]
 33556  		if s1.Op != OpAMD64SHLQconst {
 33557  			break
 33558  		}
 33559  		j1 := s1.AuxInt
 33560  		r1 := s1.Args[0]
 33561  		if r1.Op != OpAMD64ROLWconst {
 33562  			break
 33563  		}
 33564  		if r1.AuxInt != 8 {
 33565  			break
 33566  		}
 33567  		x1 := r1.Args[0]
 33568  		if x1.Op != OpAMD64MOVWloadidx1 {
 33569  			break
 33570  		}
 33571  		i1 := x1.AuxInt
 33572  		s := x1.Aux
 33573  		_ = x1.Args[2]
 33574  		p := x1.Args[0]
 33575  		idx := x1.Args[1]
 33576  		mem := x1.Args[2]
 33577  		s0 := v.Args[1]
 33578  		if s0.Op != OpAMD64SHLQconst {
 33579  			break
 33580  		}
 33581  		j0 := s0.AuxInt
 33582  		r0 := s0.Args[0]
 33583  		if r0.Op != OpAMD64ROLWconst {
 33584  			break
 33585  		}
 33586  		if r0.AuxInt != 8 {
 33587  			break
 33588  		}
 33589  		x0 := r0.Args[0]
 33590  		if x0.Op != OpAMD64MOVWloadidx1 {
 33591  			break
 33592  		}
 33593  		i0 := x0.AuxInt
 33594  		if x0.Aux != s {
 33595  			break
 33596  		}
 33597  		_ = x0.Args[2]
 33598  		if p != x0.Args[0] {
 33599  			break
 33600  		}
 33601  		if idx != x0.Args[1] {
 33602  			break
 33603  		}
 33604  		if mem != x0.Args[2] {
 33605  			break
 33606  		}
 33607  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33608  			break
 33609  		}
 33610  		b = mergePoint(b, x0, x1)
 33611  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33612  		v.reset(OpCopy)
 33613  		v.AddArg(v0)
 33614  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33615  		v1.AuxInt = j1
 33616  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33617  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33618  		v3.AuxInt = i0
 33619  		v3.Aux = s
 33620  		v3.AddArg(p)
 33621  		v3.AddArg(idx)
 33622  		v3.AddArg(mem)
 33623  		v2.AddArg(v3)
 33624  		v1.AddArg(v2)
 33625  		v0.AddArg(v1)
 33626  		v0.AddArg(y)
 33627  		return true
 33628  	}
 33629  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
 33630  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33631  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33632  	for {
 33633  		_ = v.Args[1]
 33634  		or := v.Args[0]
 33635  		if or.Op != OpAMD64ORQ {
 33636  			break
 33637  		}
 33638  		_ = or.Args[1]
 33639  		y := or.Args[0]
 33640  		s1 := or.Args[1]
 33641  		if s1.Op != OpAMD64SHLQconst {
 33642  			break
 33643  		}
 33644  		j1 := s1.AuxInt
 33645  		r1 := s1.Args[0]
 33646  		if r1.Op != OpAMD64ROLWconst {
 33647  			break
 33648  		}
 33649  		if r1.AuxInt != 8 {
 33650  			break
 33651  		}
 33652  		x1 := r1.Args[0]
 33653  		if x1.Op != OpAMD64MOVWloadidx1 {
 33654  			break
 33655  		}
 33656  		i1 := x1.AuxInt
 33657  		s := x1.Aux
 33658  		_ = x1.Args[2]
 33659  		idx := x1.Args[0]
 33660  		p := x1.Args[1]
 33661  		mem := x1.Args[2]
 33662  		s0 := v.Args[1]
 33663  		if s0.Op != OpAMD64SHLQconst {
 33664  			break
 33665  		}
 33666  		j0 := s0.AuxInt
 33667  		r0 := s0.Args[0]
 33668  		if r0.Op != OpAMD64ROLWconst {
 33669  			break
 33670  		}
 33671  		if r0.AuxInt != 8 {
 33672  			break
 33673  		}
 33674  		x0 := r0.Args[0]
 33675  		if x0.Op != OpAMD64MOVWloadidx1 {
 33676  			break
 33677  		}
 33678  		i0 := x0.AuxInt
 33679  		if x0.Aux != s {
 33680  			break
 33681  		}
 33682  		_ = x0.Args[2]
 33683  		if p != x0.Args[0] {
 33684  			break
 33685  		}
 33686  		if idx != x0.Args[1] {
 33687  			break
 33688  		}
 33689  		if mem != x0.Args[2] {
 33690  			break
 33691  		}
 33692  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33693  			break
 33694  		}
 33695  		b = mergePoint(b, x0, x1)
 33696  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33697  		v.reset(OpCopy)
 33698  		v.AddArg(v0)
 33699  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33700  		v1.AuxInt = j1
 33701  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33702  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33703  		v3.AuxInt = i0
 33704  		v3.Aux = s
 33705  		v3.AddArg(p)
 33706  		v3.AddArg(idx)
 33707  		v3.AddArg(mem)
 33708  		v2.AddArg(v3)
 33709  		v1.AddArg(v2)
 33710  		v0.AddArg(v1)
 33711  		v0.AddArg(y)
 33712  		return true
 33713  	}
 33714  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 33715  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33716  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33717  	for {
 33718  		_ = v.Args[1]
 33719  		or := v.Args[0]
 33720  		if or.Op != OpAMD64ORQ {
 33721  			break
 33722  		}
 33723  		_ = or.Args[1]
 33724  		s1 := or.Args[0]
 33725  		if s1.Op != OpAMD64SHLQconst {
 33726  			break
 33727  		}
 33728  		j1 := s1.AuxInt
 33729  		r1 := s1.Args[0]
 33730  		if r1.Op != OpAMD64ROLWconst {
 33731  			break
 33732  		}
 33733  		if r1.AuxInt != 8 {
 33734  			break
 33735  		}
 33736  		x1 := r1.Args[0]
 33737  		if x1.Op != OpAMD64MOVWloadidx1 {
 33738  			break
 33739  		}
 33740  		i1 := x1.AuxInt
 33741  		s := x1.Aux
 33742  		_ = x1.Args[2]
 33743  		p := x1.Args[0]
 33744  		idx := x1.Args[1]
 33745  		mem := x1.Args[2]
 33746  		y := or.Args[1]
 33747  		s0 := v.Args[1]
 33748  		if s0.Op != OpAMD64SHLQconst {
 33749  			break
 33750  		}
 33751  		j0 := s0.AuxInt
 33752  		r0 := s0.Args[0]
 33753  		if r0.Op != OpAMD64ROLWconst {
 33754  			break
 33755  		}
 33756  		if r0.AuxInt != 8 {
 33757  			break
 33758  		}
 33759  		x0 := r0.Args[0]
 33760  		if x0.Op != OpAMD64MOVWloadidx1 {
 33761  			break
 33762  		}
 33763  		i0 := x0.AuxInt
 33764  		if x0.Aux != s {
 33765  			break
 33766  		}
 33767  		_ = x0.Args[2]
 33768  		if idx != x0.Args[0] {
 33769  			break
 33770  		}
 33771  		if p != x0.Args[1] {
 33772  			break
 33773  		}
 33774  		if mem != x0.Args[2] {
 33775  			break
 33776  		}
 33777  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33778  			break
 33779  		}
 33780  		b = mergePoint(b, x0, x1)
 33781  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33782  		v.reset(OpCopy)
 33783  		v.AddArg(v0)
 33784  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33785  		v1.AuxInt = j1
 33786  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33787  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33788  		v3.AuxInt = i0
 33789  		v3.Aux = s
 33790  		v3.AddArg(p)
 33791  		v3.AddArg(idx)
 33792  		v3.AddArg(mem)
 33793  		v2.AddArg(v3)
 33794  		v1.AddArg(v2)
 33795  		v0.AddArg(v1)
 33796  		v0.AddArg(y)
 33797  		return true
 33798  	}
 33799  	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 33800  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33801  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33802  	for {
 33803  		_ = v.Args[1]
 33804  		or := v.Args[0]
 33805  		if or.Op != OpAMD64ORQ {
 33806  			break
 33807  		}
 33808  		_ = or.Args[1]
 33809  		s1 := or.Args[0]
 33810  		if s1.Op != OpAMD64SHLQconst {
 33811  			break
 33812  		}
 33813  		j1 := s1.AuxInt
 33814  		r1 := s1.Args[0]
 33815  		if r1.Op != OpAMD64ROLWconst {
 33816  			break
 33817  		}
 33818  		if r1.AuxInt != 8 {
 33819  			break
 33820  		}
 33821  		x1 := r1.Args[0]
 33822  		if x1.Op != OpAMD64MOVWloadidx1 {
 33823  			break
 33824  		}
 33825  		i1 := x1.AuxInt
 33826  		s := x1.Aux
 33827  		_ = x1.Args[2]
 33828  		idx := x1.Args[0]
 33829  		p := x1.Args[1]
 33830  		mem := x1.Args[2]
 33831  		y := or.Args[1]
 33832  		s0 := v.Args[1]
 33833  		if s0.Op != OpAMD64SHLQconst {
 33834  			break
 33835  		}
 33836  		j0 := s0.AuxInt
 33837  		r0 := s0.Args[0]
 33838  		if r0.Op != OpAMD64ROLWconst {
 33839  			break
 33840  		}
 33841  		if r0.AuxInt != 8 {
 33842  			break
 33843  		}
 33844  		x0 := r0.Args[0]
 33845  		if x0.Op != OpAMD64MOVWloadidx1 {
 33846  			break
 33847  		}
 33848  		i0 := x0.AuxInt
 33849  		if x0.Aux != s {
 33850  			break
 33851  		}
 33852  		_ = x0.Args[2]
 33853  		if idx != x0.Args[0] {
 33854  			break
 33855  		}
 33856  		if p != x0.Args[1] {
 33857  			break
 33858  		}
 33859  		if mem != x0.Args[2] {
 33860  			break
 33861  		}
 33862  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33863  			break
 33864  		}
 33865  		b = mergePoint(b, x0, x1)
 33866  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33867  		v.reset(OpCopy)
 33868  		v.AddArg(v0)
 33869  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33870  		v1.AuxInt = j1
 33871  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33872  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33873  		v3.AuxInt = i0
 33874  		v3.Aux = s
 33875  		v3.AddArg(p)
 33876  		v3.AddArg(idx)
 33877  		v3.AddArg(mem)
 33878  		v2.AddArg(v3)
 33879  		v1.AddArg(v2)
 33880  		v0.AddArg(v1)
 33881  		v0.AddArg(y)
 33882  		return true
 33883  	}
 33884  	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
 33885  	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
 33886  	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
 33887  	for {
 33888  		_ = v.Args[1]
 33889  		or := v.Args[0]
 33890  		if or.Op != OpAMD64ORQ {
 33891  			break
 33892  		}
 33893  		_ = or.Args[1]
 33894  		y := or.Args[0]
 33895  		s1 := or.Args[1]
 33896  		if s1.Op != OpAMD64SHLQconst {
 33897  			break
 33898  		}
 33899  		j1 := s1.AuxInt
 33900  		r1 := s1.Args[0]
 33901  		if r1.Op != OpAMD64ROLWconst {
 33902  			break
 33903  		}
 33904  		if r1.AuxInt != 8 {
 33905  			break
 33906  		}
 33907  		x1 := r1.Args[0]
 33908  		if x1.Op != OpAMD64MOVWloadidx1 {
 33909  			break
 33910  		}
 33911  		i1 := x1.AuxInt
 33912  		s := x1.Aux
 33913  		_ = x1.Args[2]
 33914  		p := x1.Args[0]
 33915  		idx := x1.Args[1]
 33916  		mem := x1.Args[2]
 33917  		s0 := v.Args[1]
 33918  		if s0.Op != OpAMD64SHLQconst {
 33919  			break
 33920  		}
 33921  		j0 := s0.AuxInt
 33922  		r0 := s0.Args[0]
 33923  		if r0.Op != OpAMD64ROLWconst {
 33924  			break
 33925  		}
 33926  		if r0.AuxInt != 8 {
 33927  			break
 33928  		}
 33929  		x0 := r0.Args[0]
 33930  		if x0.Op != OpAMD64MOVWloadidx1 {
 33931  			break
 33932  		}
 33933  		i0 := x0.AuxInt
 33934  		if x0.Aux != s {
 33935  			break
 33936  		}
 33937  		_ = x0.Args[2]
 33938  		if idx != x0.Args[0] {
 33939  			break
 33940  		}
 33941  		if p != x0.Args[1] {
 33942  			break
 33943  		}
 33944  		if mem != x0.Args[2] {
 33945  			break
 33946  		}
 33947  		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
 33948  			break
 33949  		}
 33950  		b = mergePoint(b, x0, x1)
 33951  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
 33952  		v.reset(OpCopy)
 33953  		v.AddArg(v0)
 33954  		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
 33955  		v1.AuxInt = j1
 33956  		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
 33957  		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
 33958  		v3.AuxInt = i0
 33959  		v3.Aux = s
 33960  		v3.AddArg(p)
 33961  		v3.AddArg(idx)
 33962  		v3.AddArg(mem)
 33963  		v2.AddArg(v3)
 33964  		v1.AddArg(v2)
 33965  		v0.AddArg(v1)
 33966  		v0.AddArg(y)
 33967  		return true
 33968  	}
 33969  	return false
 33970  }
// rewriteValueAMD64_OpAMD64ORQ_160 applies the final batch of generated ORQ
// rewrite rules: one rule combining two adjacent byte-swapped 16-bit indexed
// loads (ROLWconst [8] of MOVWloadidx1) into a single 32-bit indexed load
// plus BSWAPL, and two rules folding a MOVQload operand into ORQmem.
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		// Both loads must share the same aux symbol, base, index and memory.
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		// Build the merged 32-bit load + BSWAPL at the merge point of the
		// two original loads, and make v a copy of the new ORQ.
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQconst_0 simplifies ORQconst: OR with 0 is the
// identity, OR with -1 is the constant -1, and OR of two constants folds
// into a single MOVQconst. Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQmem_0 rewrites ORQmem: when the memory operand
// reads back a value just stored by MOVSDstore at the same [off]{sym}(ptr)
// location, it forwards the stored value through MOVQf2i and uses a register
// ORQ, avoiding the load. Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: ( ORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		// Store must target the exact same offset, symbol and pointer.
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLB_0 rewrites ROLB (rotate left, byte):
// a negated rotate count turns it into RORB, and a constant count becomes
// ROLBconst with the count masked by 7 (byte rotates are mod 8).
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLBconst_0 simplifies ROLBconst: nested constant
// byte rotates combine into one with counts added mod 8, and a rotate by 0
// is the identity. Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLL_0 rewrites ROLL (rotate left, 32-bit):
// a negated rotate count turns it into RORL, and a constant count becomes
// ROLLconst with the count masked by 31 (32-bit rotates are mod 32).
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLLconst_0 simplifies ROLLconst: nested constant
// 32-bit rotates combine into one with counts added mod 32, and a rotate by
// 0 is the identity. Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLQ_0 rewrites ROLQ (rotate left, 64-bit):
// a negated rotate count turns it into RORQ, and a constant count becomes
// ROLQconst with the count masked by 63 (64-bit rotates are mod 64).
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLQconst_0 simplifies ROLQconst: nested constant
// 64-bit rotates combine into one with counts added mod 64, and a rotate by
// 0 is the identity. Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLW_0 rewrites ROLW (rotate left, 16-bit):
// a negated rotate count turns it into RORW, and a constant count becomes
// ROLWconst with the count masked by 15 (16-bit rotates are mod 16).
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLWconst_0 simplifies ROLWconst: nested constant
// 16-bit rotates combine into one with counts added mod 16, and a rotate by
// 0 is the identity. Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORB_0 rewrites RORB (rotate right, byte):
// a negated rotate count turns it back into ROLB, and a constant count is
// canonicalized to a left rotate ROLBconst with count (-c)&7.
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORL_0 rewrites RORL (rotate right, 32-bit):
// a negated rotate count turns it back into ROLL, and a constant count is
// canonicalized to a left rotate ROLLconst with count (-c)&31.
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORQ_0 rewrites RORQ (rotate right, 64-bit):
// a negated rotate count turns it back into ROLQ, and a constant count is
// canonicalized to a left rotate ROLQconst with count (-c)&63.
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORW_0 rewrites RORW (rotate right, 16-bit):
// a negated rotate count turns it back into ROLW, and a constant count is
// canonicalized to a left rotate ROLWconst with count (-c)&15.
// Reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
 34858  func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
 34859  	// match: (SARB x (MOVQconst [c]))
 34860  	// cond:
 34861  	// result: (SARBconst [min(c&31,7)] x)
 34862  	for {
 34863  		_ = v.Args[1]
 34864  		x := v.Args[0]
 34865  		v_1 := v.Args[1]
 34866  		if v_1.Op != OpAMD64MOVQconst {
 34867  			break
 34868  		}
 34869  		c := v_1.AuxInt
 34870  		v.reset(OpAMD64SARBconst)
 34871  		v.AuxInt = min(c&31, 7)
 34872  		v.AddArg(x)
 34873  		return true
 34874  	}
 34875  	// match: (SARB x (MOVLconst [c]))
 34876  	// cond:
 34877  	// result: (SARBconst [min(c&31,7)] x)
 34878  	for {
 34879  		_ = v.Args[1]
 34880  		x := v.Args[0]
 34881  		v_1 := v.Args[1]
 34882  		if v_1.Op != OpAMD64MOVLconst {
 34883  			break
 34884  		}
 34885  		c := v_1.AuxInt
 34886  		v.reset(OpAMD64SARBconst)
 34887  		v.AuxInt = min(c&31, 7)
 34888  		v.AddArg(x)
 34889  		return true
 34890  	}
 34891  	return false
 34892  }
 34893  func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
 34894  	// match: (SARBconst x [0])
 34895  	// cond:
 34896  	// result: x
 34897  	for {
 34898  		if v.AuxInt != 0 {
 34899  			break
 34900  		}
 34901  		x := v.Args[0]
 34902  		v.reset(OpCopy)
 34903  		v.Type = x.Type
 34904  		v.AddArg(x)
 34905  		return true
 34906  	}
 34907  	// match: (SARBconst [c] (MOVQconst [d]))
 34908  	// cond:
 34909  	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
 34910  	for {
 34911  		c := v.AuxInt
 34912  		v_0 := v.Args[0]
 34913  		if v_0.Op != OpAMD64MOVQconst {
 34914  			break
 34915  		}
 34916  		d := v_0.AuxInt
 34917  		v.reset(OpAMD64MOVQconst)
 34918  		v.AuxInt = int64(int8(d)) >> uint64(c)
 34919  		return true
 34920  	}
 34921  	return false
 34922  }
 34923  func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
 34924  	b := v.Block
 34925  	_ = b
 34926  	// match: (SARL x (MOVQconst [c]))
 34927  	// cond:
 34928  	// result: (SARLconst [c&31] x)
 34929  	for {
 34930  		_ = v.Args[1]
 34931  		x := v.Args[0]
 34932  		v_1 := v.Args[1]
 34933  		if v_1.Op != OpAMD64MOVQconst {
 34934  			break
 34935  		}
 34936  		c := v_1.AuxInt
 34937  		v.reset(OpAMD64SARLconst)
 34938  		v.AuxInt = c & 31
 34939  		v.AddArg(x)
 34940  		return true
 34941  	}
 34942  	// match: (SARL x (MOVLconst [c]))
 34943  	// cond:
 34944  	// result: (SARLconst [c&31] x)
 34945  	for {
 34946  		_ = v.Args[1]
 34947  		x := v.Args[0]
 34948  		v_1 := v.Args[1]
 34949  		if v_1.Op != OpAMD64MOVLconst {
 34950  			break
 34951  		}
 34952  		c := v_1.AuxInt
 34953  		v.reset(OpAMD64SARLconst)
 34954  		v.AuxInt = c & 31
 34955  		v.AddArg(x)
 34956  		return true
 34957  	}
 34958  	// match: (SARL x (ADDQconst [c] y))
 34959  	// cond: c & 31 == 0
 34960  	// result: (SARL x y)
 34961  	for {
 34962  		_ = v.Args[1]
 34963  		x := v.Args[0]
 34964  		v_1 := v.Args[1]
 34965  		if v_1.Op != OpAMD64ADDQconst {
 34966  			break
 34967  		}
 34968  		c := v_1.AuxInt
 34969  		y := v_1.Args[0]
 34970  		if !(c&31 == 0) {
 34971  			break
 34972  		}
 34973  		v.reset(OpAMD64SARL)
 34974  		v.AddArg(x)
 34975  		v.AddArg(y)
 34976  		return true
 34977  	}
 34978  	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
 34979  	// cond: c & 31 == 0
 34980  	// result: (SARL x (NEGQ <t> y))
 34981  	for {
 34982  		_ = v.Args[1]
 34983  		x := v.Args[0]
 34984  		v_1 := v.Args[1]
 34985  		if v_1.Op != OpAMD64NEGQ {
 34986  			break
 34987  		}
 34988  		t := v_1.Type
 34989  		v_1_0 := v_1.Args[0]
 34990  		if v_1_0.Op != OpAMD64ADDQconst {
 34991  			break
 34992  		}
 34993  		c := v_1_0.AuxInt
 34994  		y := v_1_0.Args[0]
 34995  		if !(c&31 == 0) {
 34996  			break
 34997  		}
 34998  		v.reset(OpAMD64SARL)
 34999  		v.AddArg(x)
 35000  		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
 35001  		v0.AddArg(y)
 35002  		v.AddArg(v0)
 35003  		return true
 35004  	}
 35005  	// match: (SARL x (ANDQconst [c] y))
 35006  	// cond: c & 31 == 31
 35007  	// result: (SARL x y)
 35008  	for {
 35009  		_ = v.Args[1]
 35010  		x := v.Args[0]
 35011  		v_1 := v.Args[1]
 35012  		if v_1.Op != OpAMD64ANDQconst {
 35013  			break
 35014  		}
 35015  		c := v_1.AuxInt
 35016  		y := v_1.Args[0]
 35017  		if !(c&31 == 31) {
 35018  			break
 35019  		}
 35020  		v.reset(OpAMD64SARL)
 35021  		v.AddArg(x)
 35022  		v.AddArg(y)
 35023  		return true
 35024  	}
 35025  	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
 35026  	// cond: c & 31 == 31
 35027  	// result: (SARL x (NEGQ <t> y))
 35028  	for {
 35029  		_ = v.Args[1]
 35030  		x := v.Args[0]
 35031  		v_1 := v.Args[1]
 35032  		if v_1.Op != OpAMD64NEGQ {
 35033  			break
 35034  		}
 35035  		t := v_1.Type
 35036  		v_1_0 := v_1.Args[0]
 35037  		if v_1_0.Op != OpAMD64ANDQconst {
 35038  			break
 35039  		}
 35040  		c := v_1_0.AuxInt
 35041  		y := v_1_0.Args[0]
 35042  		if !(c&31 == 31) {
 35043  			break
 35044  		}
 35045  		v.reset(OpAMD64SARL)
 35046  		v.AddArg(x)
 35047  		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
 35048  		v0.AddArg(y)
 35049  		v.AddArg(v0)
 35050  		return true
 35051  	}
 35052  	// match: (SARL x (ADDLconst [c] y))
 35053  	// cond: c & 31 == 0
 35054  	// result: (SARL x y)
 35055  	for {
 35056  		_ = v.Args[1]
 35057  		x := v.Args[0]
 35058  		v_1 := v.Args[1]
 35059  		if v_1.Op != OpAMD64ADDLconst {
 35060  			break
 35061  		}
 35062  		c := v_1.AuxInt
 35063  		y := v_1.Args[0]
 35064  		if !(c&31 == 0) {
 35065  			break
 35066  		}
 35067  		v.reset(OpAMD64SARL)
 35068  		v.AddArg(x)
 35069  		v.AddArg(y)
 35070  		return true
 35071  	}
 35072  	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
 35073  	// cond: c & 31 == 0
 35074  	// result: (SARL x (NEGL <t> y))
 35075  	for {
 35076  		_ = v.Args[1]
 35077  		x := v.Args[0]
 35078  		v_1 := v.Args[1]
 35079  		if v_1.Op != OpAMD64NEGL {
 35080  			break
 35081  		}
 35082  		t := v_1.Type
 35083  		v_1_0 := v_1.Args[0]
 35084  		if v_1_0.Op != OpAMD64ADDLconst {
 35085  			break
 35086  		}
 35087  		c := v_1_0.AuxInt
 35088  		y := v_1_0.Args[0]
 35089  		if !(c&31 == 0) {
 35090  			break
 35091  		}
 35092  		v.reset(OpAMD64SARL)
 35093  		v.AddArg(x)
 35094  		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
 35095  		v0.AddArg(y)
 35096  		v.AddArg(v0)
 35097  		return true
 35098  	}
 35099  	// match: (SARL x (ANDLconst [c] y))
 35100  	// cond: c & 31 == 31
 35101  	// result: (SARL x y)
 35102  	for {
 35103  		_ = v.Args[1]
 35104  		x := v.Args[0]
 35105  		v_1 := v.Args[1]
 35106  		if v_1.Op != OpAMD64ANDLconst {
 35107  			break
 35108  		}
 35109  		c := v_1.AuxInt
 35110  		y := v_1.Args[0]
 35111  		if !(c&31 == 31) {
 35112  			break
 35113  		}
 35114  		v.reset(OpAMD64SARL)
 35115  		v.AddArg(x)
 35116  		v.AddArg(y)
 35117  		return true
 35118  	}
 35119  	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
 35120  	// cond: c & 31 == 31
 35121  	// result: (SARL x (NEGL <t> y))
 35122  	for {
 35123  		_ = v.Args[1]
 35124  		x := v.Args[0]
 35125  		v_1 := v.Args[1]
 35126  		if v_1.Op != OpAMD64NEGL {
 35127  			break
 35128  		}
 35129  		t := v_1.Type
 35130  		v_1_0 := v_1.Args[0]
 35131  		if v_1_0.Op != OpAMD64ANDLconst {
 35132  			break
 35133  		}
 35134  		c := v_1_0.AuxInt
 35135  		y := v_1_0.Args[0]
 35136  		if !(c&31 == 31) {
 35137  			break
 35138  		}
 35139  		v.reset(OpAMD64SARL)
 35140  		v.AddArg(x)
 35141  		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
 35142  		v0.AddArg(y)
 35143  		v.AddArg(v0)
 35144  		return true
 35145  	}
 35146  	return false
 35147  }
 35148  func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
 35149  	// match: (SARLconst x [0])
 35150  	// cond:
 35151  	// result: x
 35152  	for {
 35153  		if v.AuxInt != 0 {
 35154  			break
 35155  		}
 35156  		x := v.Args[0]
 35157  		v.reset(OpCopy)
 35158  		v.Type = x.Type
 35159  		v.AddArg(x)
 35160  		return true
 35161  	}
 35162  	// match: (SARLconst [c] (MOVQconst [d]))
 35163  	// cond:
 35164  	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
 35165  	for {
 35166  		c := v.AuxInt
 35167  		v_0 := v.Args[0]
 35168  		if v_0.Op != OpAMD64MOVQconst {
 35169  			break
 35170  		}
 35171  		d := v_0.AuxInt
 35172  		v.reset(OpAMD64MOVQconst)
 35173  		v.AuxInt = int64(int32(d)) >> uint64(c)
 35174  		return true
 35175  	}
 35176  	return false
 35177  }
 35178  func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
 35179  	b := v.Block
 35180  	_ = b
 35181  	// match: (SARQ x (MOVQconst [c]))
 35182  	// cond:
 35183  	// result: (SARQconst [c&63] x)
 35184  	for {
 35185  		_ = v.Args[1]
 35186  		x := v.Args[0]
 35187  		v_1 := v.Args[1]
 35188  		if v_1.Op != OpAMD64MOVQconst {
 35189  			break
 35190  		}
 35191  		c := v_1.AuxInt
 35192  		v.reset(OpAMD64SARQconst)
 35193  		v.AuxInt = c & 63
 35194  		v.AddArg(x)
 35195  		return true
 35196  	}
 35197  	// match: (SARQ x (MOVLconst [c]))
 35198  	// cond:
 35199  	// result: (SARQconst [c&63] x)
 35200  	for {
 35201  		_ = v.Args[1]
 35202  		x := v.Args[0]
 35203  		v_1 := v.Args[1]
 35204  		if v_1.Op != OpAMD64MOVLconst {
 35205  			break
 35206  		}
 35207  		c := v_1.AuxInt
 35208  		v.reset(OpAMD64SARQconst)
 35209  		v.AuxInt = c & 63
 35210  		v.AddArg(x)
 35211  		return true
 35212  	}
 35213  	// match: (SARQ x (ADDQconst [c] y))
 35214  	// cond: c & 63 == 0
 35215  	// result: (SARQ x y)
 35216  	for {
 35217  		_ = v.Args[1]
 35218  		x := v.Args[0]
 35219  		v_1 := v.Args[1]
 35220  		if v_1.Op != OpAMD64ADDQconst {
 35221  			break
 35222  		}
 35223  		c := v_1.AuxInt
 35224  		y := v_1.Args[0]
 35225  		if !(c&63 == 0) {
 35226  			break
 35227  		}
 35228  		v.reset(OpAMD64SARQ)
 35229  		v.AddArg(x)
 35230  		v.AddArg(y)
 35231  		return true
 35232  	}
 35233  	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
 35234  	// cond: c & 63 == 0
 35235  	// result: (SARQ x (NEGQ <t> y))
 35236  	for {
 35237  		_ = v.Args[1]
 35238  		x := v.Args[0]
 35239  		v_1 := v.Args[1]
 35240  		if v_1.Op != OpAMD64NEGQ {
 35241  			break
 35242  		}
 35243  		t := v_1.Type
 35244  		v_1_0 := v_1.Args[0]
 35245  		if v_1_0.Op != OpAMD64ADDQconst {
 35246  			break
 35247  		}
 35248  		c := v_1_0.AuxInt
 35249  		y := v_1_0.Args[0]
 35250  		if !(c&63 == 0) {
 35251  			break
 35252  		}
 35253  		v.reset(OpAMD64SARQ)
 35254  		v.AddArg(x)
 35255  		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
 35256  		v0.AddArg(y)
 35257  		v.AddArg(v0)
 35258  		return true
 35259  	}
 35260  	// match: (SARQ x (ANDQconst [c] y))
 35261  	// cond: c & 63 == 63
 35262  	// result: (SARQ x y)
 35263  	for {
 35264  		_ = v.Args[1]
 35265  		x := v.Args[0]
 35266  		v_1 := v.Args[1]
 35267  		if v_1.Op != OpAMD64ANDQconst {
 35268  			break
 35269  		}
 35270  		c := v_1.AuxInt
 35271  		y := v_1.Args[0]
 35272  		if !(c&63 == 63) {
 35273  			break
 35274  		}
 35275  		v.reset(OpAMD64SARQ)
 35276  		v.AddArg(x)
 35277  		v.AddArg(y)
 35278  		return true
 35279  	}
 35280  	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
 35281  	// cond: c & 63 == 63
 35282  	// result: (SARQ x (NEGQ <t> y))
 35283  	for {
 35284  		_ = v.Args[1]
 35285  		x := v.Args[0]
 35286  		v_1 := v.Args[1]
 35287  		if v_1.Op != OpAMD64NEGQ {
 35288  			break
 35289  		}
 35290  		t := v_1.Type
 35291  		v_1_0 := v_1.Args[0]
 35292  		if v_1_0.Op != OpAMD64ANDQconst {
 35293  			break
 35294  		}
 35295  		c := v_1_0.AuxInt
 35296  		y := v_1_0.Args[0]
 35297  		if !(c&63 == 63) {
 35298  			break
 35299  		}
 35300  		v.reset(OpAMD64SARQ)
 35301  		v.AddArg(x)
 35302  		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
 35303  		v0.AddArg(y)
 35304  		v.AddArg(v0)
 35305  		return true
 35306  	}
 35307  	// match: (SARQ x (ADDLconst [c] y))
 35308  	// cond: c & 63 == 0
 35309  	// result: (SARQ x y)
 35310  	for {
 35311  		_ = v.Args[1]
 35312  		x := v.Args[0]
 35313  		v_1 := v.Args[1]
 35314  		if v_1.Op != OpAMD64ADDLconst {
 35315  			break
 35316  		}
 35317  		c := v_1.AuxInt
 35318  		y := v_1.Args[0]
 35319  		if !(c&63 == 0) {
 35320  			break
 35321  		}
 35322  		v.reset(OpAMD64SARQ)
 35323  		v.AddArg(x)
 35324  		v.AddArg(y)
 35325  		return true
 35326  	}
 35327  	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
 35328  	// cond: c & 63 == 0
 35329  	// result: (SARQ x (NEGL <t> y))
 35330  	for {
 35331  		_ = v.Args[1]
 35332  		x := v.Args[0]
 35333  		v_1 := v.Args[1]
 35334  		if v_1.Op != OpAMD64NEGL {
 35335  			break
 35336  		}
 35337  		t := v_1.Type
 35338  		v_1_0 := v_1.Args[0]
 35339  		if v_1_0.Op != OpAMD64ADDLconst {
 35340  			break
 35341  		}
 35342  		c := v_1_0.AuxInt
 35343  		y := v_1_0.Args[0]
 35344  		if !(c&63 == 0) {
 35345  			break
 35346  		}
 35347  		v.reset(OpAMD64SARQ)
 35348  		v.AddArg(x)
 35349  		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
 35350  		v0.AddArg(y)
 35351  		v.AddArg(v0)
 35352  		return true
 35353  	}
 35354  	// match: (SARQ x (ANDLconst [c] y))
 35355  	// cond: c & 63 == 63
 35356  	// result: (SARQ x y)
 35357  	for {
 35358  		_ = v.Args[1]
 35359  		x := v.Args[0]
 35360  		v_1 := v.Args[1]
 35361  		if v_1.Op != OpAMD64ANDLconst {
 35362  			break
 35363  		}
 35364  		c := v_1.AuxInt
 35365  		y := v_1.Args[0]
 35366  		if !(c&63 == 63) {
 35367  			break
 35368  		}
 35369  		v.reset(OpAMD64SARQ)
 35370  		v.AddArg(x)
 35371  		v.AddArg(y)
 35372  		return true
 35373  	}
 35374  	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
 35375  	// cond: c & 63 == 63
 35376  	// result: (SARQ x (NEGL <t> y))
 35377  	for {
 35378  		_ = v.Args[1]
 35379  		x := v.Args[0]
 35380  		v_1 := v.Args[1]
 35381  		if v_1.Op != OpAMD64NEGL {
 35382  			break
 35383  		}
 35384  		t := v_1.Type
 35385  		v_1_0 := v_1.Args[0]
 35386  		if v_1_0.Op != OpAMD64ANDLconst {
 35387  			break
 35388  		}
 35389  		c := v_1_0.AuxInt
 35390  		y := v_1_0.Args[0]
 35391  		if !(c&63 == 63) {
 35392  			break
 35393  		}
 35394  		v.reset(OpAMD64SARQ)
 35395  		v.AddArg(x)
 35396  		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
 35397  		v0.AddArg(y)
 35398  		v.AddArg(v0)
 35399  		return true
 35400  	}
 35401  	return false
 35402  }
 35403  func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
 35404  	// match: (SARQconst x [0])
 35405  	// cond:
 35406  	// result: x
 35407  	for {
 35408  		if v.AuxInt != 0 {
 35409  			break
 35410  		}
 35411  		x := v.Args[0]
 35412  		v.reset(OpCopy)
 35413  		v.Type = x.Type
 35414  		v.AddArg(x)
 35415  		return true
 35416  	}
 35417  	// match: (SARQconst [c] (MOVQconst [d]))
 35418  	// cond:
 35419  	// result: (MOVQconst [d>>uint64(c)])
 35420  	for {
 35421  		c := v.AuxInt
 35422  		v_0 := v.Args[0]
 35423  		if v_0.Op != OpAMD64MOVQconst {
 35424  			break
 35425  		}
 35426  		d := v_0.AuxInt
 35427  		v.reset(OpAMD64MOVQconst)
 35428  		v.AuxInt = d >> uint64(c)
 35429  		return true
 35430  	}
 35431  	return false
 35432  }
 35433  func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
 35434  	// match: (SARW x (MOVQconst [c]))
 35435  	// cond:
 35436  	// result: (SARWconst [min(c&31,15)] x)
 35437  	for {
 35438  		_ = v.Args[1]
 35439  		x := v.Args[0]
 35440  		v_1 := v.Args[1]
 35441  		if v_1.Op != OpAMD64MOVQconst {
 35442  			break
 35443  		}
 35444  		c := v_1.AuxInt
 35445  		v.reset(OpAMD64SARWconst)
 35446  		v.AuxInt = min(c&31, 15)
 35447  		v.AddArg(x)
 35448  		return true
 35449  	}
 35450  	// match: (SARW x (MOVLconst [c]))
 35451  	// cond:
 35452  	// result: (SARWconst [min(c&31,15)] x)
 35453  	for {
 35454  		_ = v.Args[1]
 35455  		x := v.Args[0]
 35456  		v_1 := v.Args[1]
 35457  		if v_1.Op != OpAMD64MOVLconst {
 35458  			break
 35459  		}
 35460  		c := v_1.AuxInt
 35461  		v.reset(OpAMD64SARWconst)
 35462  		v.AuxInt = min(c&31, 15)
 35463  		v.AddArg(x)
 35464  		return true
 35465  	}
 35466  	return false
 35467  }
 35468  func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
 35469  	// match: (SARWconst x [0])
 35470  	// cond:
 35471  	// result: x
 35472  	for {
 35473  		if v.AuxInt != 0 {
 35474  			break
 35475  		}
 35476  		x := v.Args[0]
 35477  		v.reset(OpCopy)
 35478  		v.Type = x.Type
 35479  		v.AddArg(x)
 35480  		return true
 35481  	}
 35482  	// match: (SARWconst [c] (MOVQconst [d]))
 35483  	// cond:
 35484  	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
 35485  	for {
 35486  		c := v.AuxInt
 35487  		v_0 := v.Args[0]
 35488  		if v_0.Op != OpAMD64MOVQconst {
 35489  			break
 35490  		}
 35491  		d := v_0.AuxInt
 35492  		v.reset(OpAMD64MOVQconst)
 35493  		v.AuxInt = int64(int16(d)) >> uint64(c)
 35494  		return true
 35495  	}
 35496  	return false
 35497  }
 35498  func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
 35499  	// match: (SBBLcarrymask (FlagEQ))
 35500  	// cond:
 35501  	// result: (MOVLconst [0])
 35502  	for {
 35503  		v_0 := v.Args[0]
 35504  		if v_0.Op != OpAMD64FlagEQ {
 35505  			break
 35506  		}
 35507  		v.reset(OpAMD64MOVLconst)
 35508  		v.AuxInt = 0
 35509  		return true
 35510  	}
 35511  	// match: (SBBLcarrymask (FlagLT_ULT))
 35512  	// cond:
 35513  	// result: (MOVLconst [-1])
 35514  	for {
 35515  		v_0 := v.Args[0]
 35516  		if v_0.Op != OpAMD64FlagLT_ULT {
 35517  			break
 35518  		}
 35519  		v.reset(OpAMD64MOVLconst)
 35520  		v.AuxInt = -1
 35521  		return true
 35522  	}
 35523  	// match: (SBBLcarrymask (FlagLT_UGT))
 35524  	// cond:
 35525  	// result: (MOVLconst [0])
 35526  	for {
 35527  		v_0 := v.Args[0]
 35528  		if v_0.Op != OpAMD64FlagLT_UGT {
 35529  			break
 35530  		}
 35531  		v.reset(OpAMD64MOVLconst)
 35532  		v.AuxInt = 0
 35533  		return true
 35534  	}
 35535  	// match: (SBBLcarrymask (FlagGT_ULT))
 35536  	// cond:
 35537  	// result: (MOVLconst [-1])
 35538  	for {
 35539  		v_0 := v.Args[0]
 35540  		if v_0.Op != OpAMD64FlagGT_ULT {
 35541  			break
 35542  		}
 35543  		v.reset(OpAMD64MOVLconst)
 35544  		v.AuxInt = -1
 35545  		return true
 35546  	}
 35547  	// match: (SBBLcarrymask (FlagGT_UGT))
 35548  	// cond:
 35549  	// result: (MOVLconst [0])
 35550  	for {
 35551  		v_0 := v.Args[0]
 35552  		if v_0.Op != OpAMD64FlagGT_UGT {
 35553  			break
 35554  		}
 35555  		v.reset(OpAMD64MOVLconst)
 35556  		v.AuxInt = 0
 35557  		return true
 35558  	}
 35559  	return false
 35560  }
 35561  func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
 35562  	// match: (SBBQcarrymask (FlagEQ))
 35563  	// cond:
 35564  	// result: (MOVQconst [0])
 35565  	for {
 35566  		v_0 := v.Args[0]
 35567  		if v_0.Op != OpAMD64FlagEQ {
 35568  			break
 35569  		}
 35570  		v.reset(OpAMD64MOVQconst)
 35571  		v.AuxInt = 0
 35572  		return true
 35573  	}
 35574  	// match: (SBBQcarrymask (FlagLT_ULT))
 35575  	// cond:
 35576  	// result: (MOVQconst [-1])
 35577  	for {
 35578  		v_0 := v.Args[0]
 35579  		if v_0.Op != OpAMD64FlagLT_ULT {
 35580  			break
 35581  		}
 35582  		v.reset(OpAMD64MOVQconst)
 35583  		v.AuxInt = -1
 35584  		return true
 35585  	}
 35586  	// match: (SBBQcarrymask (FlagLT_UGT))
 35587  	// cond:
 35588  	// result: (MOVQconst [0])
 35589  	for {
 35590  		v_0 := v.Args[0]
 35591  		if v_0.Op != OpAMD64FlagLT_UGT {
 35592  			break
 35593  		}
 35594  		v.reset(OpAMD64MOVQconst)
 35595  		v.AuxInt = 0
 35596  		return true
 35597  	}
 35598  	// match: (SBBQcarrymask (FlagGT_ULT))
 35599  	// cond:
 35600  	// result: (MOVQconst [-1])
 35601  	for {
 35602  		v_0 := v.Args[0]
 35603  		if v_0.Op != OpAMD64FlagGT_ULT {
 35604  			break
 35605  		}
 35606  		v.reset(OpAMD64MOVQconst)
 35607  		v.AuxInt = -1
 35608  		return true
 35609  	}
 35610  	// match: (SBBQcarrymask (FlagGT_UGT))
 35611  	// cond:
 35612  	// result: (MOVQconst [0])
 35613  	for {
 35614  		v_0 := v.Args[0]
 35615  		if v_0.Op != OpAMD64FlagGT_UGT {
 35616  			break
 35617  		}
 35618  		v.reset(OpAMD64MOVQconst)
 35619  		v.AuxInt = 0
 35620  		return true
 35621  	}
 35622  	return false
 35623  }
 35624  func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
 35625  	// match: (SETA (InvertFlags x))
 35626  	// cond:
 35627  	// result: (SETB x)
 35628  	for {
 35629  		v_0 := v.Args[0]
 35630  		if v_0.Op != OpAMD64InvertFlags {
 35631  			break
 35632  		}
 35633  		x := v_0.Args[0]
 35634  		v.reset(OpAMD64SETB)
 35635  		v.AddArg(x)
 35636  		return true
 35637  	}
 35638  	// match: (SETA (FlagEQ))
 35639  	// cond:
 35640  	// result: (MOVLconst [0])
 35641  	for {
 35642  		v_0 := v.Args[0]
 35643  		if v_0.Op != OpAMD64FlagEQ {
 35644  			break
 35645  		}
 35646  		v.reset(OpAMD64MOVLconst)
 35647  		v.AuxInt = 0
 35648  		return true
 35649  	}
 35650  	// match: (SETA (FlagLT_ULT))
 35651  	// cond:
 35652  	// result: (MOVLconst [0])
 35653  	for {
 35654  		v_0 := v.Args[0]
 35655  		if v_0.Op != OpAMD64FlagLT_ULT {
 35656  			break
 35657  		}
 35658  		v.reset(OpAMD64MOVLconst)
 35659  		v.AuxInt = 0
 35660  		return true
 35661  	}
 35662  	// match: (SETA (FlagLT_UGT))
 35663  	// cond:
 35664  	// result: (MOVLconst [1])
 35665  	for {
 35666  		v_0 := v.Args[0]
 35667  		if v_0.Op != OpAMD64FlagLT_UGT {
 35668  			break
 35669  		}
 35670  		v.reset(OpAMD64MOVLconst)
 35671  		v.AuxInt = 1
 35672  		return true
 35673  	}
 35674  	// match: (SETA (FlagGT_ULT))
 35675  	// cond:
 35676  	// result: (MOVLconst [0])
 35677  	for {
 35678  		v_0 := v.Args[0]
 35679  		if v_0.Op != OpAMD64FlagGT_ULT {
 35680  			break
 35681  		}
 35682  		v.reset(OpAMD64MOVLconst)
 35683  		v.AuxInt = 0
 35684  		return true
 35685  	}
 35686  	// match: (SETA (FlagGT_UGT))
 35687  	// cond:
 35688  	// result: (MOVLconst [1])
 35689  	for {
 35690  		v_0 := v.Args[0]
 35691  		if v_0.Op != OpAMD64FlagGT_UGT {
 35692  			break
 35693  		}
 35694  		v.reset(OpAMD64MOVLconst)
 35695  		v.AuxInt = 1
 35696  		return true
 35697  	}
 35698  	return false
 35699  }
 35700  func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
 35701  	// match: (SETAE (InvertFlags x))
 35702  	// cond:
 35703  	// result: (SETBE x)
 35704  	for {
 35705  		v_0 := v.Args[0]
 35706  		if v_0.Op != OpAMD64InvertFlags {
 35707  			break
 35708  		}
 35709  		x := v_0.Args[0]
 35710  		v.reset(OpAMD64SETBE)
 35711  		v.AddArg(x)
 35712  		return true
 35713  	}
 35714  	// match: (SETAE (FlagEQ))
 35715  	// cond:
 35716  	// result: (MOVLconst [1])
 35717  	for {
 35718  		v_0 := v.Args[0]
 35719  		if v_0.Op != OpAMD64FlagEQ {
 35720  			break
 35721  		}
 35722  		v.reset(OpAMD64MOVLconst)
 35723  		v.AuxInt = 1
 35724  		return true
 35725  	}
 35726  	// match: (SETAE (FlagLT_ULT))
 35727  	// cond:
 35728  	// result: (MOVLconst [0])
 35729  	for {
 35730  		v_0 := v.Args[0]
 35731  		if v_0.Op != OpAMD64FlagLT_ULT {
 35732  			break
 35733  		}
 35734  		v.reset(OpAMD64MOVLconst)
 35735  		v.AuxInt = 0
 35736  		return true
 35737  	}
 35738  	// match: (SETAE (FlagLT_UGT))
 35739  	// cond:
 35740  	// result: (MOVLconst [1])
 35741  	for {
 35742  		v_0 := v.Args[0]
 35743  		if v_0.Op != OpAMD64FlagLT_UGT {
 35744  			break
 35745  		}
 35746  		v.reset(OpAMD64MOVLconst)
 35747  		v.AuxInt = 1
 35748  		return true
 35749  	}
 35750  	// match: (SETAE (FlagGT_ULT))
 35751  	// cond:
 35752  	// result: (MOVLconst [0])
 35753  	for {
 35754  		v_0 := v.Args[0]
 35755  		if v_0.Op != OpAMD64FlagGT_ULT {
 35756  			break
 35757  		}
 35758  		v.reset(OpAMD64MOVLconst)
 35759  		v.AuxInt = 0
 35760  		return true
 35761  	}
 35762  	// match: (SETAE (FlagGT_UGT))
 35763  	// cond:
 35764  	// result: (MOVLconst [1])
 35765  	for {
 35766  		v_0 := v.Args[0]
 35767  		if v_0.Op != OpAMD64FlagGT_UGT {
 35768  			break
 35769  		}
 35770  		v.reset(OpAMD64MOVLconst)
 35771  		v.AuxInt = 1
 35772  		return true
 35773  	}
 35774  	return false
 35775  }
 35776  func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
 35777  	b := v.Block
 35778  	_ = b
 35779  	// match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
 35780  	// cond:
 35781  	// result: (SETBEmem [off] {sym} ptr x mem)
 35782  	for {
 35783  		off := v.AuxInt
 35784  		sym := v.Aux
 35785  		_ = v.Args[2]
 35786  		ptr := v.Args[0]
 35787  		v_1 := v.Args[1]
 35788  		if v_1.Op != OpAMD64InvertFlags {
 35789  			break
 35790  		}
 35791  		x := v_1.Args[0]
 35792  		mem := v.Args[2]
 35793  		v.reset(OpAMD64SETBEmem)
 35794  		v.AuxInt = off
 35795  		v.Aux = sym
 35796  		v.AddArg(ptr)
 35797  		v.AddArg(x)
 35798  		v.AddArg(mem)
 35799  		return true
 35800  	}
 35801  	// match: (SETAEmem [off1] {sym} (ADDQconst [off2] base) val mem)
 35802  	// cond: is32Bit(off1+off2)
 35803  	// result: (SETAEmem [off1+off2] {sym} base val mem)
 35804  	for {
 35805  		off1 := v.AuxInt
 35806  		sym := v.Aux
 35807  		_ = v.Args[2]
 35808  		v_0 := v.Args[0]
 35809  		if v_0.Op != OpAMD64ADDQconst {
 35810  			break
 35811  		}
 35812  		off2 := v_0.AuxInt
 35813  		base := v_0.Args[0]
 35814  		val := v.Args[1]
 35815  		mem := v.Args[2]
 35816  		if !(is32Bit(off1 + off2)) {
 35817  			break
 35818  		}
 35819  		v.reset(OpAMD64SETAEmem)
 35820  		v.AuxInt = off1 + off2
 35821  		v.Aux = sym
 35822  		v.AddArg(base)
 35823  		v.AddArg(val)
 35824  		v.AddArg(mem)
 35825  		return true
 35826  	}
 35827  	// match: (SETAEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 35828  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 35829  	// result: (SETAEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 35830  	for {
 35831  		off1 := v.AuxInt
 35832  		sym1 := v.Aux
 35833  		_ = v.Args[2]
 35834  		v_0 := v.Args[0]
 35835  		if v_0.Op != OpAMD64LEAQ {
 35836  			break
 35837  		}
 35838  		off2 := v_0.AuxInt
 35839  		sym2 := v_0.Aux
 35840  		base := v_0.Args[0]
 35841  		val := v.Args[1]
 35842  		mem := v.Args[2]
 35843  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 35844  			break
 35845  		}
 35846  		v.reset(OpAMD64SETAEmem)
 35847  		v.AuxInt = off1 + off2
 35848  		v.Aux = mergeSym(sym1, sym2)
 35849  		v.AddArg(base)
 35850  		v.AddArg(val)
 35851  		v.AddArg(mem)
 35852  		return true
 35853  	}
 35854  	// match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
 35855  	// cond:
 35856  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 35857  	for {
 35858  		off := v.AuxInt
 35859  		sym := v.Aux
 35860  		_ = v.Args[2]
 35861  		ptr := v.Args[0]
 35862  		x := v.Args[1]
 35863  		if x.Op != OpAMD64FlagEQ {
 35864  			break
 35865  		}
 35866  		mem := v.Args[2]
 35867  		v.reset(OpAMD64MOVBstore)
 35868  		v.AuxInt = off
 35869  		v.Aux = sym
 35870  		v.AddArg(ptr)
 35871  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 35872  		v0.AuxInt = 1
 35873  		v.AddArg(v0)
 35874  		v.AddArg(mem)
 35875  		return true
 35876  	}
 35877  	// match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
 35878  	// cond:
 35879  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 35880  	for {
 35881  		off := v.AuxInt
 35882  		sym := v.Aux
 35883  		_ = v.Args[2]
 35884  		ptr := v.Args[0]
 35885  		x := v.Args[1]
 35886  		if x.Op != OpAMD64FlagLT_ULT {
 35887  			break
 35888  		}
 35889  		mem := v.Args[2]
 35890  		v.reset(OpAMD64MOVBstore)
 35891  		v.AuxInt = off
 35892  		v.Aux = sym
 35893  		v.AddArg(ptr)
 35894  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 35895  		v0.AuxInt = 0
 35896  		v.AddArg(v0)
 35897  		v.AddArg(mem)
 35898  		return true
 35899  	}
 35900  	// match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
 35901  	// cond:
 35902  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 35903  	for {
 35904  		off := v.AuxInt
 35905  		sym := v.Aux
 35906  		_ = v.Args[2]
 35907  		ptr := v.Args[0]
 35908  		x := v.Args[1]
 35909  		if x.Op != OpAMD64FlagLT_UGT {
 35910  			break
 35911  		}
 35912  		mem := v.Args[2]
 35913  		v.reset(OpAMD64MOVBstore)
 35914  		v.AuxInt = off
 35915  		v.Aux = sym
 35916  		v.AddArg(ptr)
 35917  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 35918  		v0.AuxInt = 1
 35919  		v.AddArg(v0)
 35920  		v.AddArg(mem)
 35921  		return true
 35922  	}
 35923  	// match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 35924  	// cond:
 35925  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 35926  	for {
 35927  		off := v.AuxInt
 35928  		sym := v.Aux
 35929  		_ = v.Args[2]
 35930  		ptr := v.Args[0]
 35931  		x := v.Args[1]
 35932  		if x.Op != OpAMD64FlagGT_ULT {
 35933  			break
 35934  		}
 35935  		mem := v.Args[2]
 35936  		v.reset(OpAMD64MOVBstore)
 35937  		v.AuxInt = off
 35938  		v.Aux = sym
 35939  		v.AddArg(ptr)
 35940  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 35941  		v0.AuxInt = 0
 35942  		v.AddArg(v0)
 35943  		v.AddArg(mem)
 35944  		return true
 35945  	}
 35946  	// match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
 35947  	// cond:
 35948  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 35949  	for {
 35950  		off := v.AuxInt
 35951  		sym := v.Aux
 35952  		_ = v.Args[2]
 35953  		ptr := v.Args[0]
 35954  		x := v.Args[1]
 35955  		if x.Op != OpAMD64FlagGT_UGT {
 35956  			break
 35957  		}
 35958  		mem := v.Args[2]
 35959  		v.reset(OpAMD64MOVBstore)
 35960  		v.AuxInt = off
 35961  		v.Aux = sym
 35962  		v.AddArg(ptr)
 35963  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 35964  		v0.AuxInt = 1
 35965  		v.AddArg(v0)
 35966  		v.AddArg(mem)
 35967  		return true
 35968  	}
 35969  	return false
 35970  }
 35971  func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
 35972  	b := v.Block
 35973  	_ = b
 35974  	// match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
 35975  	// cond:
 35976  	// result: (SETBmem [off] {sym} ptr x mem)
 35977  	for {
 35978  		off := v.AuxInt
 35979  		sym := v.Aux
 35980  		_ = v.Args[2]
 35981  		ptr := v.Args[0]
 35982  		v_1 := v.Args[1]
 35983  		if v_1.Op != OpAMD64InvertFlags {
 35984  			break
 35985  		}
 35986  		x := v_1.Args[0]
 35987  		mem := v.Args[2]
 35988  		v.reset(OpAMD64SETBmem)
 35989  		v.AuxInt = off
 35990  		v.Aux = sym
 35991  		v.AddArg(ptr)
 35992  		v.AddArg(x)
 35993  		v.AddArg(mem)
 35994  		return true
 35995  	}
 35996  	// match: (SETAmem [off1] {sym} (ADDQconst [off2] base) val mem)
 35997  	// cond: is32Bit(off1+off2)
 35998  	// result: (SETAmem [off1+off2] {sym} base val mem)
 35999  	for {
 36000  		off1 := v.AuxInt
 36001  		sym := v.Aux
 36002  		_ = v.Args[2]
 36003  		v_0 := v.Args[0]
 36004  		if v_0.Op != OpAMD64ADDQconst {
 36005  			break
 36006  		}
 36007  		off2 := v_0.AuxInt
 36008  		base := v_0.Args[0]
 36009  		val := v.Args[1]
 36010  		mem := v.Args[2]
 36011  		if !(is32Bit(off1 + off2)) {
 36012  			break
 36013  		}
 36014  		v.reset(OpAMD64SETAmem)
 36015  		v.AuxInt = off1 + off2
 36016  		v.Aux = sym
 36017  		v.AddArg(base)
 36018  		v.AddArg(val)
 36019  		v.AddArg(mem)
 36020  		return true
 36021  	}
 36022  	// match: (SETAmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 36023  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 36024  	// result: (SETAmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 36025  	for {
 36026  		off1 := v.AuxInt
 36027  		sym1 := v.Aux
 36028  		_ = v.Args[2]
 36029  		v_0 := v.Args[0]
 36030  		if v_0.Op != OpAMD64LEAQ {
 36031  			break
 36032  		}
 36033  		off2 := v_0.AuxInt
 36034  		sym2 := v_0.Aux
 36035  		base := v_0.Args[0]
 36036  		val := v.Args[1]
 36037  		mem := v.Args[2]
 36038  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 36039  			break
 36040  		}
 36041  		v.reset(OpAMD64SETAmem)
 36042  		v.AuxInt = off1 + off2
 36043  		v.Aux = mergeSym(sym1, sym2)
 36044  		v.AddArg(base)
 36045  		v.AddArg(val)
 36046  		v.AddArg(mem)
 36047  		return true
 36048  	}
 36049  	// match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
 36050  	// cond:
 36051  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36052  	for {
 36053  		off := v.AuxInt
 36054  		sym := v.Aux
 36055  		_ = v.Args[2]
 36056  		ptr := v.Args[0]
 36057  		x := v.Args[1]
 36058  		if x.Op != OpAMD64FlagEQ {
 36059  			break
 36060  		}
 36061  		mem := v.Args[2]
 36062  		v.reset(OpAMD64MOVBstore)
 36063  		v.AuxInt = off
 36064  		v.Aux = sym
 36065  		v.AddArg(ptr)
 36066  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36067  		v0.AuxInt = 0
 36068  		v.AddArg(v0)
 36069  		v.AddArg(mem)
 36070  		return true
 36071  	}
 36072  	// match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem)
 36073  	// cond:
 36074  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36075  	for {
 36076  		off := v.AuxInt
 36077  		sym := v.Aux
 36078  		_ = v.Args[2]
 36079  		ptr := v.Args[0]
 36080  		x := v.Args[1]
 36081  		if x.Op != OpAMD64FlagLT_ULT {
 36082  			break
 36083  		}
 36084  		mem := v.Args[2]
 36085  		v.reset(OpAMD64MOVBstore)
 36086  		v.AuxInt = off
 36087  		v.Aux = sym
 36088  		v.AddArg(ptr)
 36089  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36090  		v0.AuxInt = 0
 36091  		v.AddArg(v0)
 36092  		v.AddArg(mem)
 36093  		return true
 36094  	}
 36095  	// match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem)
 36096  	// cond:
 36097  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36098  	for {
 36099  		off := v.AuxInt
 36100  		sym := v.Aux
 36101  		_ = v.Args[2]
 36102  		ptr := v.Args[0]
 36103  		x := v.Args[1]
 36104  		if x.Op != OpAMD64FlagLT_UGT {
 36105  			break
 36106  		}
 36107  		mem := v.Args[2]
 36108  		v.reset(OpAMD64MOVBstore)
 36109  		v.AuxInt = off
 36110  		v.Aux = sym
 36111  		v.AddArg(ptr)
 36112  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36113  		v0.AuxInt = 1
 36114  		v.AddArg(v0)
 36115  		v.AddArg(mem)
 36116  		return true
 36117  	}
 36118  	// match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 36119  	// cond:
 36120  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36121  	for {
 36122  		off := v.AuxInt
 36123  		sym := v.Aux
 36124  		_ = v.Args[2]
 36125  		ptr := v.Args[0]
 36126  		x := v.Args[1]
 36127  		if x.Op != OpAMD64FlagGT_ULT {
 36128  			break
 36129  		}
 36130  		mem := v.Args[2]
 36131  		v.reset(OpAMD64MOVBstore)
 36132  		v.AuxInt = off
 36133  		v.Aux = sym
 36134  		v.AddArg(ptr)
 36135  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36136  		v0.AuxInt = 0
 36137  		v.AddArg(v0)
 36138  		v.AddArg(mem)
 36139  		return true
 36140  	}
 36141  	// match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem)
 36142  	// cond:
 36143  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36144  	for {
 36145  		off := v.AuxInt
 36146  		sym := v.Aux
 36147  		_ = v.Args[2]
 36148  		ptr := v.Args[0]
 36149  		x := v.Args[1]
 36150  		if x.Op != OpAMD64FlagGT_UGT {
 36151  			break
 36152  		}
 36153  		mem := v.Args[2]
 36154  		v.reset(OpAMD64MOVBstore)
 36155  		v.AuxInt = off
 36156  		v.Aux = sym
 36157  		v.AddArg(ptr)
 36158  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36159  		v0.AuxInt = 1
 36160  		v.AddArg(v0)
 36161  		v.AddArg(mem)
 36162  		return true
 36163  	}
 36164  	return false
 36165  }
 36166  func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
 36167  	// match: (SETB (InvertFlags x))
 36168  	// cond:
 36169  	// result: (SETA x)
 36170  	for {
 36171  		v_0 := v.Args[0]
 36172  		if v_0.Op != OpAMD64InvertFlags {
 36173  			break
 36174  		}
 36175  		x := v_0.Args[0]
 36176  		v.reset(OpAMD64SETA)
 36177  		v.AddArg(x)
 36178  		return true
 36179  	}
 36180  	// match: (SETB (FlagEQ))
 36181  	// cond:
 36182  	// result: (MOVLconst [0])
 36183  	for {
 36184  		v_0 := v.Args[0]
 36185  		if v_0.Op != OpAMD64FlagEQ {
 36186  			break
 36187  		}
 36188  		v.reset(OpAMD64MOVLconst)
 36189  		v.AuxInt = 0
 36190  		return true
 36191  	}
 36192  	// match: (SETB (FlagLT_ULT))
 36193  	// cond:
 36194  	// result: (MOVLconst [1])
 36195  	for {
 36196  		v_0 := v.Args[0]
 36197  		if v_0.Op != OpAMD64FlagLT_ULT {
 36198  			break
 36199  		}
 36200  		v.reset(OpAMD64MOVLconst)
 36201  		v.AuxInt = 1
 36202  		return true
 36203  	}
 36204  	// match: (SETB (FlagLT_UGT))
 36205  	// cond:
 36206  	// result: (MOVLconst [0])
 36207  	for {
 36208  		v_0 := v.Args[0]
 36209  		if v_0.Op != OpAMD64FlagLT_UGT {
 36210  			break
 36211  		}
 36212  		v.reset(OpAMD64MOVLconst)
 36213  		v.AuxInt = 0
 36214  		return true
 36215  	}
 36216  	// match: (SETB (FlagGT_ULT))
 36217  	// cond:
 36218  	// result: (MOVLconst [1])
 36219  	for {
 36220  		v_0 := v.Args[0]
 36221  		if v_0.Op != OpAMD64FlagGT_ULT {
 36222  			break
 36223  		}
 36224  		v.reset(OpAMD64MOVLconst)
 36225  		v.AuxInt = 1
 36226  		return true
 36227  	}
 36228  	// match: (SETB (FlagGT_UGT))
 36229  	// cond:
 36230  	// result: (MOVLconst [0])
 36231  	for {
 36232  		v_0 := v.Args[0]
 36233  		if v_0.Op != OpAMD64FlagGT_UGT {
 36234  			break
 36235  		}
 36236  		v.reset(OpAMD64MOVLconst)
 36237  		v.AuxInt = 0
 36238  		return true
 36239  	}
 36240  	return false
 36241  }
 36242  func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
 36243  	// match: (SETBE (InvertFlags x))
 36244  	// cond:
 36245  	// result: (SETAE x)
 36246  	for {
 36247  		v_0 := v.Args[0]
 36248  		if v_0.Op != OpAMD64InvertFlags {
 36249  			break
 36250  		}
 36251  		x := v_0.Args[0]
 36252  		v.reset(OpAMD64SETAE)
 36253  		v.AddArg(x)
 36254  		return true
 36255  	}
 36256  	// match: (SETBE (FlagEQ))
 36257  	// cond:
 36258  	// result: (MOVLconst [1])
 36259  	for {
 36260  		v_0 := v.Args[0]
 36261  		if v_0.Op != OpAMD64FlagEQ {
 36262  			break
 36263  		}
 36264  		v.reset(OpAMD64MOVLconst)
 36265  		v.AuxInt = 1
 36266  		return true
 36267  	}
 36268  	// match: (SETBE (FlagLT_ULT))
 36269  	// cond:
 36270  	// result: (MOVLconst [1])
 36271  	for {
 36272  		v_0 := v.Args[0]
 36273  		if v_0.Op != OpAMD64FlagLT_ULT {
 36274  			break
 36275  		}
 36276  		v.reset(OpAMD64MOVLconst)
 36277  		v.AuxInt = 1
 36278  		return true
 36279  	}
 36280  	// match: (SETBE (FlagLT_UGT))
 36281  	// cond:
 36282  	// result: (MOVLconst [0])
 36283  	for {
 36284  		v_0 := v.Args[0]
 36285  		if v_0.Op != OpAMD64FlagLT_UGT {
 36286  			break
 36287  		}
 36288  		v.reset(OpAMD64MOVLconst)
 36289  		v.AuxInt = 0
 36290  		return true
 36291  	}
 36292  	// match: (SETBE (FlagGT_ULT))
 36293  	// cond:
 36294  	// result: (MOVLconst [1])
 36295  	for {
 36296  		v_0 := v.Args[0]
 36297  		if v_0.Op != OpAMD64FlagGT_ULT {
 36298  			break
 36299  		}
 36300  		v.reset(OpAMD64MOVLconst)
 36301  		v.AuxInt = 1
 36302  		return true
 36303  	}
 36304  	// match: (SETBE (FlagGT_UGT))
 36305  	// cond:
 36306  	// result: (MOVLconst [0])
 36307  	for {
 36308  		v_0 := v.Args[0]
 36309  		if v_0.Op != OpAMD64FlagGT_UGT {
 36310  			break
 36311  		}
 36312  		v.reset(OpAMD64MOVLconst)
 36313  		v.AuxInt = 0
 36314  		return true
 36315  	}
 36316  	return false
 36317  }
 36318  func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
 36319  	b := v.Block
 36320  	_ = b
 36321  	// match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
 36322  	// cond:
 36323  	// result: (SETAEmem [off] {sym} ptr x mem)
 36324  	for {
 36325  		off := v.AuxInt
 36326  		sym := v.Aux
 36327  		_ = v.Args[2]
 36328  		ptr := v.Args[0]
 36329  		v_1 := v.Args[1]
 36330  		if v_1.Op != OpAMD64InvertFlags {
 36331  			break
 36332  		}
 36333  		x := v_1.Args[0]
 36334  		mem := v.Args[2]
 36335  		v.reset(OpAMD64SETAEmem)
 36336  		v.AuxInt = off
 36337  		v.Aux = sym
 36338  		v.AddArg(ptr)
 36339  		v.AddArg(x)
 36340  		v.AddArg(mem)
 36341  		return true
 36342  	}
 36343  	// match: (SETBEmem [off1] {sym} (ADDQconst [off2] base) val mem)
 36344  	// cond: is32Bit(off1+off2)
 36345  	// result: (SETBEmem [off1+off2] {sym} base val mem)
 36346  	for {
 36347  		off1 := v.AuxInt
 36348  		sym := v.Aux
 36349  		_ = v.Args[2]
 36350  		v_0 := v.Args[0]
 36351  		if v_0.Op != OpAMD64ADDQconst {
 36352  			break
 36353  		}
 36354  		off2 := v_0.AuxInt
 36355  		base := v_0.Args[0]
 36356  		val := v.Args[1]
 36357  		mem := v.Args[2]
 36358  		if !(is32Bit(off1 + off2)) {
 36359  			break
 36360  		}
 36361  		v.reset(OpAMD64SETBEmem)
 36362  		v.AuxInt = off1 + off2
 36363  		v.Aux = sym
 36364  		v.AddArg(base)
 36365  		v.AddArg(val)
 36366  		v.AddArg(mem)
 36367  		return true
 36368  	}
 36369  	// match: (SETBEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 36370  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 36371  	// result: (SETBEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 36372  	for {
 36373  		off1 := v.AuxInt
 36374  		sym1 := v.Aux
 36375  		_ = v.Args[2]
 36376  		v_0 := v.Args[0]
 36377  		if v_0.Op != OpAMD64LEAQ {
 36378  			break
 36379  		}
 36380  		off2 := v_0.AuxInt
 36381  		sym2 := v_0.Aux
 36382  		base := v_0.Args[0]
 36383  		val := v.Args[1]
 36384  		mem := v.Args[2]
 36385  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 36386  			break
 36387  		}
 36388  		v.reset(OpAMD64SETBEmem)
 36389  		v.AuxInt = off1 + off2
 36390  		v.Aux = mergeSym(sym1, sym2)
 36391  		v.AddArg(base)
 36392  		v.AddArg(val)
 36393  		v.AddArg(mem)
 36394  		return true
 36395  	}
 36396  	// match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
 36397  	// cond:
 36398  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36399  	for {
 36400  		off := v.AuxInt
 36401  		sym := v.Aux
 36402  		_ = v.Args[2]
 36403  		ptr := v.Args[0]
 36404  		x := v.Args[1]
 36405  		if x.Op != OpAMD64FlagEQ {
 36406  			break
 36407  		}
 36408  		mem := v.Args[2]
 36409  		v.reset(OpAMD64MOVBstore)
 36410  		v.AuxInt = off
 36411  		v.Aux = sym
 36412  		v.AddArg(ptr)
 36413  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36414  		v0.AuxInt = 1
 36415  		v.AddArg(v0)
 36416  		v.AddArg(mem)
 36417  		return true
 36418  	}
 36419  	// match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
 36420  	// cond:
 36421  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36422  	for {
 36423  		off := v.AuxInt
 36424  		sym := v.Aux
 36425  		_ = v.Args[2]
 36426  		ptr := v.Args[0]
 36427  		x := v.Args[1]
 36428  		if x.Op != OpAMD64FlagLT_ULT {
 36429  			break
 36430  		}
 36431  		mem := v.Args[2]
 36432  		v.reset(OpAMD64MOVBstore)
 36433  		v.AuxInt = off
 36434  		v.Aux = sym
 36435  		v.AddArg(ptr)
 36436  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36437  		v0.AuxInt = 1
 36438  		v.AddArg(v0)
 36439  		v.AddArg(mem)
 36440  		return true
 36441  	}
 36442  	// match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
 36443  	// cond:
 36444  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36445  	for {
 36446  		off := v.AuxInt
 36447  		sym := v.Aux
 36448  		_ = v.Args[2]
 36449  		ptr := v.Args[0]
 36450  		x := v.Args[1]
 36451  		if x.Op != OpAMD64FlagLT_UGT {
 36452  			break
 36453  		}
 36454  		mem := v.Args[2]
 36455  		v.reset(OpAMD64MOVBstore)
 36456  		v.AuxInt = off
 36457  		v.Aux = sym
 36458  		v.AddArg(ptr)
 36459  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36460  		v0.AuxInt = 0
 36461  		v.AddArg(v0)
 36462  		v.AddArg(mem)
 36463  		return true
 36464  	}
 36465  	// match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 36466  	// cond:
 36467  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36468  	for {
 36469  		off := v.AuxInt
 36470  		sym := v.Aux
 36471  		_ = v.Args[2]
 36472  		ptr := v.Args[0]
 36473  		x := v.Args[1]
 36474  		if x.Op != OpAMD64FlagGT_ULT {
 36475  			break
 36476  		}
 36477  		mem := v.Args[2]
 36478  		v.reset(OpAMD64MOVBstore)
 36479  		v.AuxInt = off
 36480  		v.Aux = sym
 36481  		v.AddArg(ptr)
 36482  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36483  		v0.AuxInt = 1
 36484  		v.AddArg(v0)
 36485  		v.AddArg(mem)
 36486  		return true
 36487  	}
 36488  	// match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
 36489  	// cond:
 36490  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36491  	for {
 36492  		off := v.AuxInt
 36493  		sym := v.Aux
 36494  		_ = v.Args[2]
 36495  		ptr := v.Args[0]
 36496  		x := v.Args[1]
 36497  		if x.Op != OpAMD64FlagGT_UGT {
 36498  			break
 36499  		}
 36500  		mem := v.Args[2]
 36501  		v.reset(OpAMD64MOVBstore)
 36502  		v.AuxInt = off
 36503  		v.Aux = sym
 36504  		v.AddArg(ptr)
 36505  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36506  		v0.AuxInt = 0
 36507  		v.AddArg(v0)
 36508  		v.AddArg(mem)
 36509  		return true
 36510  	}
 36511  	return false
 36512  }
 36513  func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
 36514  	b := v.Block
 36515  	_ = b
 36516  	// match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
 36517  	// cond:
 36518  	// result: (SETAmem [off] {sym} ptr x mem)
 36519  	for {
 36520  		off := v.AuxInt
 36521  		sym := v.Aux
 36522  		_ = v.Args[2]
 36523  		ptr := v.Args[0]
 36524  		v_1 := v.Args[1]
 36525  		if v_1.Op != OpAMD64InvertFlags {
 36526  			break
 36527  		}
 36528  		x := v_1.Args[0]
 36529  		mem := v.Args[2]
 36530  		v.reset(OpAMD64SETAmem)
 36531  		v.AuxInt = off
 36532  		v.Aux = sym
 36533  		v.AddArg(ptr)
 36534  		v.AddArg(x)
 36535  		v.AddArg(mem)
 36536  		return true
 36537  	}
 36538  	// match: (SETBmem [off1] {sym} (ADDQconst [off2] base) val mem)
 36539  	// cond: is32Bit(off1+off2)
 36540  	// result: (SETBmem [off1+off2] {sym} base val mem)
 36541  	for {
 36542  		off1 := v.AuxInt
 36543  		sym := v.Aux
 36544  		_ = v.Args[2]
 36545  		v_0 := v.Args[0]
 36546  		if v_0.Op != OpAMD64ADDQconst {
 36547  			break
 36548  		}
 36549  		off2 := v_0.AuxInt
 36550  		base := v_0.Args[0]
 36551  		val := v.Args[1]
 36552  		mem := v.Args[2]
 36553  		if !(is32Bit(off1 + off2)) {
 36554  			break
 36555  		}
 36556  		v.reset(OpAMD64SETBmem)
 36557  		v.AuxInt = off1 + off2
 36558  		v.Aux = sym
 36559  		v.AddArg(base)
 36560  		v.AddArg(val)
 36561  		v.AddArg(mem)
 36562  		return true
 36563  	}
 36564  	// match: (SETBmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 36565  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 36566  	// result: (SETBmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 36567  	for {
 36568  		off1 := v.AuxInt
 36569  		sym1 := v.Aux
 36570  		_ = v.Args[2]
 36571  		v_0 := v.Args[0]
 36572  		if v_0.Op != OpAMD64LEAQ {
 36573  			break
 36574  		}
 36575  		off2 := v_0.AuxInt
 36576  		sym2 := v_0.Aux
 36577  		base := v_0.Args[0]
 36578  		val := v.Args[1]
 36579  		mem := v.Args[2]
 36580  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 36581  			break
 36582  		}
 36583  		v.reset(OpAMD64SETBmem)
 36584  		v.AuxInt = off1 + off2
 36585  		v.Aux = mergeSym(sym1, sym2)
 36586  		v.AddArg(base)
 36587  		v.AddArg(val)
 36588  		v.AddArg(mem)
 36589  		return true
 36590  	}
 36591  	// match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
 36592  	// cond:
 36593  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36594  	for {
 36595  		off := v.AuxInt
 36596  		sym := v.Aux
 36597  		_ = v.Args[2]
 36598  		ptr := v.Args[0]
 36599  		x := v.Args[1]
 36600  		if x.Op != OpAMD64FlagEQ {
 36601  			break
 36602  		}
 36603  		mem := v.Args[2]
 36604  		v.reset(OpAMD64MOVBstore)
 36605  		v.AuxInt = off
 36606  		v.Aux = sym
 36607  		v.AddArg(ptr)
 36608  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36609  		v0.AuxInt = 0
 36610  		v.AddArg(v0)
 36611  		v.AddArg(mem)
 36612  		return true
 36613  	}
 36614  	// match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem)
 36615  	// cond:
 36616  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36617  	for {
 36618  		off := v.AuxInt
 36619  		sym := v.Aux
 36620  		_ = v.Args[2]
 36621  		ptr := v.Args[0]
 36622  		x := v.Args[1]
 36623  		if x.Op != OpAMD64FlagLT_ULT {
 36624  			break
 36625  		}
 36626  		mem := v.Args[2]
 36627  		v.reset(OpAMD64MOVBstore)
 36628  		v.AuxInt = off
 36629  		v.Aux = sym
 36630  		v.AddArg(ptr)
 36631  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36632  		v0.AuxInt = 1
 36633  		v.AddArg(v0)
 36634  		v.AddArg(mem)
 36635  		return true
 36636  	}
 36637  	// match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem)
 36638  	// cond:
 36639  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36640  	for {
 36641  		off := v.AuxInt
 36642  		sym := v.Aux
 36643  		_ = v.Args[2]
 36644  		ptr := v.Args[0]
 36645  		x := v.Args[1]
 36646  		if x.Op != OpAMD64FlagLT_UGT {
 36647  			break
 36648  		}
 36649  		mem := v.Args[2]
 36650  		v.reset(OpAMD64MOVBstore)
 36651  		v.AuxInt = off
 36652  		v.Aux = sym
 36653  		v.AddArg(ptr)
 36654  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36655  		v0.AuxInt = 0
 36656  		v.AddArg(v0)
 36657  		v.AddArg(mem)
 36658  		return true
 36659  	}
 36660  	// match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 36661  	// cond:
 36662  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 36663  	for {
 36664  		off := v.AuxInt
 36665  		sym := v.Aux
 36666  		_ = v.Args[2]
 36667  		ptr := v.Args[0]
 36668  		x := v.Args[1]
 36669  		if x.Op != OpAMD64FlagGT_ULT {
 36670  			break
 36671  		}
 36672  		mem := v.Args[2]
 36673  		v.reset(OpAMD64MOVBstore)
 36674  		v.AuxInt = off
 36675  		v.Aux = sym
 36676  		v.AddArg(ptr)
 36677  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36678  		v0.AuxInt = 1
 36679  		v.AddArg(v0)
 36680  		v.AddArg(mem)
 36681  		return true
 36682  	}
 36683  	// match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem)
 36684  	// cond:
 36685  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 36686  	for {
 36687  		off := v.AuxInt
 36688  		sym := v.Aux
 36689  		_ = v.Args[2]
 36690  		ptr := v.Args[0]
 36691  		x := v.Args[1]
 36692  		if x.Op != OpAMD64FlagGT_UGT {
 36693  			break
 36694  		}
 36695  		mem := v.Args[2]
 36696  		v.reset(OpAMD64MOVBstore)
 36697  		v.AuxInt = off
 36698  		v.Aux = sym
 36699  		v.AddArg(ptr)
 36700  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 36701  		v0.AuxInt = 0
 36702  		v.AddArg(v0)
 36703  		v.AddArg(mem)
 36704  		return true
 36705  	}
 36706  	return false
 36707  }
 36708  func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
 36709  	b := v.Block
 36710  	_ = b
 36711  	config := b.Func.Config
 36712  	_ = config
 36713  	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
 36714  	// cond: !config.nacl
 36715  	// result: (SETAE (BTL x y))
 36716  	for {
 36717  		v_0 := v.Args[0]
 36718  		if v_0.Op != OpAMD64TESTL {
 36719  			break
 36720  		}
 36721  		_ = v_0.Args[1]
 36722  		v_0_0 := v_0.Args[0]
 36723  		if v_0_0.Op != OpAMD64SHLL {
 36724  			break
 36725  		}
 36726  		_ = v_0_0.Args[1]
 36727  		v_0_0_0 := v_0_0.Args[0]
 36728  		if v_0_0_0.Op != OpAMD64MOVLconst {
 36729  			break
 36730  		}
 36731  		if v_0_0_0.AuxInt != 1 {
 36732  			break
 36733  		}
 36734  		x := v_0_0.Args[1]
 36735  		y := v_0.Args[1]
 36736  		if !(!config.nacl) {
 36737  			break
 36738  		}
 36739  		v.reset(OpAMD64SETAE)
 36740  		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 36741  		v0.AddArg(x)
 36742  		v0.AddArg(y)
 36743  		v.AddArg(v0)
 36744  		return true
 36745  	}
 36746  	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
 36747  	// cond: !config.nacl
 36748  	// result: (SETAE (BTL x y))
 36749  	for {
 36750  		v_0 := v.Args[0]
 36751  		if v_0.Op != OpAMD64TESTL {
 36752  			break
 36753  		}
 36754  		_ = v_0.Args[1]
 36755  		y := v_0.Args[0]
 36756  		v_0_1 := v_0.Args[1]
 36757  		if v_0_1.Op != OpAMD64SHLL {
 36758  			break
 36759  		}
 36760  		_ = v_0_1.Args[1]
 36761  		v_0_1_0 := v_0_1.Args[0]
 36762  		if v_0_1_0.Op != OpAMD64MOVLconst {
 36763  			break
 36764  		}
 36765  		if v_0_1_0.AuxInt != 1 {
 36766  			break
 36767  		}
 36768  		x := v_0_1.Args[1]
 36769  		if !(!config.nacl) {
 36770  			break
 36771  		}
 36772  		v.reset(OpAMD64SETAE)
 36773  		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 36774  		v0.AddArg(x)
 36775  		v0.AddArg(y)
 36776  		v.AddArg(v0)
 36777  		return true
 36778  	}
 36779  	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
 36780  	// cond: !config.nacl
 36781  	// result: (SETAE (BTQ x y))
 36782  	for {
 36783  		v_0 := v.Args[0]
 36784  		if v_0.Op != OpAMD64TESTQ {
 36785  			break
 36786  		}
 36787  		_ = v_0.Args[1]
 36788  		v_0_0 := v_0.Args[0]
 36789  		if v_0_0.Op != OpAMD64SHLQ {
 36790  			break
 36791  		}
 36792  		_ = v_0_0.Args[1]
 36793  		v_0_0_0 := v_0_0.Args[0]
 36794  		if v_0_0_0.Op != OpAMD64MOVQconst {
 36795  			break
 36796  		}
 36797  		if v_0_0_0.AuxInt != 1 {
 36798  			break
 36799  		}
 36800  		x := v_0_0.Args[1]
 36801  		y := v_0.Args[1]
 36802  		if !(!config.nacl) {
 36803  			break
 36804  		}
 36805  		v.reset(OpAMD64SETAE)
 36806  		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 36807  		v0.AddArg(x)
 36808  		v0.AddArg(y)
 36809  		v.AddArg(v0)
 36810  		return true
 36811  	}
 36812  	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
 36813  	// cond: !config.nacl
 36814  	// result: (SETAE (BTQ x y))
 36815  	for {
 36816  		v_0 := v.Args[0]
 36817  		if v_0.Op != OpAMD64TESTQ {
 36818  			break
 36819  		}
 36820  		_ = v_0.Args[1]
 36821  		y := v_0.Args[0]
 36822  		v_0_1 := v_0.Args[1]
 36823  		if v_0_1.Op != OpAMD64SHLQ {
 36824  			break
 36825  		}
 36826  		_ = v_0_1.Args[1]
 36827  		v_0_1_0 := v_0_1.Args[0]
 36828  		if v_0_1_0.Op != OpAMD64MOVQconst {
 36829  			break
 36830  		}
 36831  		if v_0_1_0.AuxInt != 1 {
 36832  			break
 36833  		}
 36834  		x := v_0_1.Args[1]
 36835  		if !(!config.nacl) {
 36836  			break
 36837  		}
 36838  		v.reset(OpAMD64SETAE)
 36839  		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 36840  		v0.AddArg(x)
 36841  		v0.AddArg(y)
 36842  		v.AddArg(v0)
 36843  		return true
 36844  	}
 36845  	// match: (SETEQ (TESTLconst [c] x))
 36846  	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
 36847  	// result: (SETAE (BTLconst [log2(c)] x))
 36848  	for {
 36849  		v_0 := v.Args[0]
 36850  		if v_0.Op != OpAMD64TESTLconst {
 36851  			break
 36852  		}
 36853  		c := v_0.AuxInt
 36854  		x := v_0.Args[0]
 36855  		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
 36856  			break
 36857  		}
 36858  		v.reset(OpAMD64SETAE)
 36859  		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
 36860  		v0.AuxInt = log2(c)
 36861  		v0.AddArg(x)
 36862  		v.AddArg(v0)
 36863  		return true
 36864  	}
 36865  	// match: (SETEQ (TESTQconst [c] x))
 36866  	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 36867  	// result: (SETAE (BTQconst [log2(c)] x))
 36868  	for {
 36869  		v_0 := v.Args[0]
 36870  		if v_0.Op != OpAMD64TESTQconst {
 36871  			break
 36872  		}
 36873  		c := v_0.AuxInt
 36874  		x := v_0.Args[0]
 36875  		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 36876  			break
 36877  		}
 36878  		v.reset(OpAMD64SETAE)
 36879  		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 36880  		v0.AuxInt = log2(c)
 36881  		v0.AddArg(x)
 36882  		v.AddArg(v0)
 36883  		return true
 36884  	}
 36885  	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
 36886  	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 36887  	// result: (SETAE (BTQconst [log2(c)] x))
 36888  	for {
 36889  		v_0 := v.Args[0]
 36890  		if v_0.Op != OpAMD64TESTQ {
 36891  			break
 36892  		}
 36893  		_ = v_0.Args[1]
 36894  		v_0_0 := v_0.Args[0]
 36895  		if v_0_0.Op != OpAMD64MOVQconst {
 36896  			break
 36897  		}
 36898  		c := v_0_0.AuxInt
 36899  		x := v_0.Args[1]
 36900  		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 36901  			break
 36902  		}
 36903  		v.reset(OpAMD64SETAE)
 36904  		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 36905  		v0.AuxInt = log2(c)
 36906  		v0.AddArg(x)
 36907  		v.AddArg(v0)
 36908  		return true
 36909  	}
 36910  	// match: (SETEQ (TESTQ x (MOVQconst [c])))
 36911  	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 36912  	// result: (SETAE (BTQconst [log2(c)] x))
 36913  	for {
 36914  		v_0 := v.Args[0]
 36915  		if v_0.Op != OpAMD64TESTQ {
 36916  			break
 36917  		}
 36918  		_ = v_0.Args[1]
 36919  		x := v_0.Args[0]
 36920  		v_0_1 := v_0.Args[1]
 36921  		if v_0_1.Op != OpAMD64MOVQconst {
 36922  			break
 36923  		}
 36924  		c := v_0_1.AuxInt
 36925  		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 36926  			break
 36927  		}
 36928  		v.reset(OpAMD64SETAE)
 36929  		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 36930  		v0.AuxInt = log2(c)
 36931  		v0.AddArg(x)
 36932  		v.AddArg(v0)
 36933  		return true
 36934  	}
 36935  	// match: (SETEQ (InvertFlags x))
 36936  	// cond:
 36937  	// result: (SETEQ x)
 36938  	for {
 36939  		v_0 := v.Args[0]
 36940  		if v_0.Op != OpAMD64InvertFlags {
 36941  			break
 36942  		}
 36943  		x := v_0.Args[0]
 36944  		v.reset(OpAMD64SETEQ)
 36945  		v.AddArg(x)
 36946  		return true
 36947  	}
 36948  	// match: (SETEQ (FlagEQ))
 36949  	// cond:
 36950  	// result: (MOVLconst [1])
 36951  	for {
 36952  		v_0 := v.Args[0]
 36953  		if v_0.Op != OpAMD64FlagEQ {
 36954  			break
 36955  		}
 36956  		v.reset(OpAMD64MOVLconst)
 36957  		v.AuxInt = 1
 36958  		return true
 36959  	}
 36960  	return false
 36961  }
 36962  func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
 36963  	// match: (SETEQ (FlagLT_ULT))
 36964  	// cond:
 36965  	// result: (MOVLconst [0])
 36966  	for {
 36967  		v_0 := v.Args[0]
 36968  		if v_0.Op != OpAMD64FlagLT_ULT {
 36969  			break
 36970  		}
 36971  		v.reset(OpAMD64MOVLconst)
 36972  		v.AuxInt = 0
 36973  		return true
 36974  	}
 36975  	// match: (SETEQ (FlagLT_UGT))
 36976  	// cond:
 36977  	// result: (MOVLconst [0])
 36978  	for {
 36979  		v_0 := v.Args[0]
 36980  		if v_0.Op != OpAMD64FlagLT_UGT {
 36981  			break
 36982  		}
 36983  		v.reset(OpAMD64MOVLconst)
 36984  		v.AuxInt = 0
 36985  		return true
 36986  	}
 36987  	// match: (SETEQ (FlagGT_ULT))
 36988  	// cond:
 36989  	// result: (MOVLconst [0])
 36990  	for {
 36991  		v_0 := v.Args[0]
 36992  		if v_0.Op != OpAMD64FlagGT_ULT {
 36993  			break
 36994  		}
 36995  		v.reset(OpAMD64MOVLconst)
 36996  		v.AuxInt = 0
 36997  		return true
 36998  	}
 36999  	// match: (SETEQ (FlagGT_UGT))
 37000  	// cond:
 37001  	// result: (MOVLconst [0])
 37002  	for {
 37003  		v_0 := v.Args[0]
 37004  		if v_0.Op != OpAMD64FlagGT_UGT {
 37005  			break
 37006  		}
 37007  		v.reset(OpAMD64MOVLconst)
 37008  		v.AuxInt = 0
 37009  		return true
 37010  	}
 37011  	return false
 37012  }
 37013  func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
 37014  	b := v.Block
 37015  	_ = b
 37016  	config := b.Func.Config
 37017  	_ = config
 37018  	// match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
 37019  	// cond: !config.nacl
 37020  	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
 37021  	for {
 37022  		off := v.AuxInt
 37023  		sym := v.Aux
 37024  		_ = v.Args[2]
 37025  		ptr := v.Args[0]
 37026  		v_1 := v.Args[1]
 37027  		if v_1.Op != OpAMD64TESTL {
 37028  			break
 37029  		}
 37030  		_ = v_1.Args[1]
 37031  		v_1_0 := v_1.Args[0]
 37032  		if v_1_0.Op != OpAMD64SHLL {
 37033  			break
 37034  		}
 37035  		_ = v_1_0.Args[1]
 37036  		v_1_0_0 := v_1_0.Args[0]
 37037  		if v_1_0_0.Op != OpAMD64MOVLconst {
 37038  			break
 37039  		}
 37040  		if v_1_0_0.AuxInt != 1 {
 37041  			break
 37042  		}
 37043  		x := v_1_0.Args[1]
 37044  		y := v_1.Args[1]
 37045  		mem := v.Args[2]
 37046  		if !(!config.nacl) {
 37047  			break
 37048  		}
 37049  		v.reset(OpAMD64SETAEmem)
 37050  		v.AuxInt = off
 37051  		v.Aux = sym
 37052  		v.AddArg(ptr)
 37053  		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 37054  		v0.AddArg(x)
 37055  		v0.AddArg(y)
 37056  		v.AddArg(v0)
 37057  		v.AddArg(mem)
 37058  		return true
 37059  	}
 37060  	// match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
 37061  	// cond: !config.nacl
 37062  	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
 37063  	for {
 37064  		off := v.AuxInt
 37065  		sym := v.Aux
 37066  		_ = v.Args[2]
 37067  		ptr := v.Args[0]
 37068  		v_1 := v.Args[1]
 37069  		if v_1.Op != OpAMD64TESTL {
 37070  			break
 37071  		}
 37072  		_ = v_1.Args[1]
 37073  		y := v_1.Args[0]
 37074  		v_1_1 := v_1.Args[1]
 37075  		if v_1_1.Op != OpAMD64SHLL {
 37076  			break
 37077  		}
 37078  		_ = v_1_1.Args[1]
 37079  		v_1_1_0 := v_1_1.Args[0]
 37080  		if v_1_1_0.Op != OpAMD64MOVLconst {
 37081  			break
 37082  		}
 37083  		if v_1_1_0.AuxInt != 1 {
 37084  			break
 37085  		}
 37086  		x := v_1_1.Args[1]
 37087  		mem := v.Args[2]
 37088  		if !(!config.nacl) {
 37089  			break
 37090  		}
 37091  		v.reset(OpAMD64SETAEmem)
 37092  		v.AuxInt = off
 37093  		v.Aux = sym
 37094  		v.AddArg(ptr)
 37095  		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 37096  		v0.AddArg(x)
 37097  		v0.AddArg(y)
 37098  		v.AddArg(v0)
 37099  		v.AddArg(mem)
 37100  		return true
 37101  	}
 37102  	// match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
 37103  	// cond: !config.nacl
 37104  	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
 37105  	for {
 37106  		off := v.AuxInt
 37107  		sym := v.Aux
 37108  		_ = v.Args[2]
 37109  		ptr := v.Args[0]
 37110  		v_1 := v.Args[1]
 37111  		if v_1.Op != OpAMD64TESTQ {
 37112  			break
 37113  		}
 37114  		_ = v_1.Args[1]
 37115  		v_1_0 := v_1.Args[0]
 37116  		if v_1_0.Op != OpAMD64SHLQ {
 37117  			break
 37118  		}
 37119  		_ = v_1_0.Args[1]
 37120  		v_1_0_0 := v_1_0.Args[0]
 37121  		if v_1_0_0.Op != OpAMD64MOVQconst {
 37122  			break
 37123  		}
 37124  		if v_1_0_0.AuxInt != 1 {
 37125  			break
 37126  		}
 37127  		x := v_1_0.Args[1]
 37128  		y := v_1.Args[1]
 37129  		mem := v.Args[2]
 37130  		if !(!config.nacl) {
 37131  			break
 37132  		}
 37133  		v.reset(OpAMD64SETAEmem)
 37134  		v.AuxInt = off
 37135  		v.Aux = sym
 37136  		v.AddArg(ptr)
 37137  		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 37138  		v0.AddArg(x)
 37139  		v0.AddArg(y)
 37140  		v.AddArg(v0)
 37141  		v.AddArg(mem)
 37142  		return true
 37143  	}
 37144  	// match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
 37145  	// cond: !config.nacl
 37146  	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
 37147  	for {
 37148  		off := v.AuxInt
 37149  		sym := v.Aux
 37150  		_ = v.Args[2]
 37151  		ptr := v.Args[0]
 37152  		v_1 := v.Args[1]
 37153  		if v_1.Op != OpAMD64TESTQ {
 37154  			break
 37155  		}
 37156  		_ = v_1.Args[1]
 37157  		y := v_1.Args[0]
 37158  		v_1_1 := v_1.Args[1]
 37159  		if v_1_1.Op != OpAMD64SHLQ {
 37160  			break
 37161  		}
 37162  		_ = v_1_1.Args[1]
 37163  		v_1_1_0 := v_1_1.Args[0]
 37164  		if v_1_1_0.Op != OpAMD64MOVQconst {
 37165  			break
 37166  		}
 37167  		if v_1_1_0.AuxInt != 1 {
 37168  			break
 37169  		}
 37170  		x := v_1_1.Args[1]
 37171  		mem := v.Args[2]
 37172  		if !(!config.nacl) {
 37173  			break
 37174  		}
 37175  		v.reset(OpAMD64SETAEmem)
 37176  		v.AuxInt = off
 37177  		v.Aux = sym
 37178  		v.AddArg(ptr)
 37179  		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 37180  		v0.AddArg(x)
 37181  		v0.AddArg(y)
 37182  		v.AddArg(v0)
 37183  		v.AddArg(mem)
 37184  		return true
 37185  	}
 37186  	// match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
 37187  	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
 37188  	// result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
 37189  	for {
 37190  		off := v.AuxInt
 37191  		sym := v.Aux
 37192  		_ = v.Args[2]
 37193  		ptr := v.Args[0]
 37194  		v_1 := v.Args[1]
 37195  		if v_1.Op != OpAMD64TESTLconst {
 37196  			break
 37197  		}
 37198  		c := v_1.AuxInt
 37199  		x := v_1.Args[0]
 37200  		mem := v.Args[2]
 37201  		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
 37202  			break
 37203  		}
 37204  		v.reset(OpAMD64SETAEmem)
 37205  		v.AuxInt = off
 37206  		v.Aux = sym
 37207  		v.AddArg(ptr)
 37208  		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
 37209  		v0.AuxInt = log2(c)
 37210  		v0.AddArg(x)
 37211  		v.AddArg(v0)
 37212  		v.AddArg(mem)
 37213  		return true
 37214  	}
 37215  	// match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
 37216  	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 37217  	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
 37218  	for {
 37219  		off := v.AuxInt
 37220  		sym := v.Aux
 37221  		_ = v.Args[2]
 37222  		ptr := v.Args[0]
 37223  		v_1 := v.Args[1]
 37224  		if v_1.Op != OpAMD64TESTQconst {
 37225  			break
 37226  		}
 37227  		c := v_1.AuxInt
 37228  		x := v_1.Args[0]
 37229  		mem := v.Args[2]
 37230  		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 37231  			break
 37232  		}
 37233  		v.reset(OpAMD64SETAEmem)
 37234  		v.AuxInt = off
 37235  		v.Aux = sym
 37236  		v.AddArg(ptr)
 37237  		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 37238  		v0.AuxInt = log2(c)
 37239  		v0.AddArg(x)
 37240  		v.AddArg(v0)
 37241  		v.AddArg(mem)
 37242  		return true
 37243  	}
 37244  	// match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
 37245  	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 37246  	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
 37247  	for {
 37248  		off := v.AuxInt
 37249  		sym := v.Aux
 37250  		_ = v.Args[2]
 37251  		ptr := v.Args[0]
 37252  		v_1 := v.Args[1]
 37253  		if v_1.Op != OpAMD64TESTQ {
 37254  			break
 37255  		}
 37256  		_ = v_1.Args[1]
 37257  		v_1_0 := v_1.Args[0]
 37258  		if v_1_0.Op != OpAMD64MOVQconst {
 37259  			break
 37260  		}
 37261  		c := v_1_0.AuxInt
 37262  		x := v_1.Args[1]
 37263  		mem := v.Args[2]
 37264  		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 37265  			break
 37266  		}
 37267  		v.reset(OpAMD64SETAEmem)
 37268  		v.AuxInt = off
 37269  		v.Aux = sym
 37270  		v.AddArg(ptr)
 37271  		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 37272  		v0.AuxInt = log2(c)
 37273  		v0.AddArg(x)
 37274  		v.AddArg(v0)
 37275  		v.AddArg(mem)
 37276  		return true
 37277  	}
 37278  	// match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
 37279  	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 37280  	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
 37281  	for {
 37282  		off := v.AuxInt
 37283  		sym := v.Aux
 37284  		_ = v.Args[2]
 37285  		ptr := v.Args[0]
 37286  		v_1 := v.Args[1]
 37287  		if v_1.Op != OpAMD64TESTQ {
 37288  			break
 37289  		}
 37290  		_ = v_1.Args[1]
 37291  		x := v_1.Args[0]
 37292  		v_1_1 := v_1.Args[1]
 37293  		if v_1_1.Op != OpAMD64MOVQconst {
 37294  			break
 37295  		}
 37296  		c := v_1_1.AuxInt
 37297  		mem := v.Args[2]
 37298  		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 37299  			break
 37300  		}
 37301  		v.reset(OpAMD64SETAEmem)
 37302  		v.AuxInt = off
 37303  		v.Aux = sym
 37304  		v.AddArg(ptr)
 37305  		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 37306  		v0.AuxInt = log2(c)
 37307  		v0.AddArg(x)
 37308  		v.AddArg(v0)
 37309  		v.AddArg(mem)
 37310  		return true
 37311  	}
 37312  	// match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
 37313  	// cond:
 37314  	// result: (SETEQmem [off] {sym} ptr x mem)
 37315  	for {
 37316  		off := v.AuxInt
 37317  		sym := v.Aux
 37318  		_ = v.Args[2]
 37319  		ptr := v.Args[0]
 37320  		v_1 := v.Args[1]
 37321  		if v_1.Op != OpAMD64InvertFlags {
 37322  			break
 37323  		}
 37324  		x := v_1.Args[0]
 37325  		mem := v.Args[2]
 37326  		v.reset(OpAMD64SETEQmem)
 37327  		v.AuxInt = off
 37328  		v.Aux = sym
 37329  		v.AddArg(ptr)
 37330  		v.AddArg(x)
 37331  		v.AddArg(mem)
 37332  		return true
 37333  	}
 37334  	// match: (SETEQmem [off1] {sym} (ADDQconst [off2] base) val mem)
 37335  	// cond: is32Bit(off1+off2)
 37336  	// result: (SETEQmem [off1+off2] {sym} base val mem)
 37337  	for {
 37338  		off1 := v.AuxInt
 37339  		sym := v.Aux
 37340  		_ = v.Args[2]
 37341  		v_0 := v.Args[0]
 37342  		if v_0.Op != OpAMD64ADDQconst {
 37343  			break
 37344  		}
 37345  		off2 := v_0.AuxInt
 37346  		base := v_0.Args[0]
 37347  		val := v.Args[1]
 37348  		mem := v.Args[2]
 37349  		if !(is32Bit(off1 + off2)) {
 37350  			break
 37351  		}
 37352  		v.reset(OpAMD64SETEQmem)
 37353  		v.AuxInt = off1 + off2
 37354  		v.Aux = sym
 37355  		v.AddArg(base)
 37356  		v.AddArg(val)
 37357  		v.AddArg(mem)
 37358  		return true
 37359  	}
 37360  	return false
 37361  }
// rewriteValueAMD64_OpAMD64SETEQmem_10 applies the second batch of generated
// rewrite rules for OpAMD64SETEQmem: folding a LEAQ address computation into
// the store's offset/symbol, and lowering a statically-known flags operand
// (FlagEQ, FlagLT_*, FlagGT_*) to a plain MOVBstore of the constant 0 or 1.
// It reports whether v was rewritten. Each rule's match/cond/result comment
// below is emitted by the generator from gen/AMD64.rules.
func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even if no rule in this batch uses it
	// match: (SETEQmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETEQmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETG_0 applies the generated rewrite rules for
// OpAMD64SETG (set byte if signed greater-than): swap to SETL under
// InvertFlags, and fold a statically-known flags operand to MOVLconst 0 or 1
// (1 only for the FlagGT_* states). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETGE_0 applies the generated rewrite rules for
// OpAMD64SETGE (set byte if signed greater-or-equal): swap to SETLE under
// InvertFlags, and fold a statically-known flags operand to MOVLconst 0 or 1
// (1 for FlagEQ and the FlagGT_* states). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETGEmem_0 applies the generated rewrite rules for
// OpAMD64SETGEmem (store byte of signed greater-or-equal directly to memory):
// swap to SETLEmem under InvertFlags, fold ADDQconst/LEAQ address arithmetic
// into the store's offset/symbol, and lower a statically-known flags operand
// to a MOVBstore of the constant 0 or 1. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even when no rule in this batch uses it
	// match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETGEmem [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETGEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETGmem_0 applies the generated rewrite rules for
// OpAMD64SETGmem (store byte of signed greater-than directly to memory):
// swap to SETLmem under InvertFlags, fold ADDQconst/LEAQ address arithmetic
// into the store's offset/symbol, and lower a statically-known flags operand
// to a MOVBstore of the constant 0 or 1. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even when no rule in this batch uses it
	// match: (SETGmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETGmem [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETGmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETL_0 applies the generated rewrite rules for
// OpAMD64SETL (set byte if signed less-than): swap to SETG under InvertFlags,
// and fold a statically-known flags operand to MOVLconst 0 or 1 (1 only for
// the FlagLT_* states). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETLE_0 applies the generated rewrite rules for
// OpAMD64SETLE (set byte if signed less-or-equal): swap to SETGE under
// InvertFlags, and fold a statically-known flags operand to MOVLconst 0 or 1
// (1 for FlagEQ and the FlagLT_* states). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
 38203  func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
 38204  	b := v.Block
 38205  	_ = b
 38206  	// match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
 38207  	// cond:
 38208  	// result: (SETGEmem [off] {sym} ptr x mem)
 38209  	for {
 38210  		off := v.AuxInt
 38211  		sym := v.Aux
 38212  		_ = v.Args[2]
 38213  		ptr := v.Args[0]
 38214  		v_1 := v.Args[1]
 38215  		if v_1.Op != OpAMD64InvertFlags {
 38216  			break
 38217  		}
 38218  		x := v_1.Args[0]
 38219  		mem := v.Args[2]
 38220  		v.reset(OpAMD64SETGEmem)
 38221  		v.AuxInt = off
 38222  		v.Aux = sym
 38223  		v.AddArg(ptr)
 38224  		v.AddArg(x)
 38225  		v.AddArg(mem)
 38226  		return true
 38227  	}
 38228  	// match: (SETLEmem [off1] {sym} (ADDQconst [off2] base) val mem)
 38229  	// cond: is32Bit(off1+off2)
 38230  	// result: (SETLEmem [off1+off2] {sym} base val mem)
 38231  	for {
 38232  		off1 := v.AuxInt
 38233  		sym := v.Aux
 38234  		_ = v.Args[2]
 38235  		v_0 := v.Args[0]
 38236  		if v_0.Op != OpAMD64ADDQconst {
 38237  			break
 38238  		}
 38239  		off2 := v_0.AuxInt
 38240  		base := v_0.Args[0]
 38241  		val := v.Args[1]
 38242  		mem := v.Args[2]
 38243  		if !(is32Bit(off1 + off2)) {
 38244  			break
 38245  		}
 38246  		v.reset(OpAMD64SETLEmem)
 38247  		v.AuxInt = off1 + off2
 38248  		v.Aux = sym
 38249  		v.AddArg(base)
 38250  		v.AddArg(val)
 38251  		v.AddArg(mem)
 38252  		return true
 38253  	}
 38254  	// match: (SETLEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 38255  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 38256  	// result: (SETLEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 38257  	for {
 38258  		off1 := v.AuxInt
 38259  		sym1 := v.Aux
 38260  		_ = v.Args[2]
 38261  		v_0 := v.Args[0]
 38262  		if v_0.Op != OpAMD64LEAQ {
 38263  			break
 38264  		}
 38265  		off2 := v_0.AuxInt
 38266  		sym2 := v_0.Aux
 38267  		base := v_0.Args[0]
 38268  		val := v.Args[1]
 38269  		mem := v.Args[2]
 38270  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 38271  			break
 38272  		}
 38273  		v.reset(OpAMD64SETLEmem)
 38274  		v.AuxInt = off1 + off2
 38275  		v.Aux = mergeSym(sym1, sym2)
 38276  		v.AddArg(base)
 38277  		v.AddArg(val)
 38278  		v.AddArg(mem)
 38279  		return true
 38280  	}
 38281  	// match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem)
 38282  	// cond:
 38283  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 38284  	for {
 38285  		off := v.AuxInt
 38286  		sym := v.Aux
 38287  		_ = v.Args[2]
 38288  		ptr := v.Args[0]
 38289  		x := v.Args[1]
 38290  		if x.Op != OpAMD64FlagEQ {
 38291  			break
 38292  		}
 38293  		mem := v.Args[2]
 38294  		v.reset(OpAMD64MOVBstore)
 38295  		v.AuxInt = off
 38296  		v.Aux = sym
 38297  		v.AddArg(ptr)
 38298  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 38299  		v0.AuxInt = 1
 38300  		v.AddArg(v0)
 38301  		v.AddArg(mem)
 38302  		return true
 38303  	}
 38304  	// match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
 38305  	// cond:
 38306  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 38307  	for {
 38308  		off := v.AuxInt
 38309  		sym := v.Aux
 38310  		_ = v.Args[2]
 38311  		ptr := v.Args[0]
 38312  		x := v.Args[1]
 38313  		if x.Op != OpAMD64FlagLT_ULT {
 38314  			break
 38315  		}
 38316  		mem := v.Args[2]
 38317  		v.reset(OpAMD64MOVBstore)
 38318  		v.AuxInt = off
 38319  		v.Aux = sym
 38320  		v.AddArg(ptr)
 38321  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 38322  		v0.AuxInt = 1
 38323  		v.AddArg(v0)
 38324  		v.AddArg(mem)
 38325  		return true
 38326  	}
 38327  	// match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
 38328  	// cond:
 38329  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
 38330  	for {
 38331  		off := v.AuxInt
 38332  		sym := v.Aux
 38333  		_ = v.Args[2]
 38334  		ptr := v.Args[0]
 38335  		x := v.Args[1]
 38336  		if x.Op != OpAMD64FlagLT_UGT {
 38337  			break
 38338  		}
 38339  		mem := v.Args[2]
 38340  		v.reset(OpAMD64MOVBstore)
 38341  		v.AuxInt = off
 38342  		v.Aux = sym
 38343  		v.AddArg(ptr)
 38344  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 38345  		v0.AuxInt = 1
 38346  		v.AddArg(v0)
 38347  		v.AddArg(mem)
 38348  		return true
 38349  	}
 38350  	// match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 38351  	// cond:
 38352  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 38353  	for {
 38354  		off := v.AuxInt
 38355  		sym := v.Aux
 38356  		_ = v.Args[2]
 38357  		ptr := v.Args[0]
 38358  		x := v.Args[1]
 38359  		if x.Op != OpAMD64FlagGT_ULT {
 38360  			break
 38361  		}
 38362  		mem := v.Args[2]
 38363  		v.reset(OpAMD64MOVBstore)
 38364  		v.AuxInt = off
 38365  		v.Aux = sym
 38366  		v.AddArg(ptr)
 38367  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 38368  		v0.AuxInt = 0
 38369  		v.AddArg(v0)
 38370  		v.AddArg(mem)
 38371  		return true
 38372  	}
 38373  	// match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
 38374  	// cond:
 38375  	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
 38376  	for {
 38377  		off := v.AuxInt
 38378  		sym := v.Aux
 38379  		_ = v.Args[2]
 38380  		ptr := v.Args[0]
 38381  		x := v.Args[1]
 38382  		if x.Op != OpAMD64FlagGT_UGT {
 38383  			break
 38384  		}
 38385  		mem := v.Args[2]
 38386  		v.reset(OpAMD64MOVBstore)
 38387  		v.AuxInt = off
 38388  		v.Aux = sym
 38389  		v.AddArg(ptr)
 38390  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
 38391  		v0.AuxInt = 0
 38392  		v.AddArg(v0)
 38393  		v.AddArg(mem)
 38394  		return true
 38395  	}
 38396  	return false
 38397  }
// rewriteValueAMD64_OpAMD64SETLmem_0 applies the generated rewrite rules for
// OpAMD64SETLmem (store the signed less-than condition byte to memory).
// Rules are tried in order and the first match wins:
//   - (InvertFlags x) as the flag arg: flip the comparison to SETGmem;
//   - fold an (ADDQconst [off2] base) or (LEAQ [off2] {sym2} base) address
//     into the store's offset/symbol when the combined offset stays 32-bit;
//   - a constant flag arg (FlagEQ / FlagLT_* / FlagGT_*): the condition is
//     statically known, so reduce to a MOVBstore of the constant 0 or 1.
// It reports whether some rule rewrote v in place.
func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even if no rule below uses it (generated code)
	// match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // touch the last arg up front; generated hint so later Args loads are in-bounds
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETLmem [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETLmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNE_0 applies the first batch of rewrite rules
// for OpAMD64SETNE (materialize the not-equal condition as 0/1).
// Rules are tried in order and the first match wins:
//   - (TESTL/TESTQ (SHLL/SHLQ (MOVconst [1]) x) y) — a single-bit test —
//     becomes a BT (bit test) instruction plus SETB (skipped on NaCl, where
//     BT is not available);
//   - (TESTLconst/TESTQconst [c] x) with c a power of two becomes
//     BTLconst/BTQconst [log2(c)] plus SETB;
//   - (InvertFlags x): SETNE is symmetric under operand swap, so the
//     inversion is simply dropped;
//   - (FlagEQ): statically false, fold to (MOVLconst [0]).
// It reports whether some rule rewrote v in place.
func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even if no rule below uses it (generated code)
	config := b.Func.Config
	_ = config // likewise for config; only the !config.nacl rules need it
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETB (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (InvertFlags x))
	// cond:
	// result: (SETNE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
 38847  func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
 38848  	// match: (SETNE (FlagLT_ULT))
 38849  	// cond:
 38850  	// result: (MOVLconst [1])
 38851  	for {
 38852  		v_0 := v.Args[0]
 38853  		if v_0.Op != OpAMD64FlagLT_ULT {
 38854  			break
 38855  		}
 38856  		v.reset(OpAMD64MOVLconst)
 38857  		v.AuxInt = 1
 38858  		return true
 38859  	}
 38860  	// match: (SETNE (FlagLT_UGT))
 38861  	// cond:
 38862  	// result: (MOVLconst [1])
 38863  	for {
 38864  		v_0 := v.Args[0]
 38865  		if v_0.Op != OpAMD64FlagLT_UGT {
 38866  			break
 38867  		}
 38868  		v.reset(OpAMD64MOVLconst)
 38869  		v.AuxInt = 1
 38870  		return true
 38871  	}
 38872  	// match: (SETNE (FlagGT_ULT))
 38873  	// cond:
 38874  	// result: (MOVLconst [1])
 38875  	for {
 38876  		v_0 := v.Args[0]
 38877  		if v_0.Op != OpAMD64FlagGT_ULT {
 38878  			break
 38879  		}
 38880  		v.reset(OpAMD64MOVLconst)
 38881  		v.AuxInt = 1
 38882  		return true
 38883  	}
 38884  	// match: (SETNE (FlagGT_UGT))
 38885  	// cond:
 38886  	// result: (MOVLconst [1])
 38887  	for {
 38888  		v_0 := v.Args[0]
 38889  		if v_0.Op != OpAMD64FlagGT_UGT {
 38890  			break
 38891  		}
 38892  		v.reset(OpAMD64MOVLconst)
 38893  		v.AuxInt = 1
 38894  		return true
 38895  	}
 38896  	return false
 38897  }
// rewriteValueAMD64_OpAMD64SETNEmem_0 applies the first batch of rewrite
// rules for OpAMD64SETNEmem (store the not-equal condition byte directly to
// memory). Rules are tried in order and the first match wins:
//   - single-bit tests (TESTL/TESTQ against (SHL* (MOVconst [1]) x), or
//     TEST*const / TESTQ with a power-of-two constant) become a BT bit-test
//     feeding SETBmem (skipped on NaCl, where BT is not available);
//   - (InvertFlags x): SETNE is symmetric, so the inversion is dropped;
//   - an (ADDQconst [off2] base) address is folded into the store offset
//     when the combined offset stays 32-bit.
// It reports whether some rule rewrote v in place.
func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even if no rule below uses it (generated code)
	config := b.Func.Config
	_ = config // likewise for config; only the !config.nacl rules need it
	// match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2] // touch the last arg up front; generated hint so later Args loads are in-bounds
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETNEmem [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNEmem_10 applies the remaining rewrite rules
// for OpAMD64SETNEmem. Rules are tried in order and the first match wins:
//   - fold an (LEAQ [off2] {sym2} base) address computation into the store's
//     offset and symbol when the combined offset stays 32-bit and the two
//     symbols can be merged;
//   - a constant flag argument: the condition is statically known, so the
//     store reduces to a MOVBstore of 0 (FlagEQ) or 1 (any FlagLT_*/FlagGT_*).
// It reports whether some rule rewrote v in place.
func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
	b := v.Block
	_ = b // keep b referenced even if no rule below uses it (generated code)
	// match: (SETNEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETNEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2] // touch the last arg up front; generated hint so later Args loads are in-bounds
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
 39394  func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
 39395  	b := v.Block
 39396  	_ = b
 39397  	// match: (SHLL x (MOVQconst [c]))
 39398  	// cond:
 39399  	// result: (SHLLconst [c&31] x)
 39400  	for {
 39401  		_ = v.Args[1]
 39402  		x := v.Args[0]
 39403  		v_1 := v.Args[1]
 39404  		if v_1.Op != OpAMD64MOVQconst {
 39405  			break
 39406  		}
 39407  		c := v_1.AuxInt
 39408  		v.reset(OpAMD64SHLLconst)
 39409  		v.AuxInt = c & 31
 39410  		v.AddArg(x)
 39411  		return true
 39412  	}
 39413  	// match: (SHLL x (MOVLconst [c]))
 39414  	// cond:
 39415  	// result: (SHLLconst [c&31] x)
 39416  	for {
 39417  		_ = v.Args[1]
 39418  		x := v.Args[0]
 39419  		v_1 := v.Args[1]
 39420  		if v_1.Op != OpAMD64MOVLconst {
 39421  			break
 39422  		}
 39423  		c := v_1.AuxInt
 39424  		v.reset(OpAMD64SHLLconst)
 39425  		v.AuxInt = c & 31
 39426  		v.AddArg(x)
 39427  		return true
 39428  	}
 39429  	// match: (SHLL x (ADDQconst [c] y))
 39430  	// cond: c & 31 == 0
 39431  	// result: (SHLL x y)
 39432  	for {
 39433  		_ = v.Args[1]
 39434  		x := v.Args[0]
 39435  		v_1 := v.Args[1]
 39436  		if v_1.Op != OpAMD64ADDQconst {
 39437  			break
 39438  		}
 39439  		c := v_1.AuxInt
 39440  		y := v_1.Args[0]
 39441  		if !(c&31 == 0) {
 39442  			break
 39443  		}
 39444  		v.reset(OpAMD64SHLL)
 39445  		v.AddArg(x)
 39446  		v.AddArg(y)
 39447  		return true
 39448  	}
 39449  	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
 39450  	// cond: c & 31 == 0
 39451  	// result: (SHLL x (NEGQ <t> y))
 39452  	for {
 39453  		_ = v.Args[1]
 39454  		x := v.Args[0]
 39455  		v_1 := v.Args[1]
 39456  		if v_1.Op != OpAMD64NEGQ {
 39457  			break
 39458  		}
 39459  		t := v_1.Type
 39460  		v_1_0 := v_1.Args[0]
 39461  		if v_1_0.Op != OpAMD64ADDQconst {
 39462  			break
 39463  		}
 39464  		c := v_1_0.AuxInt
 39465  		y := v_1_0.Args[0]
 39466  		if !(c&31 == 0) {
 39467  			break
 39468  		}
 39469  		v.reset(OpAMD64SHLL)
 39470  		v.AddArg(x)
 39471  		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
 39472  		v0.AddArg(y)
 39473  		v.AddArg(v0)
 39474  		return true
 39475  	}
 39476  	// match: (SHLL x (ANDQconst [c] y))
 39477  	// cond: c & 31 == 31
 39478  	// result: (SHLL x y)
 39479  	for {
 39480  		_ = v.Args[1]
 39481  		x := v.Args[0]
 39482  		v_1 := v.Args[1]
 39483  		if v_1.Op != OpAMD64ANDQconst {
 39484  			break
 39485  		}
 39486  		c := v_1.AuxInt
 39487  		y := v_1.Args[0]
 39488  		if !(c&31 == 31) {
 39489  			break
 39490  		}
 39491  		v.reset(OpAMD64SHLL)
 39492  		v.AddArg(x)
 39493  		v.AddArg(y)
 39494  		return true
 39495  	}
 39496  	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
 39497  	// cond: c & 31 == 31
 39498  	// result: (SHLL x (NEGQ <t> y))
 39499  	for {
 39500  		_ = v.Args[1]
 39501  		x := v.Args[0]
 39502  		v_1 := v.Args[1]
 39503  		if v_1.Op != OpAMD64NEGQ {
 39504  			break
 39505  		}
 39506  		t := v_1.Type
 39507  		v_1_0 := v_1.Args[0]
 39508  		if v_1_0.Op != OpAMD64ANDQconst {
 39509  			break
 39510  		}
 39511  		c := v_1_0.AuxInt
 39512  		y := v_1_0.Args[0]
 39513  		if !(c&31 == 31) {
 39514  			break
 39515  		}
 39516  		v.reset(OpAMD64SHLL)
 39517  		v.AddArg(x)
 39518  		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
 39519  		v0.AddArg(y)
 39520  		v.AddArg(v0)
 39521  		return true
 39522  	}
 39523  	// match: (SHLL x (ADDLconst [c] y))
 39524  	// cond: c & 31 == 0
 39525  	// result: (SHLL x y)
 39526  	for {
 39527  		_ = v.Args[1]
 39528  		x := v.Args[0]
 39529  		v_1 := v.Args[1]
 39530  		if v_1.Op != OpAMD64ADDLconst {
 39531  			break
 39532  		}
 39533  		c := v_1.AuxInt
 39534  		y := v_1.Args[0]
 39535  		if !(c&31 == 0) {
 39536  			break
 39537  		}
 39538  		v.reset(OpAMD64SHLL)
 39539  		v.AddArg(x)
 39540  		v.AddArg(y)
 39541  		return true
 39542  	}
 39543  	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
 39544  	// cond: c & 31 == 0
 39545  	// result: (SHLL x (NEGL <t> y))
 39546  	for {
 39547  		_ = v.Args[1]
 39548  		x := v.Args[0]
 39549  		v_1 := v.Args[1]
 39550  		if v_1.Op != OpAMD64NEGL {
 39551  			break
 39552  		}
 39553  		t := v_1.Type
 39554  		v_1_0 := v_1.Args[0]
 39555  		if v_1_0.Op != OpAMD64ADDLconst {
 39556  			break
 39557  		}
 39558  		c := v_1_0.AuxInt
 39559  		y := v_1_0.Args[0]
 39560  		if !(c&31 == 0) {
 39561  			break
 39562  		}
 39563  		v.reset(OpAMD64SHLL)
 39564  		v.AddArg(x)
 39565  		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
 39566  		v0.AddArg(y)
 39567  		v.AddArg(v0)
 39568  		return true
 39569  	}
 39570  	// match: (SHLL x (ANDLconst [c] y))
 39571  	// cond: c & 31 == 31
 39572  	// result: (SHLL x y)
 39573  	for {
 39574  		_ = v.Args[1]
 39575  		x := v.Args[0]
 39576  		v_1 := v.Args[1]
 39577  		if v_1.Op != OpAMD64ANDLconst {
 39578  			break
 39579  		}
 39580  		c := v_1.AuxInt
 39581  		y := v_1.Args[0]
 39582  		if !(c&31 == 31) {
 39583  			break
 39584  		}
 39585  		v.reset(OpAMD64SHLL)
 39586  		v.AddArg(x)
 39587  		v.AddArg(y)
 39588  		return true
 39589  	}
 39590  	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
 39591  	// cond: c & 31 == 31
 39592  	// result: (SHLL x (NEGL <t> y))
 39593  	for {
 39594  		_ = v.Args[1]
 39595  		x := v.Args[0]
 39596  		v_1 := v.Args[1]
 39597  		if v_1.Op != OpAMD64NEGL {
 39598  			break
 39599  		}
 39600  		t := v_1.Type
 39601  		v_1_0 := v_1.Args[0]
 39602  		if v_1_0.Op != OpAMD64ANDLconst {
 39603  			break
 39604  		}
 39605  		c := v_1_0.AuxInt
 39606  		y := v_1_0.Args[0]
 39607  		if !(c&31 == 31) {
 39608  			break
 39609  		}
 39610  		v.reset(OpAMD64SHLL)
 39611  		v.AddArg(x)
 39612  		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
 39613  		v0.AddArg(y)
 39614  		v.AddArg(v0)
 39615  		return true
 39616  	}
 39617  	return false
 39618  }
// rewriteValueAMD64_OpAMD64SHLLconst_0 rewrites (SHLLconst x [0]) into a
// plain copy of x, since shifting left by zero is a no-op. Returns true if
// the rewrite fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLQ_0 tries the generated rewrite rules for
// (SHLQ x shift) in order, rewriting v in place and returning true on the
// first match; it returns false if no rule applies. Constant shift counts
// are masked with &63 (matching the hardware's 64-bit shift-count masking),
// and ADD/AND adjustments of the count that provably do not change the
// masked count are dropped.
// NOTE(review): generated from gen/AMD64.rules — change the rules and
// regenerate rather than editing this function directly.
func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLQconst_0 rewrites (SHLQconst x [0]) into a
// plain copy of x, since shifting left by zero is a no-op. Returns true if
// the rewrite fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRB_0 tries the generated rewrite rules for
// (SHRB x shift) in order, rewriting v in place and returning true on the
// first match; it returns false if no rule applies. A constant count whose
// masked value (&31) is below 8 becomes SHRBconst; a masked count of 8 or
// more shifts out all bits of the byte, so the result is constant zero.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRBconst_0 rewrites (SHRBconst x [0]) into a
// plain copy of x, since shifting right by zero is a no-op. Returns true if
// the rewrite fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRL_0 tries the generated rewrite rules for
// (SHRL x shift) in order, rewriting v in place and returning true on the
// first match; it returns false if no rule applies. Constant shift counts
// are masked with &31 (matching the hardware's 32-bit shift-count masking),
// and ADD/AND adjustments of the count that provably do not change the
// masked count are dropped.
// NOTE(review): generated from gen/AMD64.rules — change the rules and
// regenerate rather than editing this function directly.
func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRLconst_0 rewrites (SHRLconst x [0]) into a
// plain copy of x, since shifting right by zero is a no-op. Returns true if
// the rewrite fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRQ_0 tries the generated rewrite rules for
// (SHRQ x shift) in order, rewriting v in place and returning true on the
// first match; it returns false if no rule applies. Constant shift counts
// are masked with &63 (matching the hardware's 64-bit shift-count masking),
// and ADD/AND adjustments of the count that provably do not change the
// masked count are dropped.
// NOTE(review): generated from gen/AMD64.rules — change the rules and
// regenerate rather than editing this function directly.
func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRQconst_0 rewrites (SHRQconst x [0]) into a
// plain copy of x, since shifting right by zero is a no-op. Returns true if
// the rewrite fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRW_0 tries the generated rewrite rules for
// (SHRW x shift) in order, rewriting v in place and returning true on the
// first match; it returns false if no rule applies. A constant count whose
// masked value (&31) is below 16 becomes SHRWconst; a masked count of 16 or
// more shifts out all bits of the 16-bit value, so the result is constant
// zero.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRWconst_0 rewrites (SHRWconst x [0]) into a
// plain copy of x, since shifting right by zero is a no-op. Returns true if
// the rewrite fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBL_0 tries the generated rewrite rules for
// (SUBL x y) in order, rewriting v in place and returning true on the first
// match; it returns false if no rule applies. Rules: fold a constant
// right operand into SUBLconst; turn constant-minus-x into NEGL of
// SUBLconst (since only the right operand can be immediate); x-x is
// constant zero; and a load feeding the right operand can be merged into
// SUBLmem when canMergeLoad allows it.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBLconst_0 rewrites (SUBLconst [c] x): when the
// low 32 bits of c are zero the subtraction is a no-op and becomes a copy
// of x; otherwise it is canonicalized to (ADDLconst [-c] x) with the negated
// constant truncated to 32 bits. This function always returns true — the
// final rule has no condition, so the generator emits no trailing
// "return false".
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpAMD64SUBLmem_0 rewrites a SUBLmem whose memory operand
// is the value just stored by a MOVSSstore to the same [off]{sym} ptr:
// instead of reloading from memory, reinterpret the stored float bits as an
// integer (MOVLf2i) and subtract directly. Returns true if the rewrite
// fired, false otherwise.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQ_0 tries the generated rewrite rules for
// (SUBQ x y) in order, rewriting v in place and returning true on the first
// match; it returns false if no rule applies. Rules: fold a 32-bit-
// representable constant right operand into SUBQconst (is32Bit guards the
// immediate-size limit); turn constant-minus-x into NEGQ of SUBQconst;
// x-x is constant zero; and a load feeding the right operand can be merged
// into SUBQmem when canMergeLoad allows it.
// NOTE(review): generated from gen/AMD64.rules — do not hand-edit.
func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQconst_0 applies the generated rewrite rules
// for OpAMD64SUBQconst: eliding a zero subtract, converting to ADDQconst
// with a negated constant, constant-folding against MOVQconst, and
// collapsing nested SUBQconst chains. Returns true if v was rewritten.
// Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		// The guard excludes c == -(1<<31): its negation (1<<31) is not a
		// valid 32-bit immediate for ADDQconst.
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQmem_0 applies the generated rewrite rule for
// OpAMD64SUBQmem: when the memory operand was just stored as a double
// (MOVSDstore to the same [off]{sym} ptr), subtract the reinterpreted bits
// directly via MOVQf2i instead of reloading from memory. Returns true if v
// was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSD_0 applies the generated rewrite rule for
// OpAMD64SUBSD: merge a MOVSDload of the subtrahend into the subtract
// (SUBSDmem) when canMergeLoad allows it, clobbering the load. Returns true
// if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSDmem_0 applies the generated rewrite rule for
// OpAMD64SUBSDmem: when the memory operand was just stored as an integer
// (MOVQstore to the same [off]{sym} ptr), subtract the reinterpreted value
// directly via MOVQi2f instead of reloading from memory. Returns true if v
// was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSS_0 applies the generated rewrite rule for
// OpAMD64SUBSS: merge a MOVSSload of the subtrahend into the subtract
// (SUBSSmem) when canMergeLoad allows it, clobbering the load. Returns true
// if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSSmem_0 applies the generated rewrite rule for
// OpAMD64SUBSSmem: when the memory operand was just stored as an integer
// (MOVLstore to the same [off]{sym} ptr), subtract the reinterpreted value
// directly via MOVLi2f instead of reloading from memory. Returns true if v
// was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTB_0 applies the generated rewrite rules for
// OpAMD64TESTB: fold a MOVLconst operand (either side, since TEST is
// commutative) into TESTBconst. Returns true if v was rewritten.
// Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
	// match: (TESTB (MOVLconst [c]) x)
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB x (MOVLconst [c]))
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTL_0 applies the generated rewrite rules for
// OpAMD64TESTL: fold a MOVLconst operand (either side, since TEST is
// commutative) into TESTLconst. Returns true if v was rewritten.
// Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
	// match: (TESTL (MOVLconst [c]) x)
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL x (MOVLconst [c]))
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQ_0 applies the generated rewrite rules for
// OpAMD64TESTQ: fold a MOVQconst operand (either side) into TESTQconst,
// but only when the constant fits in a signed 32-bit immediate (is32Bit).
// Returns true if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTW_0 applies the generated rewrite rules for
// OpAMD64TESTW: fold a MOVLconst operand (either side, since TEST is
// commutative) into TESTWconst. Returns true if v was rewritten.
// Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
	// match: (TESTW (MOVLconst [c]) x)
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW x (MOVLconst [c]))
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XADDLlock_0 applies the generated rewrite rule
// for OpAMD64XADDLlock: fold an ADDQconst on the address operand into the
// instruction's offset when the combined offset still fits in 32 bits.
// Returns true if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XADDQlock_0 applies the generated rewrite rule
// for OpAMD64XADDQlock: fold an ADDQconst on the address operand into the
// instruction's offset when the combined offset still fits in 32 bits.
// Returns true if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGL_0 applies the generated rewrite rules for
// OpAMD64XCHGL: fold an ADDQconst or LEAQ on the address operand into the
// instruction's offset/symbol when the combined offset fits in 32 bits (the
// LEAQ case also requires mergeable symbols and a non-SB base). Returns true
// if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGQ_0 applies the generated rewrite rules for
// OpAMD64XCHGQ: fold an ADDQconst or LEAQ on the address operand into the
// instruction's offset/symbol when the combined offset fits in 32 bits (the
// LEAQ case also requires mergeable symbols and a non-SB base). Returns true
// if v was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORL_0 applies the first batch of generated
// rewrite rules for OpAMD64XORL: folding MOVLconst operands into XORLconst,
// recognizing shift pairs (SHL/SHR in either order) as rotate-by-constant
// ops at 32/16/8-bit widths, collapsing x^x to 0, and merging a MOVLload
// second operand into XORLmem. Returns true if v was rewritten; the
// remaining rules are in rewriteValueAMD64_OpAMD64XORL_10. Generated code:
// do not edit by hand.
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
	// match: (XORL x (MOVLconst [c]))
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORL_10 applies the remaining generated rewrite
// rule for OpAMD64XORL: the mirror of the load-merge rule in _0, with the
// MOVLload as the first operand. Returns true if v was rewritten.
// Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconst_0 applies the first batch of generated
// rewrite rules for OpAMD64XORLconst: XOR-by-1 of a SET* flag result is
// replaced by the SET* of the inverted condition (NE<->EQ, L<->GE, LE<->G,
// B<->AE, BE<->A). Returns true if v was rewritten; the remaining rules are
// in rewriteValueAMD64_OpAMD64XORLconst_10. Generated code: do not edit by
// hand.
func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
	// match: (XORLconst [1] (SETNE x))
	// cond:
	// result: (SETEQ x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// cond:
	// result: (SETNE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// cond:
	// result: (SETGE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// cond:
	// result: (SETL x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// cond:
	// result: (SETG x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// cond:
	// result: (SETLE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// cond:
	// result: (SETAE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// cond:
	// result: (SETB x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// cond:
	// result: (SETA x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// cond:
	// result: (SETBE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconst_10 applies the remaining generated
// rewrite rules for OpAMD64XORLconst: collapsing nested XORLconst chains,
// eliding a constant that is zero in its low 32 bits, and constant-folding
// against MOVLconst. Returns true if v was rewritten. Generated code: do
// not edit by hand.
func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLmem_0 applies the generated rewrite rule for
// OpAMD64XORLmem: when the memory operand was just stored as a float
// (MOVSSstore to the same [off]{sym} ptr), XOR the reinterpreted bits
// directly via MOVLf2i instead of reloading from memory. Returns true if v
// was rewritten. Generated code: do not edit by hand.
func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (XORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQ_0 applies the XORQ rewrite rules from
// gen/AMD64.rules: fold a 32-bit constant into XORQconst, recognize a
// shift pair as a rotate, reduce x^x to 0, and fold a memory load into
// the XOR. Rules are tried in order; the first match wins. Reports
// whether v was rewritten. (Generated file — change gen/AMD64.rules,
// not this function.)
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	// The immediate form only takes a sign-extended 32-bit constant,
	// hence the is32Bit guard.
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	// Commuted form of the rule above (XOR is commutative).
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	// (x<<c) XOR (x>>(64-c)) combines disjoint bit ranges, so the XOR
	// is exactly a rotate left by c.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	// Commuted form of the rotate rule above.
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	// x ^ x is always zero.
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	// Fold the load into the XOR when the load has no other uses and
	// merging is safe (canMergeLoad checks this; clobber marks l dead).
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	// Commuted form of the load-merge rule above.
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
 41995  func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
 41996  	// match: (XORQconst [c] (XORQconst [d] x))
 41997  	// cond:
 41998  	// result: (XORQconst [c ^ d] x)
 41999  	for {
 42000  		c := v.AuxInt
 42001  		v_0 := v.Args[0]
 42002  		if v_0.Op != OpAMD64XORQconst {
 42003  			break
 42004  		}
 42005  		d := v_0.AuxInt
 42006  		x := v_0.Args[0]
 42007  		v.reset(OpAMD64XORQconst)
 42008  		v.AuxInt = c ^ d
 42009  		v.AddArg(x)
 42010  		return true
 42011  	}
 42012  	// match: (XORQconst [0] x)
 42013  	// cond:
 42014  	// result: x
 42015  	for {
 42016  		if v.AuxInt != 0 {
 42017  			break
 42018  		}
 42019  		x := v.Args[0]
 42020  		v.reset(OpCopy)
 42021  		v.Type = x.Type
 42022  		v.AddArg(x)
 42023  		return true
 42024  	}
 42025  	// match: (XORQconst [c] (MOVQconst [d]))
 42026  	// cond:
 42027  	// result: (MOVQconst [c^d])
 42028  	for {
 42029  		c := v.AuxInt
 42030  		v_0 := v.Args[0]
 42031  		if v_0.Op != OpAMD64MOVQconst {
 42032  			break
 42033  		}
 42034  		d := v_0.AuxInt
 42035  		v.reset(OpAMD64MOVQconst)
 42036  		v.AuxInt = c ^ d
 42037  		return true
 42038  	}
 42039  	return false
 42040  }
 42041  func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
 42042  	b := v.Block
 42043  	_ = b
 42044  	typ := &b.Func.Config.Types
 42045  	_ = typ
 42046  	// match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
 42047  	// cond:
 42048  	// result: (XORQ x (MOVQf2i y))
 42049  	for {
 42050  		off := v.AuxInt
 42051  		sym := v.Aux
 42052  		_ = v.Args[2]
 42053  		x := v.Args[0]
 42054  		ptr := v.Args[1]
 42055  		v_2 := v.Args[2]
 42056  		if v_2.Op != OpAMD64MOVSDstore {
 42057  			break
 42058  		}
 42059  		if v_2.AuxInt != off {
 42060  			break
 42061  		}
 42062  		if v_2.Aux != sym {
 42063  			break
 42064  		}
 42065  		_ = v_2.Args[2]
 42066  		if ptr != v_2.Args[0] {
 42067  			break
 42068  		}
 42069  		y := v_2.Args[1]
 42070  		v.reset(OpAMD64XORQ)
 42071  		v.AddArg(x)
 42072  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
 42073  		v0.AddArg(y)
 42074  		v.AddArg(v0)
 42075  		return true
 42076  	}
 42077  	return false
 42078  }
 42079  func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
 42080  	// match: (Add16 x y)
 42081  	// cond:
 42082  	// result: (ADDL x y)
 42083  	for {
 42084  		_ = v.Args[1]
 42085  		x := v.Args[0]
 42086  		y := v.Args[1]
 42087  		v.reset(OpAMD64ADDL)
 42088  		v.AddArg(x)
 42089  		v.AddArg(y)
 42090  		return true
 42091  	}
 42092  }
 42093  func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
 42094  	// match: (Add32 x y)
 42095  	// cond:
 42096  	// result: (ADDL x y)
 42097  	for {
 42098  		_ = v.Args[1]
 42099  		x := v.Args[0]
 42100  		y := v.Args[1]
 42101  		v.reset(OpAMD64ADDL)
 42102  		v.AddArg(x)
 42103  		v.AddArg(y)
 42104  		return true
 42105  	}
 42106  }
 42107  func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
 42108  	// match: (Add32F x y)
 42109  	// cond:
 42110  	// result: (ADDSS x y)
 42111  	for {
 42112  		_ = v.Args[1]
 42113  		x := v.Args[0]
 42114  		y := v.Args[1]
 42115  		v.reset(OpAMD64ADDSS)
 42116  		v.AddArg(x)
 42117  		v.AddArg(y)
 42118  		return true
 42119  	}
 42120  }
 42121  func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
 42122  	// match: (Add64 x y)
 42123  	// cond:
 42124  	// result: (ADDQ x y)
 42125  	for {
 42126  		_ = v.Args[1]
 42127  		x := v.Args[0]
 42128  		y := v.Args[1]
 42129  		v.reset(OpAMD64ADDQ)
 42130  		v.AddArg(x)
 42131  		v.AddArg(y)
 42132  		return true
 42133  	}
 42134  }
 42135  func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
 42136  	// match: (Add64F x y)
 42137  	// cond:
 42138  	// result: (ADDSD x y)
 42139  	for {
 42140  		_ = v.Args[1]
 42141  		x := v.Args[0]
 42142  		y := v.Args[1]
 42143  		v.reset(OpAMD64ADDSD)
 42144  		v.AddArg(x)
 42145  		v.AddArg(y)
 42146  		return true
 42147  	}
 42148  }
 42149  func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
 42150  	// match: (Add8 x y)
 42151  	// cond:
 42152  	// result: (ADDL x y)
 42153  	for {
 42154  		_ = v.Args[1]
 42155  		x := v.Args[0]
 42156  		y := v.Args[1]
 42157  		v.reset(OpAMD64ADDL)
 42158  		v.AddArg(x)
 42159  		v.AddArg(y)
 42160  		return true
 42161  	}
 42162  }
 42163  func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
 42164  	b := v.Block
 42165  	_ = b
 42166  	config := b.Func.Config
 42167  	_ = config
 42168  	// match: (AddPtr x y)
 42169  	// cond: config.PtrSize == 8
 42170  	// result: (ADDQ x y)
 42171  	for {
 42172  		_ = v.Args[1]
 42173  		x := v.Args[0]
 42174  		y := v.Args[1]
 42175  		if !(config.PtrSize == 8) {
 42176  			break
 42177  		}
 42178  		v.reset(OpAMD64ADDQ)
 42179  		v.AddArg(x)
 42180  		v.AddArg(y)
 42181  		return true
 42182  	}
 42183  	// match: (AddPtr x y)
 42184  	// cond: config.PtrSize == 4
 42185  	// result: (ADDL x y)
 42186  	for {
 42187  		_ = v.Args[1]
 42188  		x := v.Args[0]
 42189  		y := v.Args[1]
 42190  		if !(config.PtrSize == 4) {
 42191  			break
 42192  		}
 42193  		v.reset(OpAMD64ADDL)
 42194  		v.AddArg(x)
 42195  		v.AddArg(y)
 42196  		return true
 42197  	}
 42198  	return false
 42199  }
 42200  func rewriteValueAMD64_OpAddr_0(v *Value) bool {
 42201  	b := v.Block
 42202  	_ = b
 42203  	config := b.Func.Config
 42204  	_ = config
 42205  	// match: (Addr {sym} base)
 42206  	// cond: config.PtrSize == 8
 42207  	// result: (LEAQ {sym} base)
 42208  	for {
 42209  		sym := v.Aux
 42210  		base := v.Args[0]
 42211  		if !(config.PtrSize == 8) {
 42212  			break
 42213  		}
 42214  		v.reset(OpAMD64LEAQ)
 42215  		v.Aux = sym
 42216  		v.AddArg(base)
 42217  		return true
 42218  	}
 42219  	// match: (Addr {sym} base)
 42220  	// cond: config.PtrSize == 4
 42221  	// result: (LEAL {sym} base)
 42222  	for {
 42223  		sym := v.Aux
 42224  		base := v.Args[0]
 42225  		if !(config.PtrSize == 4) {
 42226  			break
 42227  		}
 42228  		v.reset(OpAMD64LEAL)
 42229  		v.Aux = sym
 42230  		v.AddArg(base)
 42231  		return true
 42232  	}
 42233  	return false
 42234  }
 42235  func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
 42236  	// match: (And16 x y)
 42237  	// cond:
 42238  	// result: (ANDL x y)
 42239  	for {
 42240  		_ = v.Args[1]
 42241  		x := v.Args[0]
 42242  		y := v.Args[1]
 42243  		v.reset(OpAMD64ANDL)
 42244  		v.AddArg(x)
 42245  		v.AddArg(y)
 42246  		return true
 42247  	}
 42248  }
 42249  func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
 42250  	// match: (And32 x y)
 42251  	// cond:
 42252  	// result: (ANDL x y)
 42253  	for {
 42254  		_ = v.Args[1]
 42255  		x := v.Args[0]
 42256  		y := v.Args[1]
 42257  		v.reset(OpAMD64ANDL)
 42258  		v.AddArg(x)
 42259  		v.AddArg(y)
 42260  		return true
 42261  	}
 42262  }
 42263  func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
 42264  	// match: (And64 x y)
 42265  	// cond:
 42266  	// result: (ANDQ x y)
 42267  	for {
 42268  		_ = v.Args[1]
 42269  		x := v.Args[0]
 42270  		y := v.Args[1]
 42271  		v.reset(OpAMD64ANDQ)
 42272  		v.AddArg(x)
 42273  		v.AddArg(y)
 42274  		return true
 42275  	}
 42276  }
 42277  func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
 42278  	// match: (And8 x y)
 42279  	// cond:
 42280  	// result: (ANDL x y)
 42281  	for {
 42282  		_ = v.Args[1]
 42283  		x := v.Args[0]
 42284  		y := v.Args[1]
 42285  		v.reset(OpAMD64ANDL)
 42286  		v.AddArg(x)
 42287  		v.AddArg(y)
 42288  		return true
 42289  	}
 42290  }
 42291  func rewriteValueAMD64_OpAndB_0(v *Value) bool {
 42292  	// match: (AndB x y)
 42293  	// cond:
 42294  	// result: (ANDL x y)
 42295  	for {
 42296  		_ = v.Args[1]
 42297  		x := v.Args[0]
 42298  		y := v.Args[1]
 42299  		v.reset(OpAMD64ANDL)
 42300  		v.AddArg(x)
 42301  		v.AddArg(y)
 42302  		return true
 42303  	}
 42304  }
 42305  func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
 42306  	b := v.Block
 42307  	_ = b
 42308  	typ := &b.Func.Config.Types
 42309  	_ = typ
 42310  	// match: (AtomicAdd32 ptr val mem)
 42311  	// cond:
 42312  	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
 42313  	for {
 42314  		_ = v.Args[2]
 42315  		ptr := v.Args[0]
 42316  		val := v.Args[1]
 42317  		mem := v.Args[2]
 42318  		v.reset(OpAMD64AddTupleFirst32)
 42319  		v.AddArg(val)
 42320  		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
 42321  		v0.AddArg(val)
 42322  		v0.AddArg(ptr)
 42323  		v0.AddArg(mem)
 42324  		v.AddArg(v0)
 42325  		return true
 42326  	}
 42327  }
 42328  func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
 42329  	b := v.Block
 42330  	_ = b
 42331  	typ := &b.Func.Config.Types
 42332  	_ = typ
 42333  	// match: (AtomicAdd64 ptr val mem)
 42334  	// cond:
 42335  	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
 42336  	for {
 42337  		_ = v.Args[2]
 42338  		ptr := v.Args[0]
 42339  		val := v.Args[1]
 42340  		mem := v.Args[2]
 42341  		v.reset(OpAMD64AddTupleFirst64)
 42342  		v.AddArg(val)
 42343  		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
 42344  		v0.AddArg(val)
 42345  		v0.AddArg(ptr)
 42346  		v0.AddArg(mem)
 42347  		v.AddArg(v0)
 42348  		return true
 42349  	}
 42350  }
 42351  func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
 42352  	// match: (AtomicAnd8 ptr val mem)
 42353  	// cond:
 42354  	// result: (ANDBlock ptr val mem)
 42355  	for {
 42356  		_ = v.Args[2]
 42357  		ptr := v.Args[0]
 42358  		val := v.Args[1]
 42359  		mem := v.Args[2]
 42360  		v.reset(OpAMD64ANDBlock)
 42361  		v.AddArg(ptr)
 42362  		v.AddArg(val)
 42363  		v.AddArg(mem)
 42364  		return true
 42365  	}
 42366  }
 42367  func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
 42368  	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
 42369  	// cond:
 42370  	// result: (CMPXCHGLlock ptr old new_ mem)
 42371  	for {
 42372  		_ = v.Args[3]
 42373  		ptr := v.Args[0]
 42374  		old := v.Args[1]
 42375  		new_ := v.Args[2]
 42376  		mem := v.Args[3]
 42377  		v.reset(OpAMD64CMPXCHGLlock)
 42378  		v.AddArg(ptr)
 42379  		v.AddArg(old)
 42380  		v.AddArg(new_)
 42381  		v.AddArg(mem)
 42382  		return true
 42383  	}
 42384  }
 42385  func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
 42386  	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
 42387  	// cond:
 42388  	// result: (CMPXCHGQlock ptr old new_ mem)
 42389  	for {
 42390  		_ = v.Args[3]
 42391  		ptr := v.Args[0]
 42392  		old := v.Args[1]
 42393  		new_ := v.Args[2]
 42394  		mem := v.Args[3]
 42395  		v.reset(OpAMD64CMPXCHGQlock)
 42396  		v.AddArg(ptr)
 42397  		v.AddArg(old)
 42398  		v.AddArg(new_)
 42399  		v.AddArg(mem)
 42400  		return true
 42401  	}
 42402  }
 42403  func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
 42404  	// match: (AtomicExchange32 ptr val mem)
 42405  	// cond:
 42406  	// result: (XCHGL val ptr mem)
 42407  	for {
 42408  		_ = v.Args[2]
 42409  		ptr := v.Args[0]
 42410  		val := v.Args[1]
 42411  		mem := v.Args[2]
 42412  		v.reset(OpAMD64XCHGL)
 42413  		v.AddArg(val)
 42414  		v.AddArg(ptr)
 42415  		v.AddArg(mem)
 42416  		return true
 42417  	}
 42418  }
 42419  func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
 42420  	// match: (AtomicExchange64 ptr val mem)
 42421  	// cond:
 42422  	// result: (XCHGQ val ptr mem)
 42423  	for {
 42424  		_ = v.Args[2]
 42425  		ptr := v.Args[0]
 42426  		val := v.Args[1]
 42427  		mem := v.Args[2]
 42428  		v.reset(OpAMD64XCHGQ)
 42429  		v.AddArg(val)
 42430  		v.AddArg(ptr)
 42431  		v.AddArg(mem)
 42432  		return true
 42433  	}
 42434  }
 42435  func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
 42436  	// match: (AtomicLoad32 ptr mem)
 42437  	// cond:
 42438  	// result: (MOVLatomicload ptr mem)
 42439  	for {
 42440  		_ = v.Args[1]
 42441  		ptr := v.Args[0]
 42442  		mem := v.Args[1]
 42443  		v.reset(OpAMD64MOVLatomicload)
 42444  		v.AddArg(ptr)
 42445  		v.AddArg(mem)
 42446  		return true
 42447  	}
 42448  }
 42449  func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
 42450  	// match: (AtomicLoad64 ptr mem)
 42451  	// cond:
 42452  	// result: (MOVQatomicload ptr mem)
 42453  	for {
 42454  		_ = v.Args[1]
 42455  		ptr := v.Args[0]
 42456  		mem := v.Args[1]
 42457  		v.reset(OpAMD64MOVQatomicload)
 42458  		v.AddArg(ptr)
 42459  		v.AddArg(mem)
 42460  		return true
 42461  	}
 42462  }
 42463  func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
 42464  	b := v.Block
 42465  	_ = b
 42466  	config := b.Func.Config
 42467  	_ = config
 42468  	// match: (AtomicLoadPtr ptr mem)
 42469  	// cond: config.PtrSize == 8
 42470  	// result: (MOVQatomicload ptr mem)
 42471  	for {
 42472  		_ = v.Args[1]
 42473  		ptr := v.Args[0]
 42474  		mem := v.Args[1]
 42475  		if !(config.PtrSize == 8) {
 42476  			break
 42477  		}
 42478  		v.reset(OpAMD64MOVQatomicload)
 42479  		v.AddArg(ptr)
 42480  		v.AddArg(mem)
 42481  		return true
 42482  	}
 42483  	// match: (AtomicLoadPtr ptr mem)
 42484  	// cond: config.PtrSize == 4
 42485  	// result: (MOVLatomicload ptr mem)
 42486  	for {
 42487  		_ = v.Args[1]
 42488  		ptr := v.Args[0]
 42489  		mem := v.Args[1]
 42490  		if !(config.PtrSize == 4) {
 42491  			break
 42492  		}
 42493  		v.reset(OpAMD64MOVLatomicload)
 42494  		v.AddArg(ptr)
 42495  		v.AddArg(mem)
 42496  		return true
 42497  	}
 42498  	return false
 42499  }
 42500  func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
 42501  	// match: (AtomicOr8 ptr val mem)
 42502  	// cond:
 42503  	// result: (ORBlock ptr val mem)
 42504  	for {
 42505  		_ = v.Args[2]
 42506  		ptr := v.Args[0]
 42507  		val := v.Args[1]
 42508  		mem := v.Args[2]
 42509  		v.reset(OpAMD64ORBlock)
 42510  		v.AddArg(ptr)
 42511  		v.AddArg(val)
 42512  		v.AddArg(mem)
 42513  		return true
 42514  	}
 42515  }
 42516  func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
 42517  	b := v.Block
 42518  	_ = b
 42519  	typ := &b.Func.Config.Types
 42520  	_ = typ
 42521  	// match: (AtomicStore32 ptr val mem)
 42522  	// cond:
 42523  	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
 42524  	for {
 42525  		_ = v.Args[2]
 42526  		ptr := v.Args[0]
 42527  		val := v.Args[1]
 42528  		mem := v.Args[2]
 42529  		v.reset(OpSelect1)
 42530  		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
 42531  		v0.AddArg(val)
 42532  		v0.AddArg(ptr)
 42533  		v0.AddArg(mem)
 42534  		v.AddArg(v0)
 42535  		return true
 42536  	}
 42537  }
 42538  func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
 42539  	b := v.Block
 42540  	_ = b
 42541  	typ := &b.Func.Config.Types
 42542  	_ = typ
 42543  	// match: (AtomicStore64 ptr val mem)
 42544  	// cond:
 42545  	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
 42546  	for {
 42547  		_ = v.Args[2]
 42548  		ptr := v.Args[0]
 42549  		val := v.Args[1]
 42550  		mem := v.Args[2]
 42551  		v.reset(OpSelect1)
 42552  		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
 42553  		v0.AddArg(val)
 42554  		v0.AddArg(ptr)
 42555  		v0.AddArg(mem)
 42556  		v.AddArg(v0)
 42557  		return true
 42558  	}
 42559  }
 42560  func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
 42561  	b := v.Block
 42562  	_ = b
 42563  	config := b.Func.Config
 42564  	_ = config
 42565  	typ := &b.Func.Config.Types
 42566  	_ = typ
 42567  	// match: (AtomicStorePtrNoWB ptr val mem)
 42568  	// cond: config.PtrSize == 8
 42569  	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
 42570  	for {
 42571  		_ = v.Args[2]
 42572  		ptr := v.Args[0]
 42573  		val := v.Args[1]
 42574  		mem := v.Args[2]
 42575  		if !(config.PtrSize == 8) {
 42576  			break
 42577  		}
 42578  		v.reset(OpSelect1)
 42579  		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
 42580  		v0.AddArg(val)
 42581  		v0.AddArg(ptr)
 42582  		v0.AddArg(mem)
 42583  		v.AddArg(v0)
 42584  		return true
 42585  	}
 42586  	// match: (AtomicStorePtrNoWB ptr val mem)
 42587  	// cond: config.PtrSize == 4
 42588  	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
 42589  	for {
 42590  		_ = v.Args[2]
 42591  		ptr := v.Args[0]
 42592  		val := v.Args[1]
 42593  		mem := v.Args[2]
 42594  		if !(config.PtrSize == 4) {
 42595  			break
 42596  		}
 42597  		v.reset(OpSelect1)
 42598  		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
 42599  		v0.AddArg(val)
 42600  		v0.AddArg(ptr)
 42601  		v0.AddArg(mem)
 42602  		v.AddArg(v0)
 42603  		return true
 42604  	}
 42605  	return false
 42606  }
 42607  func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
 42608  	// match: (Avg64u x y)
 42609  	// cond:
 42610  	// result: (AVGQU x y)
 42611  	for {
 42612  		_ = v.Args[1]
 42613  		x := v.Args[0]
 42614  		y := v.Args[1]
 42615  		v.reset(OpAMD64AVGQU)
 42616  		v.AddArg(x)
 42617  		v.AddArg(y)
 42618  		return true
 42619  	}
 42620  }
 42621  func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
 42622  	b := v.Block
 42623  	_ = b
 42624  	typ := &b.Func.Config.Types
 42625  	_ = typ
 42626  	// match: (BitLen32 x)
 42627  	// cond:
 42628  	// result: (BitLen64 (MOVLQZX <typ.UInt64> x))
 42629  	for {
 42630  		x := v.Args[0]
 42631  		v.reset(OpBitLen64)
 42632  		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
 42633  		v0.AddArg(x)
 42634  		v.AddArg(v0)
 42635  		return true
 42636  	}
 42637  }
// rewriteValueAMD64_OpBitLen64_0 lowers BitLen64 using BSRQ (bit scan
// reverse). BSRQ's value result is the index of the highest set bit;
// the CMOVQEQ substitutes -1 when the flags from BSRQ report a zero
// input, so the final +1 yields 0 for x == 0 and index+1 otherwise.
// (Generated file — change gen/AMD64.rules, not this function.)
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		// First CMOV operand: the bit index from BSRQ.
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		// Second CMOV operand: -1, chosen when the input was zero.
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		// Third CMOV operand: the flags from a second BSRQ of the same
		// input (CSE later merges the duplicate BSRQ values).
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
 42669  func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
 42670  	// match: (Bswap32 x)
 42671  	// cond:
 42672  	// result: (BSWAPL x)
 42673  	for {
 42674  		x := v.Args[0]
 42675  		v.reset(OpAMD64BSWAPL)
 42676  		v.AddArg(x)
 42677  		return true
 42678  	}
 42679  }
 42680  func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
 42681  	// match: (Bswap64 x)
 42682  	// cond:
 42683  	// result: (BSWAPQ x)
 42684  	for {
 42685  		x := v.Args[0]
 42686  		v.reset(OpAMD64BSWAPQ)
 42687  		v.AddArg(x)
 42688  		return true
 42689  	}
 42690  }
 42691  func rewriteValueAMD64_OpCeil_0(v *Value) bool {
 42692  	// match: (Ceil x)
 42693  	// cond:
 42694  	// result: (ROUNDSD [2] x)
 42695  	for {
 42696  		x := v.Args[0]
 42697  		v.reset(OpAMD64ROUNDSD)
 42698  		v.AuxInt = 2
 42699  		v.AddArg(x)
 42700  		return true
 42701  	}
 42702  }
 42703  func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
 42704  	// match: (ClosureCall [argwid] entry closure mem)
 42705  	// cond:
 42706  	// result: (CALLclosure [argwid] entry closure mem)
 42707  	for {
 42708  		argwid := v.AuxInt
 42709  		_ = v.Args[2]
 42710  		entry := v.Args[0]
 42711  		closure := v.Args[1]
 42712  		mem := v.Args[2]
 42713  		v.reset(OpAMD64CALLclosure)
 42714  		v.AuxInt = argwid
 42715  		v.AddArg(entry)
 42716  		v.AddArg(closure)
 42717  		v.AddArg(mem)
 42718  		return true
 42719  	}
 42720  }
 42721  func rewriteValueAMD64_OpCom16_0(v *Value) bool {
 42722  	// match: (Com16 x)
 42723  	// cond:
 42724  	// result: (NOTL x)
 42725  	for {
 42726  		x := v.Args[0]
 42727  		v.reset(OpAMD64NOTL)
 42728  		v.AddArg(x)
 42729  		return true
 42730  	}
 42731  }
 42732  func rewriteValueAMD64_OpCom32_0(v *Value) bool {
 42733  	// match: (Com32 x)
 42734  	// cond:
 42735  	// result: (NOTL x)
 42736  	for {
 42737  		x := v.Args[0]
 42738  		v.reset(OpAMD64NOTL)
 42739  		v.AddArg(x)
 42740  		return true
 42741  	}
 42742  }
 42743  func rewriteValueAMD64_OpCom64_0(v *Value) bool {
 42744  	// match: (Com64 x)
 42745  	// cond:
 42746  	// result: (NOTQ x)
 42747  	for {
 42748  		x := v.Args[0]
 42749  		v.reset(OpAMD64NOTQ)
 42750  		v.AddArg(x)
 42751  		return true
 42752  	}
 42753  }
 42754  func rewriteValueAMD64_OpCom8_0(v *Value) bool {
 42755  	// match: (Com8 x)
 42756  	// cond:
 42757  	// result: (NOTL x)
 42758  	for {
 42759  		x := v.Args[0]
 42760  		v.reset(OpAMD64NOTL)
 42761  		v.AddArg(x)
 42762  		return true
 42763  	}
 42764  }
 42765  func rewriteValueAMD64_OpConst16_0(v *Value) bool {
 42766  	// match: (Const16 [val])
 42767  	// cond:
 42768  	// result: (MOVLconst [val])
 42769  	for {
 42770  		val := v.AuxInt
 42771  		v.reset(OpAMD64MOVLconst)
 42772  		v.AuxInt = val
 42773  		return true
 42774  	}
 42775  }
 42776  func rewriteValueAMD64_OpConst32_0(v *Value) bool {
 42777  	// match: (Const32 [val])
 42778  	// cond:
 42779  	// result: (MOVLconst [val])
 42780  	for {
 42781  		val := v.AuxInt
 42782  		v.reset(OpAMD64MOVLconst)
 42783  		v.AuxInt = val
 42784  		return true
 42785  	}
 42786  }
 42787  func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
 42788  	// match: (Const32F [val])
 42789  	// cond:
 42790  	// result: (MOVSSconst [val])
 42791  	for {
 42792  		val := v.AuxInt
 42793  		v.reset(OpAMD64MOVSSconst)
 42794  		v.AuxInt = val
 42795  		return true
 42796  	}
 42797  }
 42798  func rewriteValueAMD64_OpConst64_0(v *Value) bool {
 42799  	// match: (Const64 [val])
 42800  	// cond:
 42801  	// result: (MOVQconst [val])
 42802  	for {
 42803  		val := v.AuxInt
 42804  		v.reset(OpAMD64MOVQconst)
 42805  		v.AuxInt = val
 42806  		return true
 42807  	}
 42808  }
 42809  func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
 42810  	// match: (Const64F [val])
 42811  	// cond:
 42812  	// result: (MOVSDconst [val])
 42813  	for {
 42814  		val := v.AuxInt
 42815  		v.reset(OpAMD64MOVSDconst)
 42816  		v.AuxInt = val
 42817  		return true
 42818  	}
 42819  }
 42820  func rewriteValueAMD64_OpConst8_0(v *Value) bool {
 42821  	// match: (Const8 [val])
 42822  	// cond:
 42823  	// result: (MOVLconst [val])
 42824  	for {
 42825  		val := v.AuxInt
 42826  		v.reset(OpAMD64MOVLconst)
 42827  		v.AuxInt = val
 42828  		return true
 42829  	}
 42830  }
 42831  func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
 42832  	// match: (ConstBool [b])
 42833  	// cond:
 42834  	// result: (MOVLconst [b])
 42835  	for {
 42836  		b := v.AuxInt
 42837  		v.reset(OpAMD64MOVLconst)
 42838  		v.AuxInt = b
 42839  		return true
 42840  	}
 42841  }
 42842  func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
 42843  	b := v.Block
 42844  	_ = b
 42845  	config := b.Func.Config
 42846  	_ = config
 42847  	// match: (ConstNil)
 42848  	// cond: config.PtrSize == 8
 42849  	// result: (MOVQconst [0])
 42850  	for {
 42851  		if !(config.PtrSize == 8) {
 42852  			break
 42853  		}
 42854  		v.reset(OpAMD64MOVQconst)
 42855  		v.AuxInt = 0
 42856  		return true
 42857  	}
 42858  	// match: (ConstNil)
 42859  	// cond: config.PtrSize == 4
 42860  	// result: (MOVLconst [0])
 42861  	for {
 42862  		if !(config.PtrSize == 4) {
 42863  			break
 42864  		}
 42865  		v.reset(OpAMD64MOVLconst)
 42866  		v.AuxInt = 0
 42867  		return true
 42868  	}
 42869  	return false
 42870  }
 42871  func rewriteValueAMD64_OpConvert_0(v *Value) bool {
 42872  	b := v.Block
 42873  	_ = b
 42874  	config := b.Func.Config
 42875  	_ = config
 42876  	// match: (Convert <t> x mem)
 42877  	// cond: config.PtrSize == 8
 42878  	// result: (MOVQconvert <t> x mem)
 42879  	for {
 42880  		t := v.Type
 42881  		_ = v.Args[1]
 42882  		x := v.Args[0]
 42883  		mem := v.Args[1]
 42884  		if !(config.PtrSize == 8) {
 42885  			break
 42886  		}
 42887  		v.reset(OpAMD64MOVQconvert)
 42888  		v.Type = t
 42889  		v.AddArg(x)
 42890  		v.AddArg(mem)
 42891  		return true
 42892  	}
 42893  	// match: (Convert <t> x mem)
 42894  	// cond: config.PtrSize == 4
 42895  	// result: (MOVLconvert <t> x mem)
 42896  	for {
 42897  		t := v.Type
 42898  		_ = v.Args[1]
 42899  		x := v.Args[0]
 42900  		mem := v.Args[1]
 42901  		if !(config.PtrSize == 4) {
 42902  			break
 42903  		}
 42904  		v.reset(OpAMD64MOVLconvert)
 42905  		v.Type = t
 42906  		v.AddArg(x)
 42907  		v.AddArg(mem)
 42908  		return true
 42909  	}
 42910  	return false
 42911  }
 42912  func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
 42913  	b := v.Block
 42914  	_ = b
 42915  	typ := &b.Func.Config.Types
 42916  	_ = typ
 42917  	// match: (Ctz32 x)
 42918  	// cond:
 42919  	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
 42920  	for {
 42921  		x := v.Args[0]
 42922  		v.reset(OpSelect0)
 42923  		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
 42924  		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
 42925  		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
 42926  		v2.AuxInt = 1 << 32
 42927  		v1.AddArg(v2)
 42928  		v1.AddArg(x)
 42929  		v0.AddArg(v1)
 42930  		v.AddArg(v0)
 42931  		return true
 42932  	}
 42933  }
// rewriteValueAMD64_OpCtz64_0 lowers Ctz64 using BSFQ (bit scan
// forward). BSFQ's value result is the index of the lowest set bit;
// the CMOVQEQ substitutes 64 when the flags from BSFQ report a zero
// input, giving the defined Ctz64(0) == 64 result.
// (Generated file — change gen/AMD64.rules, not this function.)
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		// First CMOV operand: the bit index from BSFQ.
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		// Second CMOV operand: 64, chosen when the input was zero.
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		// Third CMOV operand: the flags from a second BSFQ of the same
		// input (CSE later merges the duplicate BSFQ values).
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
 42962  func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
 42963  	// match: (Cvt32Fto32 x)
 42964  	// cond:
 42965  	// result: (CVTTSS2SL x)
 42966  	for {
 42967  		x := v.Args[0]
 42968  		v.reset(OpAMD64CVTTSS2SL)
 42969  		v.AddArg(x)
 42970  		return true
 42971  	}
 42972  }
 42973  func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
 42974  	// match: (Cvt32Fto64 x)
 42975  	// cond:
 42976  	// result: (CVTTSS2SQ x)
 42977  	for {
 42978  		x := v.Args[0]
 42979  		v.reset(OpAMD64CVTTSS2SQ)
 42980  		v.AddArg(x)
 42981  		return true
 42982  	}
 42983  }
 42984  func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
 42985  	// match: (Cvt32Fto64F x)
 42986  	// cond:
 42987  	// result: (CVTSS2SD x)
 42988  	for {
 42989  		x := v.Args[0]
 42990  		v.reset(OpAMD64CVTSS2SD)
 42991  		v.AddArg(x)
 42992  		return true
 42993  	}
 42994  }
 42995  func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
 42996  	// match: (Cvt32to32F x)
 42997  	// cond:
 42998  	// result: (CVTSL2SS x)
 42999  	for {
 43000  		x := v.Args[0]
 43001  		v.reset(OpAMD64CVTSL2SS)
 43002  		v.AddArg(x)
 43003  		return true
 43004  	}
 43005  }
 43006  func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
 43007  	// match: (Cvt32to64F x)
 43008  	// cond:
 43009  	// result: (CVTSL2SD x)
 43010  	for {
 43011  		x := v.Args[0]
 43012  		v.reset(OpAMD64CVTSL2SD)
 43013  		v.AddArg(x)
 43014  		return true
 43015  	}
 43016  }
 43017  func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
 43018  	// match: (Cvt64Fto32 x)
 43019  	// cond:
 43020  	// result: (CVTTSD2SL x)
 43021  	for {
 43022  		x := v.Args[0]
 43023  		v.reset(OpAMD64CVTTSD2SL)
 43024  		v.AddArg(x)
 43025  		return true
 43026  	}
 43027  }
 43028  func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
 43029  	// match: (Cvt64Fto32F x)
 43030  	// cond:
 43031  	// result: (CVTSD2SS x)
 43032  	for {
 43033  		x := v.Args[0]
 43034  		v.reset(OpAMD64CVTSD2SS)
 43035  		v.AddArg(x)
 43036  		return true
 43037  	}
 43038  }
 43039  func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
 43040  	// match: (Cvt64Fto64 x)
 43041  	// cond:
 43042  	// result: (CVTTSD2SQ x)
 43043  	for {
 43044  		x := v.Args[0]
 43045  		v.reset(OpAMD64CVTTSD2SQ)
 43046  		v.AddArg(x)
 43047  		return true
 43048  	}
 43049  }
 43050  func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
 43051  	// match: (Cvt64to32F x)
 43052  	// cond:
 43053  	// result: (CVTSQ2SS x)
 43054  	for {
 43055  		x := v.Args[0]
 43056  		v.reset(OpAMD64CVTSQ2SS)
 43057  		v.AddArg(x)
 43058  		return true
 43059  	}
 43060  }
 43061  func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
 43062  	// match: (Cvt64to64F x)
 43063  	// cond:
 43064  	// result: (CVTSQ2SD x)
 43065  	for {
 43066  		x := v.Args[0]
 43067  		v.reset(OpAMD64CVTSQ2SD)
 43068  		v.AddArg(x)
 43069  		return true
 43070  	}
 43071  }
// rewriteValueAMD64_OpDiv128u_0 lowers Div128u, whose dividend is supplied as
// high/low 64-bit halves (xhi, xlo) with divisor y, directly to the DIVQU2
// pseudo-op, preserving the three-operand order.
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
// The Div* rules below lower generic integer division to the AMD64 DIV*
// tuple-producing ops (which yield quotient and remainder); Select0 extracts
// the quotient. Float division maps one-to-one onto DIVSS/DIVSD. The 8-bit
// forms first widen both operands to 16 bits.

// rewriteValueAMD64_OpDiv16_0 lowers Div16 (signed 16-bit divide) to
// Select0 of a DIVW tuple.
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv16u_0 lowers Div16u (unsigned 16-bit divide) to
// Select0 of a DIVWU tuple.
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv32_0 lowers Div32 (signed 32-bit divide) to
// Select0 of a DIVL tuple.
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv32F_0 lowers Div32F (float32 divide) to DIVSS.
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpDiv32u_0 lowers Div32u (unsigned 32-bit divide) to
// Select0 of a DIVLU tuple.
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv64_0 lowers Div64 (signed 64-bit divide) to
// Select0 of a DIVQ tuple.
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv64F_0 lowers Div64F (float64 divide) to DIVSD.
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpDiv64u_0 lowers Div64u (unsigned 64-bit divide) to
// Select0 of a DIVQU tuple.
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv8_0 lowers Div8 (signed 8-bit divide) by sign-extending
// both operands to 16 bits and dividing with DIVW; Select0 takes the quotient.
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpDiv8u_0 lowers Div8u (unsigned 8-bit divide) by
// zero-extending both operands to 16 bits and dividing with DIVWU.
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
// The Eq* rules below lower generic equality ops to a SETEQ (or SETEQF for
// floats) of a width-matched compare producing flags.

// rewriteValueAMD64_OpEq16_0 lowers Eq16 to SETEQ of a 16-bit compare (CMPW).
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpEq32_0 lowers Eq32 to SETEQ of a 32-bit compare (CMPL).
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpEq32F_0 lowers Eq32F to the float-aware SETEQF of a
// UCOMISS comparison.
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpEq64_0 lowers Eq64 to SETEQ of a 64-bit compare (CMPQ).
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpEq64F_0 lowers Eq64F to the float-aware SETEQF of a
// UCOMISD comparison.
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpEq8_0 lowers Eq8 to SETEQ of an 8-bit compare (CMPB).
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpEqB_0 lowers EqB (boolean equality) the same way as Eq8,
// comparing the byte values with CMPB.
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEqPtr_0 lowers EqPtr with a compare sized to the target's
// pointer width: CMPQ when config.PtrSize == 8, CMPL when it is 4. Returns
// false (no rewrite) only if neither condition holds.
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpFloor_0 lowers Floor to ROUNDSD with rounding-mode
// immediate 1 (round toward negative infinity).
func rewriteValueAMD64_OpFloor_0(v *Value) bool {
	// match: (Floor x)
	// cond:
	// result: (ROUNDSD [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 1 // rounding mode: floor
		v.AddArg(x)
		return true
	}
}
// The Geq* rules below lower >= comparisons: SETGE (signed) or SETAE
// (unsigned, "above or equal") over a width-matched compare, and SETGEF over
// UCOMISS/UCOMISD for floats.

// rewriteValueAMD64_OpGeq16_0 lowers Geq16 (signed) to SETGE of CMPW.
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq16U_0 lowers Geq16U (unsigned) to SETAE of CMPW.
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq32_0 lowers Geq32 (signed) to SETGE of CMPL.
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq32F_0 lowers Geq32F to SETGEF of UCOMISS.
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq32U_0 lowers Geq32U (unsigned) to SETAE of CMPL.
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq64_0 lowers Geq64 (signed) to SETGE of CMPQ.
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq64F_0 lowers Geq64F to SETGEF of UCOMISD.
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq64U_0 lowers Geq64U (unsigned) to SETAE of CMPQ.
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq8_0 lowers Geq8 (signed) to SETGE of CMPB.
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGeq8U_0 lowers Geq8U (unsigned) to SETAE of CMPB.
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// The Get* rules below are one-to-one renamings of generic runtime-introspection
// ops onto their AMD64 Lowered* pseudo-ops; arguments (if any) pass through.

// rewriteValueAMD64_OpGetCallerPC_0 lowers GetCallerPC to LoweredGetCallerPC.
func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
	// match: (GetCallerPC)
	// cond:
	// result: (LoweredGetCallerPC)
	for {
		v.reset(OpAMD64LoweredGetCallerPC)
		return true
	}
}

// rewriteValueAMD64_OpGetCallerSP_0 lowers GetCallerSP to LoweredGetCallerSP.
func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
	// match: (GetCallerSP)
	// cond:
	// result: (LoweredGetCallerSP)
	for {
		v.reset(OpAMD64LoweredGetCallerSP)
		return true
	}
}

// rewriteValueAMD64_OpGetClosurePtr_0 lowers GetClosurePtr to LoweredGetClosurePtr.
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}

// rewriteValueAMD64_OpGetG_0 lowers GetG to LoweredGetG, carrying the memory
// argument through.
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
// The Greater* rules below lower > comparisons: SETG (signed) or SETA
// (unsigned, "above") over a width-matched compare, and SETGF over
// UCOMISS/UCOMISD for floats.

// rewriteValueAMD64_OpGreater16_0 lowers Greater16 (signed) to SETG of CMPW.
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater16U_0 lowers Greater16U (unsigned) to SETA of CMPW.
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater32_0 lowers Greater32 (signed) to SETG of CMPL.
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater32F_0 lowers Greater32F to SETGF of UCOMISS.
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater32U_0 lowers Greater32U (unsigned) to SETA of CMPL.
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater64_0 lowers Greater64 (signed) to SETG of CMPQ.
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater64F_0 lowers Greater64F to SETGF of UCOMISD.
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater64U_0 lowers Greater64U (unsigned) to SETA of CMPQ.
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater8_0 lowers Greater8 (signed) to SETG of CMPB.
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpGreater8U_0 lowers Greater8U (unsigned) to SETA of CMPB.
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// The Hmul* rules below lower high-multiply ops (the upper half of a
// double-width product) one-to-one onto the AMD64 HMUL* ops.

// rewriteValueAMD64_OpHmul32_0 lowers Hmul32 (signed) to HMULL.
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpHmul32u_0 lowers Hmul32u (unsigned) to HMULLU.
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpHmul64_0 lowers Hmul64 (signed) to HMULQ.
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpHmul64u_0 lowers Hmul64u (unsigned) to HMULQU.
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
// rewriteValueAMD64_OpInt64Hi_0 lowers Int64Hi (upper 32 bits of a 64-bit
// value) to a logical right shift by 32 (SHRQconst [32]).
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpInterCall_0 lowers InterCall (interface method call) to
// CALLinter, carrying the argument width (AuxInt), entry pointer, and memory
// argument through unchanged.
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
// The bounds/nil-check rules below pick a compare width from config.PtrSize
// (CMPQ/TESTQ for 8-byte pointers, CMPL/TESTL for 4-byte) and use unsigned
// conditions (SETB / SETBE) for the bounds checks, so a negative index —
// viewed as a huge unsigned value — also fails the check.

// rewriteValueAMD64_OpIsInBounds_0 lowers IsInBounds (idx < len, unsigned) to
// SETB of a pointer-width compare.
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}

// rewriteValueAMD64_OpIsNonNil_0 lowers IsNonNil to SETNE of a self-test
// (TESTQ p p / TESTL p p), which sets ZF exactly when p is zero.
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}

// rewriteValueAMD64_OpIsSliceInBounds_0 lowers IsSliceInBounds (idx <= len,
// unsigned) to SETBE of a pointer-width compare.
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
// The Leq* rules below lower <= comparisons: SETLE (signed) or SETBE
// (unsigned, "below or equal") over a width-matched compare. The float forms
// swap the operands and reuse SETGEF: x <= y is computed as y >= x.

// rewriteValueAMD64_OpLeq16_0 lowers Leq16 (signed) to SETLE of CMPW.
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq16U_0 lowers Leq16U (unsigned) to SETBE of CMPW.
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq32_0 lowers Leq32 (signed) to SETLE of CMPL.
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq32F_0 lowers Leq32F as y >= x: SETGEF of UCOMISS
// with the operands swapped.
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq32U_0 lowers Leq32U (unsigned) to SETBE of CMPL.
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq64_0 lowers Leq64 (signed) to SETLE of CMPQ.
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq64F_0 lowers Leq64F as y >= x: SETGEF of UCOMISD
// with the operands swapped.
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq64U_0 lowers Leq64U (unsigned) to SETBE of CMPQ.
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq8_0 lowers Leq8 (signed) to SETLE of CMPB.
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpLeq8U_0 lowers Leq8U (unsigned) to SETBE of CMPB.
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess16_0 lowers the generic signed 16-bit < op:
// (Less16 x y) -> (SETL (CMPW x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess16U_0 lowers the generic unsigned 16-bit < op:
// (Less16U x y) -> (SETB (CMPW x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess32_0 lowers the generic signed 32-bit < op:
// (Less32 x y) -> (SETL (CMPL x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess32F_0 lowers the generic 32-bit float < op:
// (Less32F x y) -> (SETGF (UCOMISS y x)). Note the operands are swapped:
// x < y is computed as y > x (SETGF over the reversed compare).
// Unconditional; always returns true.
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess32U_0 lowers the generic unsigned 32-bit < op:
// (Less32U x y) -> (SETB (CMPL x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess64_0 lowers the generic signed 64-bit < op:
// (Less64 x y) -> (SETL (CMPQ x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess64F_0 lowers the generic 64-bit float < op:
// (Less64F x y) -> (SETGF (UCOMISD y x)). Note the operands are swapped:
// x < y is computed as y > x (SETGF over the reversed compare).
// Unconditional; always returns true.
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess64U_0 lowers the generic unsigned 64-bit < op:
// (Less64U x y) -> (SETB (CMPQ x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess8_0 lowers the generic signed 8-bit < op:
// (Less8 x y) -> (SETL (CMPB x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess8U_0 lowers the generic unsigned 8-bit < op:
// (Less8U x y) -> (SETB (CMPB x y)). Unconditional; always returns true.
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLoad_0 lowers a generic Load to the AMD64 load of the
// matching width, dispatching on the loaded type: 64-bit ints (and pointers
// when PtrSize == 8) -> MOVQload; 32-bit ints (and pointers when
// PtrSize == 4) -> MOVLload; 16-bit ints -> MOVWload; booleans and 8-bit
// ints -> MOVBload; float32 -> MOVSSload; float64 -> MOVSDload.
// Returns false if no case applies to the type.
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh16x16_0 lowers a 16-bit left shift by a 16-bit
// amount: (Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))).
// The SBBLcarrymask term masks the shift result to zero when y >= 32, so
// oversized shift counts produce 0 (Go semantics) rather than the
// hardware's mod-32 behavior. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh16x32_0 lowers a 16-bit left shift by a 32-bit
// amount: (Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh16x64_0 lowers a 16-bit left shift by a 64-bit
// amount: (Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh16x8_0 lowers a 16-bit left shift by an 8-bit
// amount: (Lsh16x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh32x16_0 lowers a 32-bit left shift by a 16-bit
// amount: (Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh32x32_0 lowers a 32-bit left shift by a 32-bit
// amount: (Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh32x64_0 lowers a 32-bit left shift by a 64-bit
// amount: (Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh32x8_0 lowers a 32-bit left shift by an 8-bit
// amount: (Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh64x16_0 lowers a 64-bit left shift by a 16-bit
// amount: (Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))).
// The mask zeroes the result when y >= 64. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh64x32_0 lowers a 64-bit left shift by a 32-bit
// amount: (Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))).
// The mask zeroes the result when y >= 64. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh64x64_0 lowers a 64-bit left shift by a 64-bit
// amount: (Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))).
// The mask zeroes the result when y >= 64. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh64x8_0 lowers a 64-bit left shift by an 8-bit
// amount: (Lsh64x8 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))).
// The mask zeroes the result when y >= 64. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh8x16_0 lowers an 8-bit left shift by a 16-bit
// amount: (Lsh8x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh8x32_0 lowers an 8-bit left shift by a 32-bit
// amount: (Lsh8x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh8x64_0 lowers an 8-bit left shift by a 64-bit
// amount: (Lsh8x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpLsh8x8_0 lowers an 8-bit left shift by an 8-bit
// amount: (Lsh8x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))).
// The mask zeroes the result when y >= 32. Unconditional; always returns true.
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
// rewriteValueAMD64_OpMod16_0 lowers signed 16-bit modulus:
// (Mod16 x y) -> (Select1 (DIVW x y)). DIVW produces an (Int16, Int16)
// tuple and Select1 picks the second element (the remainder).
// Unconditional; always returns true.
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod16u_0 lowers unsigned 16-bit modulus:
// (Mod16u x y) -> (Select1 (DIVWU x y)). Select1 extracts the remainder
// from the (UInt16, UInt16) tuple. Unconditional; always returns true.
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod32_0 lowers signed 32-bit modulus:
// (Mod32 x y) -> (Select1 (DIVL x y)). Select1 extracts the remainder
// from the (Int32, Int32) tuple. Unconditional; always returns true.
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod32u_0 lowers unsigned 32-bit modulus:
// (Mod32u x y) -> (Select1 (DIVLU x y)). Select1 extracts the remainder
// from the (UInt32, UInt32) tuple. Unconditional; always returns true.
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod64_0 lowers signed 64-bit modulus:
// (Mod64 x y) -> (Select1 (DIVQ x y)). Select1 extracts the remainder
// from the (Int64, Int64) tuple. Unconditional; always returns true.
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod64u_0 lowers unsigned 64-bit modulus:
// (Mod64u x y) -> (Select1 (DIVQU x y)). Select1 extracts the remainder
// from the (UInt64, UInt64) tuple. Unconditional; always returns true.
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod8_0 lowers signed 8-bit modulus:
// (Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))).
// Both operands are sign-extended to 16 bits and the 16-bit divide is
// used; Select1 extracts the remainder. Unconditional; always returns true.
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod8u_0 lowers unsigned 8-bit modulus:
// (Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))).
// Both operands are zero-extended to 16 bits and the 16-bit unsigned
// divide is used; Select1 extracts the remainder. Unconditional;
// always returns true.
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMove_0 lowers small generic Move (memmove-like) ops
// to AMD64 load/store pairs, dispatching on the byte count in AuxInt:
// 0 -> the move is a no-op and the memory arg is passed through; 1/2/4/8 ->
// a single MOVB/MOVW/MOVL/MOVQ load+store; 16 -> one MOVO pair when SSE is
// enabled, else two overlapping-free MOVQ pairs; 3/5/6 -> two partially
// overlapping-free store pairs (e.g. [3] = 1-byte store at offset 2 over a
// 2-byte store at offset 0). Larger and odd sizes fall through to
// rewriteValueAMD64_OpMove_10 (returns false if nothing here matched).
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[2]
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(config.useSSE) {
			break
		}
		// SSE available: a single 128-bit load/store pair.
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(!config.useSSE) {
			break
		}
		// No SSE: two chained 8-byte load/store pairs at offsets 8 and 0.
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpMove_10 is the second batch of lowering rules for the
// generic Move op (copy v.AuxInt bytes from src to dst, threading mem).
// It covers: the 7-byte case, 9-15 bytes (two overlapping MOVQs), the
// remainder-peeling cases for sizes >16 that are not a multiple of 16,
// Duff's device for aligned 32..1024-byte copies, and REPMOVSQ for large
// copies that are a multiple of 8. Reports whether a rewrite fired.
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		// Two overlapping 4-byte copies: [0,4) and [3,7).
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		// Two overlapping 8-byte copies: [0,8) and [s-8,s).
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		// Peel off the ragged head with one 8-byte copy, then recurse on
		// the remaining 16-byte-aligned size.
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		// Peel off the ragged head with one 16-byte SSE copy.
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		// No SSE: peel the ragged head with two 8-byte copies instead of
		// one 16-byte MOVO.
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		// AuxInt is the byte offset to jump to within the Duff's device
		// routine; each 16-byte block of the routine is 14 bytes of code.
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8 // quadword count for REP MOVSQ
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
 45553  func rewriteValueAMD64_OpMul16_0(v *Value) bool {
 45554  	// match: (Mul16 x y)
 45555  	// cond:
 45556  	// result: (MULL x y)
 45557  	for {
 45558  		_ = v.Args[1]
 45559  		x := v.Args[0]
 45560  		y := v.Args[1]
 45561  		v.reset(OpAMD64MULL)
 45562  		v.AddArg(x)
 45563  		v.AddArg(y)
 45564  		return true
 45565  	}
 45566  }
 45567  func rewriteValueAMD64_OpMul32_0(v *Value) bool {
 45568  	// match: (Mul32 x y)
 45569  	// cond:
 45570  	// result: (MULL x y)
 45571  	for {
 45572  		_ = v.Args[1]
 45573  		x := v.Args[0]
 45574  		y := v.Args[1]
 45575  		v.reset(OpAMD64MULL)
 45576  		v.AddArg(x)
 45577  		v.AddArg(y)
 45578  		return true
 45579  	}
 45580  }
 45581  func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
 45582  	// match: (Mul32F x y)
 45583  	// cond:
 45584  	// result: (MULSS x y)
 45585  	for {
 45586  		_ = v.Args[1]
 45587  		x := v.Args[0]
 45588  		y := v.Args[1]
 45589  		v.reset(OpAMD64MULSS)
 45590  		v.AddArg(x)
 45591  		v.AddArg(y)
 45592  		return true
 45593  	}
 45594  }
 45595  func rewriteValueAMD64_OpMul64_0(v *Value) bool {
 45596  	// match: (Mul64 x y)
 45597  	// cond:
 45598  	// result: (MULQ x y)
 45599  	for {
 45600  		_ = v.Args[1]
 45601  		x := v.Args[0]
 45602  		y := v.Args[1]
 45603  		v.reset(OpAMD64MULQ)
 45604  		v.AddArg(x)
 45605  		v.AddArg(y)
 45606  		return true
 45607  	}
 45608  }
 45609  func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
 45610  	// match: (Mul64F x y)
 45611  	// cond:
 45612  	// result: (MULSD x y)
 45613  	for {
 45614  		_ = v.Args[1]
 45615  		x := v.Args[0]
 45616  		y := v.Args[1]
 45617  		v.reset(OpAMD64MULSD)
 45618  		v.AddArg(x)
 45619  		v.AddArg(y)
 45620  		return true
 45621  	}
 45622  }
 45623  func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
 45624  	// match: (Mul64uhilo x y)
 45625  	// cond:
 45626  	// result: (MULQU2 x y)
 45627  	for {
 45628  		_ = v.Args[1]
 45629  		x := v.Args[0]
 45630  		y := v.Args[1]
 45631  		v.reset(OpAMD64MULQU2)
 45632  		v.AddArg(x)
 45633  		v.AddArg(y)
 45634  		return true
 45635  	}
 45636  }
 45637  func rewriteValueAMD64_OpMul8_0(v *Value) bool {
 45638  	// match: (Mul8 x y)
 45639  	// cond:
 45640  	// result: (MULL x y)
 45641  	for {
 45642  		_ = v.Args[1]
 45643  		x := v.Args[0]
 45644  		y := v.Args[1]
 45645  		v.reset(OpAMD64MULL)
 45646  		v.AddArg(x)
 45647  		v.AddArg(y)
 45648  		return true
 45649  	}
 45650  }
 45651  func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
 45652  	// match: (Neg16 x)
 45653  	// cond:
 45654  	// result: (NEGL x)
 45655  	for {
 45656  		x := v.Args[0]
 45657  		v.reset(OpAMD64NEGL)
 45658  		v.AddArg(x)
 45659  		return true
 45660  	}
 45661  }
 45662  func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
 45663  	// match: (Neg32 x)
 45664  	// cond:
 45665  	// result: (NEGL x)
 45666  	for {
 45667  		x := v.Args[0]
 45668  		v.reset(OpAMD64NEGL)
 45669  		v.AddArg(x)
 45670  		return true
 45671  	}
 45672  }
 45673  func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
 45674  	b := v.Block
 45675  	_ = b
 45676  	typ := &b.Func.Config.Types
 45677  	_ = typ
 45678  	// match: (Neg32F x)
 45679  	// cond:
 45680  	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
 45681  	for {
 45682  		x := v.Args[0]
 45683  		v.reset(OpAMD64PXOR)
 45684  		v.AddArg(x)
 45685  		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
 45686  		v0.AuxInt = f2i(math.Copysign(0, -1))
 45687  		v.AddArg(v0)
 45688  		return true
 45689  	}
 45690  }
 45691  func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
 45692  	// match: (Neg64 x)
 45693  	// cond:
 45694  	// result: (NEGQ x)
 45695  	for {
 45696  		x := v.Args[0]
 45697  		v.reset(OpAMD64NEGQ)
 45698  		v.AddArg(x)
 45699  		return true
 45700  	}
 45701  }
 45702  func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
 45703  	b := v.Block
 45704  	_ = b
 45705  	typ := &b.Func.Config.Types
 45706  	_ = typ
 45707  	// match: (Neg64F x)
 45708  	// cond:
 45709  	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
 45710  	for {
 45711  		x := v.Args[0]
 45712  		v.reset(OpAMD64PXOR)
 45713  		v.AddArg(x)
 45714  		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
 45715  		v0.AuxInt = f2i(math.Copysign(0, -1))
 45716  		v.AddArg(v0)
 45717  		return true
 45718  	}
 45719  }
 45720  func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
 45721  	// match: (Neg8 x)
 45722  	// cond:
 45723  	// result: (NEGL x)
 45724  	for {
 45725  		x := v.Args[0]
 45726  		v.reset(OpAMD64NEGL)
 45727  		v.AddArg(x)
 45728  		return true
 45729  	}
 45730  }
 45731  func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
 45732  	b := v.Block
 45733  	_ = b
 45734  	// match: (Neq16 x y)
 45735  	// cond:
 45736  	// result: (SETNE (CMPW x y))
 45737  	for {
 45738  		_ = v.Args[1]
 45739  		x := v.Args[0]
 45740  		y := v.Args[1]
 45741  		v.reset(OpAMD64SETNE)
 45742  		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
 45743  		v0.AddArg(x)
 45744  		v0.AddArg(y)
 45745  		v.AddArg(v0)
 45746  		return true
 45747  	}
 45748  }
 45749  func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
 45750  	b := v.Block
 45751  	_ = b
 45752  	// match: (Neq32 x y)
 45753  	// cond:
 45754  	// result: (SETNE (CMPL x y))
 45755  	for {
 45756  		_ = v.Args[1]
 45757  		x := v.Args[0]
 45758  		y := v.Args[1]
 45759  		v.reset(OpAMD64SETNE)
 45760  		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
 45761  		v0.AddArg(x)
 45762  		v0.AddArg(y)
 45763  		v.AddArg(v0)
 45764  		return true
 45765  	}
 45766  }
 45767  func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
 45768  	b := v.Block
 45769  	_ = b
 45770  	// match: (Neq32F x y)
 45771  	// cond:
 45772  	// result: (SETNEF (UCOMISS x y))
 45773  	for {
 45774  		_ = v.Args[1]
 45775  		x := v.Args[0]
 45776  		y := v.Args[1]
 45777  		v.reset(OpAMD64SETNEF)
 45778  		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
 45779  		v0.AddArg(x)
 45780  		v0.AddArg(y)
 45781  		v.AddArg(v0)
 45782  		return true
 45783  	}
 45784  }
 45785  func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
 45786  	b := v.Block
 45787  	_ = b
 45788  	// match: (Neq64 x y)
 45789  	// cond:
 45790  	// result: (SETNE (CMPQ x y))
 45791  	for {
 45792  		_ = v.Args[1]
 45793  		x := v.Args[0]
 45794  		y := v.Args[1]
 45795  		v.reset(OpAMD64SETNE)
 45796  		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 45797  		v0.AddArg(x)
 45798  		v0.AddArg(y)
 45799  		v.AddArg(v0)
 45800  		return true
 45801  	}
 45802  }
 45803  func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
 45804  	b := v.Block
 45805  	_ = b
 45806  	// match: (Neq64F x y)
 45807  	// cond:
 45808  	// result: (SETNEF (UCOMISD x y))
 45809  	for {
 45810  		_ = v.Args[1]
 45811  		x := v.Args[0]
 45812  		y := v.Args[1]
 45813  		v.reset(OpAMD64SETNEF)
 45814  		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
 45815  		v0.AddArg(x)
 45816  		v0.AddArg(y)
 45817  		v.AddArg(v0)
 45818  		return true
 45819  	}
 45820  }
 45821  func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
 45822  	b := v.Block
 45823  	_ = b
 45824  	// match: (Neq8 x y)
 45825  	// cond:
 45826  	// result: (SETNE (CMPB x y))
 45827  	for {
 45828  		_ = v.Args[1]
 45829  		x := v.Args[0]
 45830  		y := v.Args[1]
 45831  		v.reset(OpAMD64SETNE)
 45832  		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
 45833  		v0.AddArg(x)
 45834  		v0.AddArg(y)
 45835  		v.AddArg(v0)
 45836  		return true
 45837  	}
 45838  }
 45839  func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
 45840  	b := v.Block
 45841  	_ = b
 45842  	// match: (NeqB x y)
 45843  	// cond:
 45844  	// result: (SETNE (CMPB x y))
 45845  	for {
 45846  		_ = v.Args[1]
 45847  		x := v.Args[0]
 45848  		y := v.Args[1]
 45849  		v.reset(OpAMD64SETNE)
 45850  		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
 45851  		v0.AddArg(x)
 45852  		v0.AddArg(y)
 45853  		v.AddArg(v0)
 45854  		return true
 45855  	}
 45856  }
 45857  func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
 45858  	b := v.Block
 45859  	_ = b
 45860  	config := b.Func.Config
 45861  	_ = config
 45862  	// match: (NeqPtr x y)
 45863  	// cond: config.PtrSize == 8
 45864  	// result: (SETNE (CMPQ x y))
 45865  	for {
 45866  		_ = v.Args[1]
 45867  		x := v.Args[0]
 45868  		y := v.Args[1]
 45869  		if !(config.PtrSize == 8) {
 45870  			break
 45871  		}
 45872  		v.reset(OpAMD64SETNE)
 45873  		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 45874  		v0.AddArg(x)
 45875  		v0.AddArg(y)
 45876  		v.AddArg(v0)
 45877  		return true
 45878  	}
 45879  	// match: (NeqPtr x y)
 45880  	// cond: config.PtrSize == 4
 45881  	// result: (SETNE (CMPL x y))
 45882  	for {
 45883  		_ = v.Args[1]
 45884  		x := v.Args[0]
 45885  		y := v.Args[1]
 45886  		if !(config.PtrSize == 4) {
 45887  			break
 45888  		}
 45889  		v.reset(OpAMD64SETNE)
 45890  		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
 45891  		v0.AddArg(x)
 45892  		v0.AddArg(y)
 45893  		v.AddArg(v0)
 45894  		return true
 45895  	}
 45896  	return false
 45897  }
 45898  func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
 45899  	// match: (NilCheck ptr mem)
 45900  	// cond:
 45901  	// result: (LoweredNilCheck ptr mem)
 45902  	for {
 45903  		_ = v.Args[1]
 45904  		ptr := v.Args[0]
 45905  		mem := v.Args[1]
 45906  		v.reset(OpAMD64LoweredNilCheck)
 45907  		v.AddArg(ptr)
 45908  		v.AddArg(mem)
 45909  		return true
 45910  	}
 45911  }
 45912  func rewriteValueAMD64_OpNot_0(v *Value) bool {
 45913  	// match: (Not x)
 45914  	// cond:
 45915  	// result: (XORLconst [1] x)
 45916  	for {
 45917  		x := v.Args[0]
 45918  		v.reset(OpAMD64XORLconst)
 45919  		v.AuxInt = 1
 45920  		v.AddArg(x)
 45921  		return true
 45922  	}
 45923  }
 45924  func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 45925  	b := v.Block
 45926  	_ = b
 45927  	config := b.Func.Config
 45928  	_ = config
 45929  	typ := &b.Func.Config.Types
 45930  	_ = typ
 45931  	// match: (OffPtr [off] ptr)
 45932  	// cond: config.PtrSize == 8 && is32Bit(off)
 45933  	// result: (ADDQconst [off] ptr)
 45934  	for {
 45935  		off := v.AuxInt
 45936  		ptr := v.Args[0]
 45937  		if !(config.PtrSize == 8 && is32Bit(off)) {
 45938  			break
 45939  		}
 45940  		v.reset(OpAMD64ADDQconst)
 45941  		v.AuxInt = off
 45942  		v.AddArg(ptr)
 45943  		return true
 45944  	}
 45945  	// match: (OffPtr [off] ptr)
 45946  	// cond: config.PtrSize == 8
 45947  	// result: (ADDQ (MOVQconst [off]) ptr)
 45948  	for {
 45949  		off := v.AuxInt
 45950  		ptr := v.Args[0]
 45951  		if !(config.PtrSize == 8) {
 45952  			break
 45953  		}
 45954  		v.reset(OpAMD64ADDQ)
 45955  		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
 45956  		v0.AuxInt = off
 45957  		v.AddArg(v0)
 45958  		v.AddArg(ptr)
 45959  		return true
 45960  	}
 45961  	// match: (OffPtr [off] ptr)
 45962  	// cond: config.PtrSize == 4
 45963  	// result: (ADDLconst [off] ptr)
 45964  	for {
 45965  		off := v.AuxInt
 45966  		ptr := v.Args[0]
 45967  		if !(config.PtrSize == 4) {
 45968  			break
 45969  		}
 45970  		v.reset(OpAMD64ADDLconst)
 45971  		v.AuxInt = off
 45972  		v.AddArg(ptr)
 45973  		return true
 45974  	}
 45975  	return false
 45976  }
 45977  func rewriteValueAMD64_OpOr16_0(v *Value) bool {
 45978  	// match: (Or16 x y)
 45979  	// cond:
 45980  	// result: (ORL x y)
 45981  	for {
 45982  		_ = v.Args[1]
 45983  		x := v.Args[0]
 45984  		y := v.Args[1]
 45985  		v.reset(OpAMD64ORL)
 45986  		v.AddArg(x)
 45987  		v.AddArg(y)
 45988  		return true
 45989  	}
 45990  }
 45991  func rewriteValueAMD64_OpOr32_0(v *Value) bool {
 45992  	// match: (Or32 x y)
 45993  	// cond:
 45994  	// result: (ORL x y)
 45995  	for {
 45996  		_ = v.Args[1]
 45997  		x := v.Args[0]
 45998  		y := v.Args[1]
 45999  		v.reset(OpAMD64ORL)
 46000  		v.AddArg(x)
 46001  		v.AddArg(y)
 46002  		return true
 46003  	}
 46004  }
 46005  func rewriteValueAMD64_OpOr64_0(v *Value) bool {
 46006  	// match: (Or64 x y)
 46007  	// cond:
 46008  	// result: (ORQ x y)
 46009  	for {
 46010  		_ = v.Args[1]
 46011  		x := v.Args[0]
 46012  		y := v.Args[1]
 46013  		v.reset(OpAMD64ORQ)
 46014  		v.AddArg(x)
 46015  		v.AddArg(y)
 46016  		return true
 46017  	}
 46018  }
 46019  func rewriteValueAMD64_OpOr8_0(v *Value) bool {
 46020  	// match: (Or8 x y)
 46021  	// cond:
 46022  	// result: (ORL x y)
 46023  	for {
 46024  		_ = v.Args[1]
 46025  		x := v.Args[0]
 46026  		y := v.Args[1]
 46027  		v.reset(OpAMD64ORL)
 46028  		v.AddArg(x)
 46029  		v.AddArg(y)
 46030  		return true
 46031  	}
 46032  }
 46033  func rewriteValueAMD64_OpOrB_0(v *Value) bool {
 46034  	// match: (OrB x y)
 46035  	// cond:
 46036  	// result: (ORL x y)
 46037  	for {
 46038  		_ = v.Args[1]
 46039  		x := v.Args[0]
 46040  		y := v.Args[1]
 46041  		v.reset(OpAMD64ORL)
 46042  		v.AddArg(x)
 46043  		v.AddArg(y)
 46044  		return true
 46045  	}
 46046  }
 46047  func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
 46048  	b := v.Block
 46049  	_ = b
 46050  	typ := &b.Func.Config.Types
 46051  	_ = typ
 46052  	// match: (PopCount16 x)
 46053  	// cond:
 46054  	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
 46055  	for {
 46056  		x := v.Args[0]
 46057  		v.reset(OpAMD64POPCNTL)
 46058  		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
 46059  		v0.AddArg(x)
 46060  		v.AddArg(v0)
 46061  		return true
 46062  	}
 46063  }
 46064  func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
 46065  	// match: (PopCount32 x)
 46066  	// cond:
 46067  	// result: (POPCNTL x)
 46068  	for {
 46069  		x := v.Args[0]
 46070  		v.reset(OpAMD64POPCNTL)
 46071  		v.AddArg(x)
 46072  		return true
 46073  	}
 46074  }
 46075  func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
 46076  	// match: (PopCount64 x)
 46077  	// cond:
 46078  	// result: (POPCNTQ x)
 46079  	for {
 46080  		x := v.Args[0]
 46081  		v.reset(OpAMD64POPCNTQ)
 46082  		v.AddArg(x)
 46083  		return true
 46084  	}
 46085  }
 46086  func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
 46087  	b := v.Block
 46088  	_ = b
 46089  	typ := &b.Func.Config.Types
 46090  	_ = typ
 46091  	// match: (PopCount8 x)
 46092  	// cond:
 46093  	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
 46094  	for {
 46095  		x := v.Args[0]
 46096  		v.reset(OpAMD64POPCNTL)
 46097  		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
 46098  		v0.AddArg(x)
 46099  		v.AddArg(v0)
 46100  		return true
 46101  	}
 46102  }
 46103  func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
 46104  	// match: (Round32F x)
 46105  	// cond:
 46106  	// result: x
 46107  	for {
 46108  		x := v.Args[0]
 46109  		v.reset(OpCopy)
 46110  		v.Type = x.Type
 46111  		v.AddArg(x)
 46112  		return true
 46113  	}
 46114  }
 46115  func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
 46116  	// match: (Round64F x)
 46117  	// cond:
 46118  	// result: x
 46119  	for {
 46120  		x := v.Args[0]
 46121  		v.reset(OpCopy)
 46122  		v.Type = x.Type
 46123  		v.AddArg(x)
 46124  		return true
 46125  	}
 46126  }
 46127  func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
 46128  	// match: (RoundToEven x)
 46129  	// cond:
 46130  	// result: (ROUNDSD [0] x)
 46131  	for {
 46132  		x := v.Args[0]
 46133  		v.reset(OpAMD64ROUNDSD)
 46134  		v.AuxInt = 0
 46135  		v.AddArg(x)
 46136  		return true
 46137  	}
 46138  }
 46139  func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
 46140  	b := v.Block
 46141  	_ = b
 46142  	// match: (Rsh16Ux16 <t> x y)
 46143  	// cond:
 46144  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
 46145  	for {
 46146  		t := v.Type
 46147  		_ = v.Args[1]
 46148  		x := v.Args[0]
 46149  		y := v.Args[1]
 46150  		v.reset(OpAMD64ANDL)
 46151  		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
 46152  		v0.AddArg(x)
 46153  		v0.AddArg(y)
 46154  		v.AddArg(v0)
 46155  		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
 46156  		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
 46157  		v2.AuxInt = 16
 46158  		v2.AddArg(y)
 46159  		v1.AddArg(v2)
 46160  		v.AddArg(v1)
 46161  		return true
 46162  	}
 46163  }
 46164  func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
 46165  	b := v.Block
 46166  	_ = b
 46167  	// match: (Rsh16Ux32 <t> x y)
 46168  	// cond:
 46169  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
 46170  	for {
 46171  		t := v.Type
 46172  		_ = v.Args[1]
 46173  		x := v.Args[0]
 46174  		y := v.Args[1]
 46175  		v.reset(OpAMD64ANDL)
 46176  		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
 46177  		v0.AddArg(x)
 46178  		v0.AddArg(y)
 46179  		v.AddArg(v0)
 46180  		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
 46181  		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
 46182  		v2.AuxInt = 16
 46183  		v2.AddArg(y)
 46184  		v1.AddArg(v2)
 46185  		v.AddArg(v1)
 46186  		return true
 46187  	}
 46188  }
 46189  func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
 46190  	b := v.Block
 46191  	_ = b
 46192  	// match: (Rsh16Ux64 <t> x y)
 46193  	// cond:
 46194  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
 46195  	for {
 46196  		t := v.Type
 46197  		_ = v.Args[1]
 46198  		x := v.Args[0]
 46199  		y := v.Args[1]
 46200  		v.reset(OpAMD64ANDL)
 46201  		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
 46202  		v0.AddArg(x)
 46203  		v0.AddArg(y)
 46204  		v.AddArg(v0)
 46205  		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
 46206  		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
 46207  		v2.AuxInt = 16
 46208  		v2.AddArg(y)
 46209  		v1.AddArg(v2)
 46210  		v.AddArg(v1)
 46211  		return true
 46212  	}
 46213  }
 46214  func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
 46215  	b := v.Block
 46216  	_ = b
 46217  	// match: (Rsh16Ux8 <t> x y)
 46218  	// cond:
 46219  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
 46220  	for {
 46221  		t := v.Type
 46222  		_ = v.Args[1]
 46223  		x := v.Args[0]
 46224  		y := v.Args[1]
 46225  		v.reset(OpAMD64ANDL)
 46226  		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
 46227  		v0.AddArg(x)
 46228  		v0.AddArg(y)
 46229  		v.AddArg(v0)
 46230  		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
 46231  		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
 46232  		v2.AuxInt = 16
 46233  		v2.AddArg(y)
 46234  		v1.AddArg(v2)
 46235  		v.AddArg(v1)
 46236  		return true
 46237  	}
 46238  }
 46239  func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
 46240  	b := v.Block
 46241  	_ = b
 46242  	// match: (Rsh16x16 <t> x y)
 46243  	// cond:
 46244  	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
 46245  	for {
 46246  		t := v.Type
 46247  		_ = v.Args[1]
 46248  		x := v.Args[0]
 46249  		y := v.Args[1]
 46250  		v.reset(OpAMD64SARW)
 46251  		v.Type = t
 46252  		v.AddArg(x)
 46253  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
 46254  		v0.AddArg(y)
 46255  		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
 46256  		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
 46257  		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
 46258  		v3.AuxInt = 16
 46259  		v3.AddArg(y)
 46260  		v2.AddArg(v3)
 46261  		v1.AddArg(v2)
 46262  		v0.AddArg(v1)
 46263  		v.AddArg(v0)
 46264  		return true
 46265  	}
 46266  }
 46267  func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
 46268  	b := v.Block
 46269  	_ = b
 46270  	// match: (Rsh16x32 <t> x y)
 46271  	// cond:
 46272  	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
 46273  	for {
 46274  		t := v.Type
 46275  		_ = v.Args[1]
 46276  		x := v.Args[0]
 46277  		y := v.Args[1]
 46278  		v.reset(OpAMD64SARW)
 46279  		v.Type = t
 46280  		v.AddArg(x)
 46281  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
 46282  		v0.AddArg(y)
 46283  		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
 46284  		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
 46285  		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
 46286  		v3.AuxInt = 16
 46287  		v3.AddArg(y)
 46288  		v2.AddArg(v3)
 46289  		v1.AddArg(v2)
 46290  		v0.AddArg(v1)
 46291  		v.AddArg(v0)
 46292  		return true
 46293  	}
 46294  }
 46295  func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
 46296  	b := v.Block
 46297  	_ = b
 46298  	// match: (Rsh16x64 <t> x y)
 46299  	// cond:
 46300  	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
 46301  	for {
 46302  		t := v.Type
 46303  		_ = v.Args[1]
 46304  		x := v.Args[0]
 46305  		y := v.Args[1]
 46306  		v.reset(OpAMD64SARW)
 46307  		v.Type = t
 46308  		v.AddArg(x)
 46309  		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
 46310  		v0.AddArg(y)
 46311  		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
 46312  		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
 46313  		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
 46314  		v3.AuxInt = 16
 46315  		v3.AddArg(y)
 46316  		v2.AddArg(v3)
 46317  		v1.AddArg(v2)
 46318  		v0.AddArg(v1)
 46319  		v.AddArg(v0)
 46320  		return true
 46321  	}
 46322  }
 46323  func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
 46324  	b := v.Block
 46325  	_ = b
 46326  	// match: (Rsh16x8 <t> x y)
 46327  	// cond:
 46328  	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
 46329  	for {
 46330  		t := v.Type
 46331  		_ = v.Args[1]
 46332  		x := v.Args[0]
 46333  		y := v.Args[1]
 46334  		v.reset(OpAMD64SARW)
 46335  		v.Type = t
 46336  		v.AddArg(x)
 46337  		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
 46338  		v0.AddArg(y)
 46339  		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
 46340  		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
 46341  		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
 46342  		v3.AuxInt = 16
 46343  		v3.AddArg(y)
 46344  		v2.AddArg(v3)
 46345  		v1.AddArg(v2)
 46346  		v0.AddArg(v1)
 46347  		v.AddArg(v0)
 46348  		return true
 46349  	}
 46350  }
 46351  func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
 46352  	b := v.Block
 46353  	_ = b
 46354  	// match: (Rsh32Ux16 <t> x y)
 46355  	// cond:
 46356  	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
 46357  	for {
 46358  		t := v.Type
 46359  		_ = v.Args[1]
 46360  		x := v.Args[0]
 46361  		y := v.Args[1]
 46362  		v.reset(OpAMD64ANDL)
 46363  		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
 46364  		v0.AddArg(x)
 46365  		v0.AddArg(y)
 46366  		v.AddArg(v0)
 46367  		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
 46368  		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
 46369  		v2.AuxInt = 32
 46370  		v2.AddArg(y)
 46371  		v1.AddArg(v2)
 46372  		v.AddArg(v1)
 46373  		return true
 46374  	}
 46375  }
 46376  func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
 46377  	b := v.Block
 46378  	_ = b
 46379  	// match: (Rsh32Ux32 <t> x y)
 46380  	// cond:
 46381  	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
 46382  	for {
 46383  		t := v.Type
 46384  		_ = v.Args[1]
 46385  		x := v.Args[0]
 46386  		y := v.Args[1]
 46387  		v.reset(OpAMD64ANDL)
 46388  		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
 46389  		v0.AddArg(x)
 46390  		v0.AddArg(y)
 46391  		v.AddArg(v0)
 46392  		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
 46393  		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
 46394  		v2.AuxInt = 32
 46395  		v2.AddArg(y)
 46396  		v1.AddArg(v2)
 46397  		v.AddArg(v1)
 46398  		return true
 46399  	}
 46400  }
// rewriteValueAMD64_OpRsh32Ux64_0 lowers a 32-bit unsigned right shift with a
// 64-bit count: the SHRL result is ANDed with an SBBLcarrymask derived from
// (CMPQconst y [32]), which zeroes the result for out-of-range counts
// (Go shift semantics: shifts >= width yield 0 for unsigned operands).
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh32Ux8_0 is the same lowering as Rsh32Ux64 but with an
// 8-bit shift count, so the range check uses CMPBconst.
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh32x16_0 lowers a 32-bit signed right shift with a
// 16-bit count. ORing y with NOT(SBBLcarrymask(CMPWconst y [32])) leaves y
// unchanged for in-range counts and turns out-of-range counts into all-ones,
// so SARL saturates and fills with the sign bit.
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh32x32_0: as Rsh32x16 but the count is 32-bit, so the
// range check uses CMPLconst.
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh32x64_0: as Rsh32x16 but the count is 64-bit, so the
// saturation mask is built with the Q-width ops (ORQ/NOTQ/SBBQcarrymask).
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh32x8_0: as Rsh32x16 but the count is 8-bit, so the
// range check uses CMPBconst.
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpRsh64Ux16_0 lowers a 64-bit unsigned right shift with a
// 16-bit count: SHRQ masked by SBBQcarrymask(CMPWconst y [64]), zeroing the
// result for out-of-range counts (Go shift semantics).
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh64Ux32_0: as Rsh64Ux16 but with a 32-bit count
// (CMPLconst range check).
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh64Ux64_0: as Rsh64Ux16 but with a 64-bit count
// (CMPQconst range check).
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh64Ux8_0: as Rsh64Ux16 but with an 8-bit count
// (CMPBconst range check).
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh64x16_0 lowers a 64-bit signed right shift with a
// 16-bit count: the count is ORed with NOT(SBBLcarrymask(CMPWconst y [64]))
// so out-of-range counts become all-ones and SARQ fills with the sign bit.
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh64x32_0: as Rsh64x16 but with a 32-bit count
// (CMPLconst range check).
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh64x64_0: as Rsh64x16 but with a 64-bit count, so the
// saturation mask uses the Q-width ops (ORQ/NOTQ/SBBQcarrymask/CMPQconst).
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh64x8_0: as Rsh64x16 but with an 8-bit count
// (CMPBconst range check).
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpRsh8Ux16_0 lowers an 8-bit unsigned right shift with a
// 16-bit count: SHRB masked by SBBLcarrymask(CMPWconst y [8]), zeroing the
// result for out-of-range counts (Go shift semantics).
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh8Ux32_0: as Rsh8Ux16 but with a 32-bit count
// (CMPLconst range check).
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh8Ux64_0: as Rsh8Ux16 but with a 64-bit count
// (CMPQconst range check).
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh8Ux8_0: as Rsh8Ux16 but with an 8-bit count
// (CMPBconst range check).
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh8x16_0 lowers an 8-bit signed right shift with a
// 16-bit count: the count is ORed with NOT(SBBLcarrymask(CMPWconst y [8]))
// so out-of-range counts become all-ones and SARB fills with the sign bit.
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh8x32_0: as Rsh8x16 but with a 32-bit count
// (CMPLconst range check).
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh8x64_0: as Rsh8x16 but with a 64-bit count, so the
// saturation mask uses the Q-width ops (ORQ/NOTQ/SBBQcarrymask/CMPQconst).
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpRsh8x8_0: as Rsh8x16 but with an 8-bit count
// (CMPBconst range check).
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpSelect0_0 rewrites Select0 of an AddTupleFirst32/64:
// the first tuple element is the tuple's first component plus val, so the
// select becomes an ADDL/ADDQ of val with Select0 of the inner tuple.
// Returns false if the argument is neither AddTupleFirst op.
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}

// rewriteValueAMD64_OpSelect1_0 rewrites Select1 of an AddTupleFirst32/64:
// only the first tuple component is modified by AddTupleFirst, so the second
// component is just Select1 of the inner tuple. Returns false otherwise.
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSignExt16to32_0 lowers SignExt16to32 to MOVWQSX
// (sign-extending word move). Always matches.
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpSignExt16to64_0 lowers SignExt16to64 to MOVWQSX.
// Always matches.
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpSignExt32to64_0 lowers SignExt32to64 to MOVLQSX
// (sign-extending long move). Always matches.
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpSignExt8to16_0 lowers SignExt8to16 to MOVBQSX
// (sign-extending byte move). Always matches.
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpSignExt8to32_0 lowers SignExt8to32 to MOVBQSX.
// Always matches.
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpSignExt8to64_0 lowers SignExt8to64 to MOVBQSX.
// Always matches.
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpSlicemask_0 lowers Slicemask to (SARQconst (NEGQ x) [63]):
// negating x and arithmetic-shifting by 63 broadcasts the "x != 0" condition
// into an all-ones or all-zeros mask. Always matches.
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpSqrt_0 lowers Sqrt to the SQRTSD instruction.
// Always matches.
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpStaticCall_0 lowers a generic StaticCall to the
// machine-level CALLstatic, carrying over the argument width (AuxInt) and
// call target (Aux). Always matches.
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
// rewriteValueAMD64_OpStore_0 lowers a generic Store to the width-appropriate
// AMD64 store. The float cases (MOVSDstore/MOVSSstore) are tried before the
// same-sized integer cases so that 8- and 4-byte float values use SSE stores.
// Returns false when no arm matches (e.g. sizes handled elsewhere).
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSub16_0 lowers Sub16 to SUBL (sub-32-bit values use the
// 32-bit ALU op). Always matches.
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpSub32_0 lowers Sub32 to SUBL. Always matches.
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpSub32F_0 lowers Sub32F to the SSE SUBSS instruction.
// Always matches.
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpSub64_0 lowers Sub64 to SUBQ. Always matches.
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpSub64F_0 lowers Sub64F to the SSE SUBSD instruction.
// Always matches.
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpSub8_0 lowers Sub8 to SUBL. Always matches.
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
// rewriteValueAMD64_OpSubPtr_0 lowers SubPtr by pointer width: SUBQ for
// 8-byte pointers (amd64), SUBL for 4-byte pointers (amd64p32). Returns
// false if neither condition holds.
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpTrunc_0 lowers Trunc to ROUNDSD with rounding mode 3
// (truncate toward zero). Always matches.
func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
	// match: (Trunc x)
	// cond:
	// result: (ROUNDSD [3] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 3
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpTrunc16to8_0 rewrites Trunc16to8 to a no-op copy:
// on AMD64 narrowing truncation needs no instruction, the narrower value is
// just the low bits of the register. Always matches.
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpTrunc32to16_0: truncation is a no-op copy (see
// Trunc16to8). Always matches.
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpTrunc32to8_0: truncation is a no-op copy (see
// Trunc16to8). Always matches.
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpTrunc64to16_0: truncation is a no-op copy (see
// Trunc16to8). Always matches.
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpTrunc64to32_0: truncation is a no-op copy (see
// Trunc16to8). Always matches.
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}

// rewriteValueAMD64_OpTrunc64to8_0: truncation is a no-op copy (see
// Trunc16to8). Always matches.
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpWB_0 lowers the generic write-barrier op WB to
// LoweredWB, carrying over the barrier function symbol (Aux) and the
// destination/source/memory arguments. Always matches.
func rewriteValueAMD64_OpWB_0(v *Value) bool {
	// match: (WB {fn} destptr srcptr mem)
	// cond:
	// result: (LoweredWB {fn} destptr srcptr mem)
	for {
		fn := v.Aux
		_ = v.Args[2]
		destptr := v.Args[0]
		srcptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64LoweredWB)
		v.Aux = fn
		v.AddArg(destptr)
		v.AddArg(srcptr)
		v.AddArg(mem)
		return true
	}
}

// rewriteValueAMD64_OpXor16_0 lowers Xor16 to XORL (sub-32-bit values use the
// 32-bit ALU op). Always matches.
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpXor32_0 lowers Xor32 to XORL. Always matches.
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpXor64_0 lowers Xor64 to XORQ. Always matches.
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}

// rewriteValueAMD64_OpXor8_0 lowers Xor8 to XORL. Always matches.
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
// rewriteValueAMD64_OpZero_0 lowers small generic Zero [size] ops into
// AMD64 constant-store sequences:
//
//	size 0            -> no-op (becomes a Copy of the incoming memory)
//	size 1/2/4/8      -> a single MOVB/MOVW/MOVL/MOVQstoreconst of 0
//	size 3/5/6        -> two adjacent stores (small store at the tail,
//	                     wider store at offset 0)
//	size 7            -> two 4-byte stores at offsets 0 and 3 (overlap
//	                     one byte, which is harmless when storing zeros)
//	size > 8, s%8 != 0, no SSE
//	                  -> peel: store 8 zero bytes at offset 0, then
//	                     recurse with an aligned size on destptr+s%8
//	                     (the two regions may overlap)
//
// Returns false if no rule applied; rewriteValueAMD64_OpZero_10/_20
// handle the remaining sizes.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2) // store value 0 at byte offset 2
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4) // store value 0 at byte offset 4
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4) // store value 0 at byte offset 4
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3) // 4-byte store at offset 3 overlaps the one at 0
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		// Zero the unaligned head with one 8-byte store, then recurse
		// on an 8-byte-multiple region starting at destptr+s%8.
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpZero_10 continues lowering Zero [size]:
//
//	no SSE: sizes 16/24/32 become chains of 8-byte MOVQstoreconst.
//	SSE, 8 < s < 16: two (possibly overlapping) 8-byte stores at
//	  offsets 0 and s-8.
//	SSE, s > 16, s%16 != 0: peel the unaligned head with a 16-byte
//	  MOVOstore of zero (or an 8-byte store when s%16 <= 8), then
//	  recurse on a 16-byte-multiple region at destptr+s%16.
//	SSE: sizes 16/32/48/64 become chains of 16-byte MOVOstore of a
//	  zero MOVOconst.
//
// Returns false if no rule applied; rewriteValueAMD64_OpZero_20 picks
// up larger sizes.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8) // store value 0 at byte offset 8
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8) // second store overlaps the first when s < 16
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		// 16-byte store covers the unaligned head; the recursive Zero
		// handles the 16-byte-multiple remainder (regions may overlap).
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		// Head fits in 8 bytes, so a MOVQ store suffices before the
		// recursive Zero on the aligned remainder.
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
// rewriteValueAMD64_OpZero_20 handles large Zero [s] ops:
//
//	64 < s <= 1024, s%16 == 0, Duff's device enabled
//	  -> DUFFZERO, jumping into an unrolled 16-byte-store zeroing loop.
//	s > 1024, or Duff disabled with s > 64, or no SSE with s > 32
//	  (s%8 == 0 in all cases)
//	  -> REP STOSQ with count s/8 and value 0.
//
// Returns false if no rule applied.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s // size in bytes; presumably selects the Duff entry point
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8 // quadword count for REP STOSQ
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0 // value to store
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpZeroExt16to32_0 lowers ZeroExt16to32 to MOVWQZX
// (zero-extending 16-bit move). Unconditional; always returns true.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpZeroExt16to64_0 lowers ZeroExt16to64 to MOVWQZX
// (zero-extending 16-bit move). Unconditional; always returns true.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpZeroExt32to64_0 lowers ZeroExt32to64 to MOVLQZX
// (zero-extending 32-bit move). Unconditional; always returns true.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpZeroExt8to16_0 lowers ZeroExt8to16 to MOVBQZX
// (zero-extending 8-bit move). Unconditional; always returns true.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpZeroExt8to32_0 lowers ZeroExt8to32 to MOVBQZX
// (zero-extending 8-bit move). Unconditional; always returns true.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpZeroExt8to64_0 lowers ZeroExt8to64 to MOVBQZX
// (zero-extending 8-bit move). Unconditional; always returns true.
// Generated from gen/AMD64.rules — change the rules, not this code.
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
 48163  func rewriteBlockAMD64(b *Block) bool {
 48164  	config := b.Func.Config
 48165  	_ = config
 48166  	fe := b.Func.fe
 48167  	_ = fe
 48168  	typ := &config.Types
 48169  	_ = typ
 48170  	switch b.Kind {
 48171  	case BlockAMD64EQ:
 48172  		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
 48173  		// cond: !config.nacl
 48174  		// result: (UGE (BTL x y))
 48175  		for {
 48176  			v := b.Control
 48177  			if v.Op != OpAMD64TESTL {
 48178  				break
 48179  			}
 48180  			_ = v.Args[1]
 48181  			v_0 := v.Args[0]
 48182  			if v_0.Op != OpAMD64SHLL {
 48183  				break
 48184  			}
 48185  			_ = v_0.Args[1]
 48186  			v_0_0 := v_0.Args[0]
 48187  			if v_0_0.Op != OpAMD64MOVLconst {
 48188  				break
 48189  			}
 48190  			if v_0_0.AuxInt != 1 {
 48191  				break
 48192  			}
 48193  			x := v_0.Args[1]
 48194  			y := v.Args[1]
 48195  			if !(!config.nacl) {
 48196  				break
 48197  			}
 48198  			b.Kind = BlockAMD64UGE
 48199  			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 48200  			v0.AddArg(x)
 48201  			v0.AddArg(y)
 48202  			b.SetControl(v0)
 48203  			b.Aux = nil
 48204  			return true
 48205  		}
 48206  		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
 48207  		// cond: !config.nacl
 48208  		// result: (UGE (BTL x y))
 48209  		for {
 48210  			v := b.Control
 48211  			if v.Op != OpAMD64TESTL {
 48212  				break
 48213  			}
 48214  			_ = v.Args[1]
 48215  			y := v.Args[0]
 48216  			v_1 := v.Args[1]
 48217  			if v_1.Op != OpAMD64SHLL {
 48218  				break
 48219  			}
 48220  			_ = v_1.Args[1]
 48221  			v_1_0 := v_1.Args[0]
 48222  			if v_1_0.Op != OpAMD64MOVLconst {
 48223  				break
 48224  			}
 48225  			if v_1_0.AuxInt != 1 {
 48226  				break
 48227  			}
 48228  			x := v_1.Args[1]
 48229  			if !(!config.nacl) {
 48230  				break
 48231  			}
 48232  			b.Kind = BlockAMD64UGE
 48233  			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 48234  			v0.AddArg(x)
 48235  			v0.AddArg(y)
 48236  			b.SetControl(v0)
 48237  			b.Aux = nil
 48238  			return true
 48239  		}
 48240  		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
 48241  		// cond: !config.nacl
 48242  		// result: (UGE (BTQ x y))
 48243  		for {
 48244  			v := b.Control
 48245  			if v.Op != OpAMD64TESTQ {
 48246  				break
 48247  			}
 48248  			_ = v.Args[1]
 48249  			v_0 := v.Args[0]
 48250  			if v_0.Op != OpAMD64SHLQ {
 48251  				break
 48252  			}
 48253  			_ = v_0.Args[1]
 48254  			v_0_0 := v_0.Args[0]
 48255  			if v_0_0.Op != OpAMD64MOVQconst {
 48256  				break
 48257  			}
 48258  			if v_0_0.AuxInt != 1 {
 48259  				break
 48260  			}
 48261  			x := v_0.Args[1]
 48262  			y := v.Args[1]
 48263  			if !(!config.nacl) {
 48264  				break
 48265  			}
 48266  			b.Kind = BlockAMD64UGE
 48267  			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 48268  			v0.AddArg(x)
 48269  			v0.AddArg(y)
 48270  			b.SetControl(v0)
 48271  			b.Aux = nil
 48272  			return true
 48273  		}
 48274  		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
 48275  		// cond: !config.nacl
 48276  		// result: (UGE (BTQ x y))
 48277  		for {
 48278  			v := b.Control
 48279  			if v.Op != OpAMD64TESTQ {
 48280  				break
 48281  			}
 48282  			_ = v.Args[1]
 48283  			y := v.Args[0]
 48284  			v_1 := v.Args[1]
 48285  			if v_1.Op != OpAMD64SHLQ {
 48286  				break
 48287  			}
 48288  			_ = v_1.Args[1]
 48289  			v_1_0 := v_1.Args[0]
 48290  			if v_1_0.Op != OpAMD64MOVQconst {
 48291  				break
 48292  			}
 48293  			if v_1_0.AuxInt != 1 {
 48294  				break
 48295  			}
 48296  			x := v_1.Args[1]
 48297  			if !(!config.nacl) {
 48298  				break
 48299  			}
 48300  			b.Kind = BlockAMD64UGE
 48301  			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 48302  			v0.AddArg(x)
 48303  			v0.AddArg(y)
 48304  			b.SetControl(v0)
 48305  			b.Aux = nil
 48306  			return true
 48307  		}
 48308  		// match: (EQ (TESTLconst [c] x))
 48309  		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
 48310  		// result: (UGE (BTLconst [log2(c)] x))
 48311  		for {
 48312  			v := b.Control
 48313  			if v.Op != OpAMD64TESTLconst {
 48314  				break
 48315  			}
 48316  			c := v.AuxInt
 48317  			x := v.Args[0]
 48318  			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
 48319  				break
 48320  			}
 48321  			b.Kind = BlockAMD64UGE
 48322  			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
 48323  			v0.AuxInt = log2(c)
 48324  			v0.AddArg(x)
 48325  			b.SetControl(v0)
 48326  			b.Aux = nil
 48327  			return true
 48328  		}
 48329  		// match: (EQ (TESTQconst [c] x))
 48330  		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 48331  		// result: (UGE (BTQconst [log2(c)] x))
 48332  		for {
 48333  			v := b.Control
 48334  			if v.Op != OpAMD64TESTQconst {
 48335  				break
 48336  			}
 48337  			c := v.AuxInt
 48338  			x := v.Args[0]
 48339  			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 48340  				break
 48341  			}
 48342  			b.Kind = BlockAMD64UGE
 48343  			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 48344  			v0.AuxInt = log2(c)
 48345  			v0.AddArg(x)
 48346  			b.SetControl(v0)
 48347  			b.Aux = nil
 48348  			return true
 48349  		}
 48350  		// match: (EQ (TESTQ (MOVQconst [c]) x))
 48351  		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 48352  		// result: (UGE (BTQconst [log2(c)] x))
 48353  		for {
 48354  			v := b.Control
 48355  			if v.Op != OpAMD64TESTQ {
 48356  				break
 48357  			}
 48358  			_ = v.Args[1]
 48359  			v_0 := v.Args[0]
 48360  			if v_0.Op != OpAMD64MOVQconst {
 48361  				break
 48362  			}
 48363  			c := v_0.AuxInt
 48364  			x := v.Args[1]
 48365  			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 48366  				break
 48367  			}
 48368  			b.Kind = BlockAMD64UGE
 48369  			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 48370  			v0.AuxInt = log2(c)
 48371  			v0.AddArg(x)
 48372  			b.SetControl(v0)
 48373  			b.Aux = nil
 48374  			return true
 48375  		}
 48376  		// match: (EQ (TESTQ x (MOVQconst [c])))
 48377  		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 48378  		// result: (UGE (BTQconst [log2(c)] x))
 48379  		for {
 48380  			v := b.Control
 48381  			if v.Op != OpAMD64TESTQ {
 48382  				break
 48383  			}
 48384  			_ = v.Args[1]
 48385  			x := v.Args[0]
 48386  			v_1 := v.Args[1]
 48387  			if v_1.Op != OpAMD64MOVQconst {
 48388  				break
 48389  			}
 48390  			c := v_1.AuxInt
 48391  			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 48392  				break
 48393  			}
 48394  			b.Kind = BlockAMD64UGE
 48395  			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 48396  			v0.AuxInt = log2(c)
 48397  			v0.AddArg(x)
 48398  			b.SetControl(v0)
 48399  			b.Aux = nil
 48400  			return true
 48401  		}
 48402  		// match: (EQ (InvertFlags cmp) yes no)
 48403  		// cond:
 48404  		// result: (EQ cmp yes no)
 48405  		for {
 48406  			v := b.Control
 48407  			if v.Op != OpAMD64InvertFlags {
 48408  				break
 48409  			}
 48410  			cmp := v.Args[0]
 48411  			b.Kind = BlockAMD64EQ
 48412  			b.SetControl(cmp)
 48413  			b.Aux = nil
 48414  			return true
 48415  		}
 48416  		// match: (EQ (FlagEQ) yes no)
 48417  		// cond:
 48418  		// result: (First nil yes no)
 48419  		for {
 48420  			v := b.Control
 48421  			if v.Op != OpAMD64FlagEQ {
 48422  				break
 48423  			}
 48424  			b.Kind = BlockFirst
 48425  			b.SetControl(nil)
 48426  			b.Aux = nil
 48427  			return true
 48428  		}
 48429  		// match: (EQ (FlagLT_ULT) yes no)
 48430  		// cond:
 48431  		// result: (First nil no yes)
 48432  		for {
 48433  			v := b.Control
 48434  			if v.Op != OpAMD64FlagLT_ULT {
 48435  				break
 48436  			}
 48437  			b.Kind = BlockFirst
 48438  			b.SetControl(nil)
 48439  			b.Aux = nil
 48440  			b.swapSuccessors()
 48441  			return true
 48442  		}
 48443  		// match: (EQ (FlagLT_UGT) yes no)
 48444  		// cond:
 48445  		// result: (First nil no yes)
 48446  		for {
 48447  			v := b.Control
 48448  			if v.Op != OpAMD64FlagLT_UGT {
 48449  				break
 48450  			}
 48451  			b.Kind = BlockFirst
 48452  			b.SetControl(nil)
 48453  			b.Aux = nil
 48454  			b.swapSuccessors()
 48455  			return true
 48456  		}
 48457  		// match: (EQ (FlagGT_ULT) yes no)
 48458  		// cond:
 48459  		// result: (First nil no yes)
 48460  		for {
 48461  			v := b.Control
 48462  			if v.Op != OpAMD64FlagGT_ULT {
 48463  				break
 48464  			}
 48465  			b.Kind = BlockFirst
 48466  			b.SetControl(nil)
 48467  			b.Aux = nil
 48468  			b.swapSuccessors()
 48469  			return true
 48470  		}
 48471  		// match: (EQ (FlagGT_UGT) yes no)
 48472  		// cond:
 48473  		// result: (First nil no yes)
 48474  		for {
 48475  			v := b.Control
 48476  			if v.Op != OpAMD64FlagGT_UGT {
 48477  				break
 48478  			}
 48479  			b.Kind = BlockFirst
 48480  			b.SetControl(nil)
 48481  			b.Aux = nil
 48482  			b.swapSuccessors()
 48483  			return true
 48484  		}
 48485  	case BlockAMD64GE:
 48486  		// match: (GE (InvertFlags cmp) yes no)
 48487  		// cond:
 48488  		// result: (LE cmp yes no)
 48489  		for {
 48490  			v := b.Control
 48491  			if v.Op != OpAMD64InvertFlags {
 48492  				break
 48493  			}
 48494  			cmp := v.Args[0]
 48495  			b.Kind = BlockAMD64LE
 48496  			b.SetControl(cmp)
 48497  			b.Aux = nil
 48498  			return true
 48499  		}
 48500  		// match: (GE (FlagEQ) yes no)
 48501  		// cond:
 48502  		// result: (First nil yes no)
 48503  		for {
 48504  			v := b.Control
 48505  			if v.Op != OpAMD64FlagEQ {
 48506  				break
 48507  			}
 48508  			b.Kind = BlockFirst
 48509  			b.SetControl(nil)
 48510  			b.Aux = nil
 48511  			return true
 48512  		}
 48513  		// match: (GE (FlagLT_ULT) yes no)
 48514  		// cond:
 48515  		// result: (First nil no yes)
 48516  		for {
 48517  			v := b.Control
 48518  			if v.Op != OpAMD64FlagLT_ULT {
 48519  				break
 48520  			}
 48521  			b.Kind = BlockFirst
 48522  			b.SetControl(nil)
 48523  			b.Aux = nil
 48524  			b.swapSuccessors()
 48525  			return true
 48526  		}
 48527  		// match: (GE (FlagLT_UGT) yes no)
 48528  		// cond:
 48529  		// result: (First nil no yes)
 48530  		for {
 48531  			v := b.Control
 48532  			if v.Op != OpAMD64FlagLT_UGT {
 48533  				break
 48534  			}
 48535  			b.Kind = BlockFirst
 48536  			b.SetControl(nil)
 48537  			b.Aux = nil
 48538  			b.swapSuccessors()
 48539  			return true
 48540  		}
 48541  		// match: (GE (FlagGT_ULT) yes no)
 48542  		// cond:
 48543  		// result: (First nil yes no)
 48544  		for {
 48545  			v := b.Control
 48546  			if v.Op != OpAMD64FlagGT_ULT {
 48547  				break
 48548  			}
 48549  			b.Kind = BlockFirst
 48550  			b.SetControl(nil)
 48551  			b.Aux = nil
 48552  			return true
 48553  		}
 48554  		// match: (GE (FlagGT_UGT) yes no)
 48555  		// cond:
 48556  		// result: (First nil yes no)
 48557  		for {
 48558  			v := b.Control
 48559  			if v.Op != OpAMD64FlagGT_UGT {
 48560  				break
 48561  			}
 48562  			b.Kind = BlockFirst
 48563  			b.SetControl(nil)
 48564  			b.Aux = nil
 48565  			return true
 48566  		}
 48567  	case BlockAMD64GT:
 48568  		// match: (GT (InvertFlags cmp) yes no)
 48569  		// cond:
 48570  		// result: (LT cmp yes no)
 48571  		for {
 48572  			v := b.Control
 48573  			if v.Op != OpAMD64InvertFlags {
 48574  				break
 48575  			}
 48576  			cmp := v.Args[0]
 48577  			b.Kind = BlockAMD64LT
 48578  			b.SetControl(cmp)
 48579  			b.Aux = nil
 48580  			return true
 48581  		}
 48582  		// match: (GT (FlagEQ) yes no)
 48583  		// cond:
 48584  		// result: (First nil no yes)
 48585  		for {
 48586  			v := b.Control
 48587  			if v.Op != OpAMD64FlagEQ {
 48588  				break
 48589  			}
 48590  			b.Kind = BlockFirst
 48591  			b.SetControl(nil)
 48592  			b.Aux = nil
 48593  			b.swapSuccessors()
 48594  			return true
 48595  		}
 48596  		// match: (GT (FlagLT_ULT) yes no)
 48597  		// cond:
 48598  		// result: (First nil no yes)
 48599  		for {
 48600  			v := b.Control
 48601  			if v.Op != OpAMD64FlagLT_ULT {
 48602  				break
 48603  			}
 48604  			b.Kind = BlockFirst
 48605  			b.SetControl(nil)
 48606  			b.Aux = nil
 48607  			b.swapSuccessors()
 48608  			return true
 48609  		}
 48610  		// match: (GT (FlagLT_UGT) yes no)
 48611  		// cond:
 48612  		// result: (First nil no yes)
 48613  		for {
 48614  			v := b.Control
 48615  			if v.Op != OpAMD64FlagLT_UGT {
 48616  				break
 48617  			}
 48618  			b.Kind = BlockFirst
 48619  			b.SetControl(nil)
 48620  			b.Aux = nil
 48621  			b.swapSuccessors()
 48622  			return true
 48623  		}
 48624  		// match: (GT (FlagGT_ULT) yes no)
 48625  		// cond:
 48626  		// result: (First nil yes no)
 48627  		for {
 48628  			v := b.Control
 48629  			if v.Op != OpAMD64FlagGT_ULT {
 48630  				break
 48631  			}
 48632  			b.Kind = BlockFirst
 48633  			b.SetControl(nil)
 48634  			b.Aux = nil
 48635  			return true
 48636  		}
 48637  		// match: (GT (FlagGT_UGT) yes no)
 48638  		// cond:
 48639  		// result: (First nil yes no)
 48640  		for {
 48641  			v := b.Control
 48642  			if v.Op != OpAMD64FlagGT_UGT {
 48643  				break
 48644  			}
 48645  			b.Kind = BlockFirst
 48646  			b.SetControl(nil)
 48647  			b.Aux = nil
 48648  			return true
 48649  		}
 48650  	case BlockIf:
 48651  		// match: (If (SETL cmp) yes no)
 48652  		// cond:
 48653  		// result: (LT cmp yes no)
 48654  		for {
 48655  			v := b.Control
 48656  			if v.Op != OpAMD64SETL {
 48657  				break
 48658  			}
 48659  			cmp := v.Args[0]
 48660  			b.Kind = BlockAMD64LT
 48661  			b.SetControl(cmp)
 48662  			b.Aux = nil
 48663  			return true
 48664  		}
 48665  		// match: (If (SETLE cmp) yes no)
 48666  		// cond:
 48667  		// result: (LE cmp yes no)
 48668  		for {
 48669  			v := b.Control
 48670  			if v.Op != OpAMD64SETLE {
 48671  				break
 48672  			}
 48673  			cmp := v.Args[0]
 48674  			b.Kind = BlockAMD64LE
 48675  			b.SetControl(cmp)
 48676  			b.Aux = nil
 48677  			return true
 48678  		}
 48679  		// match: (If (SETG cmp) yes no)
 48680  		// cond:
 48681  		// result: (GT cmp yes no)
 48682  		for {
 48683  			v := b.Control
 48684  			if v.Op != OpAMD64SETG {
 48685  				break
 48686  			}
 48687  			cmp := v.Args[0]
 48688  			b.Kind = BlockAMD64GT
 48689  			b.SetControl(cmp)
 48690  			b.Aux = nil
 48691  			return true
 48692  		}
 48693  		// match: (If (SETGE cmp) yes no)
 48694  		// cond:
 48695  		// result: (GE cmp yes no)
 48696  		for {
 48697  			v := b.Control
 48698  			if v.Op != OpAMD64SETGE {
 48699  				break
 48700  			}
 48701  			cmp := v.Args[0]
 48702  			b.Kind = BlockAMD64GE
 48703  			b.SetControl(cmp)
 48704  			b.Aux = nil
 48705  			return true
 48706  		}
 48707  		// match: (If (SETEQ cmp) yes no)
 48708  		// cond:
 48709  		// result: (EQ cmp yes no)
 48710  		for {
 48711  			v := b.Control
 48712  			if v.Op != OpAMD64SETEQ {
 48713  				break
 48714  			}
 48715  			cmp := v.Args[0]
 48716  			b.Kind = BlockAMD64EQ
 48717  			b.SetControl(cmp)
 48718  			b.Aux = nil
 48719  			return true
 48720  		}
 48721  		// match: (If (SETNE cmp) yes no)
 48722  		// cond:
 48723  		// result: (NE cmp yes no)
 48724  		for {
 48725  			v := b.Control
 48726  			if v.Op != OpAMD64SETNE {
 48727  				break
 48728  			}
 48729  			cmp := v.Args[0]
 48730  			b.Kind = BlockAMD64NE
 48731  			b.SetControl(cmp)
 48732  			b.Aux = nil
 48733  			return true
 48734  		}
 48735  		// match: (If (SETB cmp) yes no)
 48736  		// cond:
 48737  		// result: (ULT cmp yes no)
 48738  		for {
 48739  			v := b.Control
 48740  			if v.Op != OpAMD64SETB {
 48741  				break
 48742  			}
 48743  			cmp := v.Args[0]
 48744  			b.Kind = BlockAMD64ULT
 48745  			b.SetControl(cmp)
 48746  			b.Aux = nil
 48747  			return true
 48748  		}
 48749  		// match: (If (SETBE cmp) yes no)
 48750  		// cond:
 48751  		// result: (ULE cmp yes no)
 48752  		for {
 48753  			v := b.Control
 48754  			if v.Op != OpAMD64SETBE {
 48755  				break
 48756  			}
 48757  			cmp := v.Args[0]
 48758  			b.Kind = BlockAMD64ULE
 48759  			b.SetControl(cmp)
 48760  			b.Aux = nil
 48761  			return true
 48762  		}
 48763  		// match: (If (SETA cmp) yes no)
 48764  		// cond:
 48765  		// result: (UGT cmp yes no)
 48766  		for {
 48767  			v := b.Control
 48768  			if v.Op != OpAMD64SETA {
 48769  				break
 48770  			}
 48771  			cmp := v.Args[0]
 48772  			b.Kind = BlockAMD64UGT
 48773  			b.SetControl(cmp)
 48774  			b.Aux = nil
 48775  			return true
 48776  		}
 48777  		// match: (If (SETAE cmp) yes no)
 48778  		// cond:
 48779  		// result: (UGE cmp yes no)
 48780  		for {
 48781  			v := b.Control
 48782  			if v.Op != OpAMD64SETAE {
 48783  				break
 48784  			}
 48785  			cmp := v.Args[0]
 48786  			b.Kind = BlockAMD64UGE
 48787  			b.SetControl(cmp)
 48788  			b.Aux = nil
 48789  			return true
 48790  		}
 48791  		// match: (If (SETGF cmp) yes no)
 48792  		// cond:
 48793  		// result: (UGT cmp yes no)
 48794  		for {
 48795  			v := b.Control
 48796  			if v.Op != OpAMD64SETGF {
 48797  				break
 48798  			}
 48799  			cmp := v.Args[0]
 48800  			b.Kind = BlockAMD64UGT
 48801  			b.SetControl(cmp)
 48802  			b.Aux = nil
 48803  			return true
 48804  		}
 48805  		// match: (If (SETGEF cmp) yes no)
 48806  		// cond:
 48807  		// result: (UGE cmp yes no)
 48808  		for {
 48809  			v := b.Control
 48810  			if v.Op != OpAMD64SETGEF {
 48811  				break
 48812  			}
 48813  			cmp := v.Args[0]
 48814  			b.Kind = BlockAMD64UGE
 48815  			b.SetControl(cmp)
 48816  			b.Aux = nil
 48817  			return true
 48818  		}
 48819  		// match: (If (SETEQF cmp) yes no)
 48820  		// cond:
 48821  		// result: (EQF cmp yes no)
 48822  		for {
 48823  			v := b.Control
 48824  			if v.Op != OpAMD64SETEQF {
 48825  				break
 48826  			}
 48827  			cmp := v.Args[0]
 48828  			b.Kind = BlockAMD64EQF
 48829  			b.SetControl(cmp)
 48830  			b.Aux = nil
 48831  			return true
 48832  		}
 48833  		// match: (If (SETNEF cmp) yes no)
 48834  		// cond:
 48835  		// result: (NEF cmp yes no)
 48836  		for {
 48837  			v := b.Control
 48838  			if v.Op != OpAMD64SETNEF {
 48839  				break
 48840  			}
 48841  			cmp := v.Args[0]
 48842  			b.Kind = BlockAMD64NEF
 48843  			b.SetControl(cmp)
 48844  			b.Aux = nil
 48845  			return true
 48846  		}
 48847  		// match: (If cond yes no)
 48848  		// cond:
 48849  		// result: (NE (TESTB cond cond) yes no)
 48850  		for {
 48851  			v := b.Control
 48852  			_ = v
 48853  			cond := b.Control
 48854  			b.Kind = BlockAMD64NE
 48855  			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
 48856  			v0.AddArg(cond)
 48857  			v0.AddArg(cond)
 48858  			b.SetControl(v0)
 48859  			b.Aux = nil
 48860  			return true
 48861  		}
 48862  	case BlockAMD64LE:
 48863  		// match: (LE (InvertFlags cmp) yes no)
 48864  		// cond:
 48865  		// result: (GE cmp yes no)
 48866  		for {
 48867  			v := b.Control
 48868  			if v.Op != OpAMD64InvertFlags {
 48869  				break
 48870  			}
 48871  			cmp := v.Args[0]
 48872  			b.Kind = BlockAMD64GE
 48873  			b.SetControl(cmp)
 48874  			b.Aux = nil
 48875  			return true
 48876  		}
 48877  		// match: (LE (FlagEQ) yes no)
 48878  		// cond:
 48879  		// result: (First nil yes no)
 48880  		for {
 48881  			v := b.Control
 48882  			if v.Op != OpAMD64FlagEQ {
 48883  				break
 48884  			}
 48885  			b.Kind = BlockFirst
 48886  			b.SetControl(nil)
 48887  			b.Aux = nil
 48888  			return true
 48889  		}
 48890  		// match: (LE (FlagLT_ULT) yes no)
 48891  		// cond:
 48892  		// result: (First nil yes no)
 48893  		for {
 48894  			v := b.Control
 48895  			if v.Op != OpAMD64FlagLT_ULT {
 48896  				break
 48897  			}
 48898  			b.Kind = BlockFirst
 48899  			b.SetControl(nil)
 48900  			b.Aux = nil
 48901  			return true
 48902  		}
 48903  		// match: (LE (FlagLT_UGT) yes no)
 48904  		// cond:
 48905  		// result: (First nil yes no)
 48906  		for {
 48907  			v := b.Control
 48908  			if v.Op != OpAMD64FlagLT_UGT {
 48909  				break
 48910  			}
 48911  			b.Kind = BlockFirst
 48912  			b.SetControl(nil)
 48913  			b.Aux = nil
 48914  			return true
 48915  		}
 48916  		// match: (LE (FlagGT_ULT) yes no)
 48917  		// cond:
 48918  		// result: (First nil no yes)
 48919  		for {
 48920  			v := b.Control
 48921  			if v.Op != OpAMD64FlagGT_ULT {
 48922  				break
 48923  			}
 48924  			b.Kind = BlockFirst
 48925  			b.SetControl(nil)
 48926  			b.Aux = nil
 48927  			b.swapSuccessors()
 48928  			return true
 48929  		}
 48930  		// match: (LE (FlagGT_UGT) yes no)
 48931  		// cond:
 48932  		// result: (First nil no yes)
 48933  		for {
 48934  			v := b.Control
 48935  			if v.Op != OpAMD64FlagGT_UGT {
 48936  				break
 48937  			}
 48938  			b.Kind = BlockFirst
 48939  			b.SetControl(nil)
 48940  			b.Aux = nil
 48941  			b.swapSuccessors()
 48942  			return true
 48943  		}
 48944  	case BlockAMD64LT:
 48945  		// match: (LT (InvertFlags cmp) yes no)
 48946  		// cond:
 48947  		// result: (GT cmp yes no)
 48948  		for {
 48949  			v := b.Control
 48950  			if v.Op != OpAMD64InvertFlags {
 48951  				break
 48952  			}
 48953  			cmp := v.Args[0]
 48954  			b.Kind = BlockAMD64GT
 48955  			b.SetControl(cmp)
 48956  			b.Aux = nil
 48957  			return true
 48958  		}
 48959  		// match: (LT (FlagEQ) yes no)
 48960  		// cond:
 48961  		// result: (First nil no yes)
 48962  		for {
 48963  			v := b.Control
 48964  			if v.Op != OpAMD64FlagEQ {
 48965  				break
 48966  			}
 48967  			b.Kind = BlockFirst
 48968  			b.SetControl(nil)
 48969  			b.Aux = nil
 48970  			b.swapSuccessors()
 48971  			return true
 48972  		}
 48973  		// match: (LT (FlagLT_ULT) yes no)
 48974  		// cond:
 48975  		// result: (First nil yes no)
 48976  		for {
 48977  			v := b.Control
 48978  			if v.Op != OpAMD64FlagLT_ULT {
 48979  				break
 48980  			}
 48981  			b.Kind = BlockFirst
 48982  			b.SetControl(nil)
 48983  			b.Aux = nil
 48984  			return true
 48985  		}
 48986  		// match: (LT (FlagLT_UGT) yes no)
 48987  		// cond:
 48988  		// result: (First nil yes no)
 48989  		for {
 48990  			v := b.Control
 48991  			if v.Op != OpAMD64FlagLT_UGT {
 48992  				break
 48993  			}
 48994  			b.Kind = BlockFirst
 48995  			b.SetControl(nil)
 48996  			b.Aux = nil
 48997  			return true
 48998  		}
 48999  		// match: (LT (FlagGT_ULT) yes no)
 49000  		// cond:
 49001  		// result: (First nil no yes)
 49002  		for {
 49003  			v := b.Control
 49004  			if v.Op != OpAMD64FlagGT_ULT {
 49005  				break
 49006  			}
 49007  			b.Kind = BlockFirst
 49008  			b.SetControl(nil)
 49009  			b.Aux = nil
 49010  			b.swapSuccessors()
 49011  			return true
 49012  		}
 49013  		// match: (LT (FlagGT_UGT) yes no)
 49014  		// cond:
 49015  		// result: (First nil no yes)
 49016  		for {
 49017  			v := b.Control
 49018  			if v.Op != OpAMD64FlagGT_UGT {
 49019  				break
 49020  			}
 49021  			b.Kind = BlockFirst
 49022  			b.SetControl(nil)
 49023  			b.Aux = nil
 49024  			b.swapSuccessors()
 49025  			return true
 49026  		}
 49027  	case BlockAMD64NE:
 49028  		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
 49029  		// cond:
 49030  		// result: (LT cmp yes no)
 49031  		for {
 49032  			v := b.Control
 49033  			if v.Op != OpAMD64TESTB {
 49034  				break
 49035  			}
 49036  			_ = v.Args[1]
 49037  			v_0 := v.Args[0]
 49038  			if v_0.Op != OpAMD64SETL {
 49039  				break
 49040  			}
 49041  			cmp := v_0.Args[0]
 49042  			v_1 := v.Args[1]
 49043  			if v_1.Op != OpAMD64SETL {
 49044  				break
 49045  			}
 49046  			if cmp != v_1.Args[0] {
 49047  				break
 49048  			}
 49049  			b.Kind = BlockAMD64LT
 49050  			b.SetControl(cmp)
 49051  			b.Aux = nil
 49052  			return true
 49053  		}
 49054  		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
 49055  		// cond:
 49056  		// result: (LT cmp yes no)
 49057  		for {
 49058  			v := b.Control
 49059  			if v.Op != OpAMD64TESTB {
 49060  				break
 49061  			}
 49062  			_ = v.Args[1]
 49063  			v_0 := v.Args[0]
 49064  			if v_0.Op != OpAMD64SETL {
 49065  				break
 49066  			}
 49067  			cmp := v_0.Args[0]
 49068  			v_1 := v.Args[1]
 49069  			if v_1.Op != OpAMD64SETL {
 49070  				break
 49071  			}
 49072  			if cmp != v_1.Args[0] {
 49073  				break
 49074  			}
 49075  			b.Kind = BlockAMD64LT
 49076  			b.SetControl(cmp)
 49077  			b.Aux = nil
 49078  			return true
 49079  		}
 49080  		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
 49081  		// cond:
 49082  		// result: (LE cmp yes no)
 49083  		for {
 49084  			v := b.Control
 49085  			if v.Op != OpAMD64TESTB {
 49086  				break
 49087  			}
 49088  			_ = v.Args[1]
 49089  			v_0 := v.Args[0]
 49090  			if v_0.Op != OpAMD64SETLE {
 49091  				break
 49092  			}
 49093  			cmp := v_0.Args[0]
 49094  			v_1 := v.Args[1]
 49095  			if v_1.Op != OpAMD64SETLE {
 49096  				break
 49097  			}
 49098  			if cmp != v_1.Args[0] {
 49099  				break
 49100  			}
 49101  			b.Kind = BlockAMD64LE
 49102  			b.SetControl(cmp)
 49103  			b.Aux = nil
 49104  			return true
 49105  		}
 49106  		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
 49107  		// cond:
 49108  		// result: (LE cmp yes no)
 49109  		for {
 49110  			v := b.Control
 49111  			if v.Op != OpAMD64TESTB {
 49112  				break
 49113  			}
 49114  			_ = v.Args[1]
 49115  			v_0 := v.Args[0]
 49116  			if v_0.Op != OpAMD64SETLE {
 49117  				break
 49118  			}
 49119  			cmp := v_0.Args[0]
 49120  			v_1 := v.Args[1]
 49121  			if v_1.Op != OpAMD64SETLE {
 49122  				break
 49123  			}
 49124  			if cmp != v_1.Args[0] {
 49125  				break
 49126  			}
 49127  			b.Kind = BlockAMD64LE
 49128  			b.SetControl(cmp)
 49129  			b.Aux = nil
 49130  			return true
 49131  		}
 49132  		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
 49133  		// cond:
 49134  		// result: (GT cmp yes no)
 49135  		for {
 49136  			v := b.Control
 49137  			if v.Op != OpAMD64TESTB {
 49138  				break
 49139  			}
 49140  			_ = v.Args[1]
 49141  			v_0 := v.Args[0]
 49142  			if v_0.Op != OpAMD64SETG {
 49143  				break
 49144  			}
 49145  			cmp := v_0.Args[0]
 49146  			v_1 := v.Args[1]
 49147  			if v_1.Op != OpAMD64SETG {
 49148  				break
 49149  			}
 49150  			if cmp != v_1.Args[0] {
 49151  				break
 49152  			}
 49153  			b.Kind = BlockAMD64GT
 49154  			b.SetControl(cmp)
 49155  			b.Aux = nil
 49156  			return true
 49157  		}
 49158  		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
 49159  		// cond:
 49160  		// result: (GT cmp yes no)
 49161  		for {
 49162  			v := b.Control
 49163  			if v.Op != OpAMD64TESTB {
 49164  				break
 49165  			}
 49166  			_ = v.Args[1]
 49167  			v_0 := v.Args[0]
 49168  			if v_0.Op != OpAMD64SETG {
 49169  				break
 49170  			}
 49171  			cmp := v_0.Args[0]
 49172  			v_1 := v.Args[1]
 49173  			if v_1.Op != OpAMD64SETG {
 49174  				break
 49175  			}
 49176  			if cmp != v_1.Args[0] {
 49177  				break
 49178  			}
 49179  			b.Kind = BlockAMD64GT
 49180  			b.SetControl(cmp)
 49181  			b.Aux = nil
 49182  			return true
 49183  		}
 49184  		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
 49185  		// cond:
 49186  		// result: (GE cmp yes no)
 49187  		for {
 49188  			v := b.Control
 49189  			if v.Op != OpAMD64TESTB {
 49190  				break
 49191  			}
 49192  			_ = v.Args[1]
 49193  			v_0 := v.Args[0]
 49194  			if v_0.Op != OpAMD64SETGE {
 49195  				break
 49196  			}
 49197  			cmp := v_0.Args[0]
 49198  			v_1 := v.Args[1]
 49199  			if v_1.Op != OpAMD64SETGE {
 49200  				break
 49201  			}
 49202  			if cmp != v_1.Args[0] {
 49203  				break
 49204  			}
 49205  			b.Kind = BlockAMD64GE
 49206  			b.SetControl(cmp)
 49207  			b.Aux = nil
 49208  			return true
 49209  		}
 49210  		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
 49211  		// cond:
 49212  		// result: (GE cmp yes no)
 49213  		for {
 49214  			v := b.Control
 49215  			if v.Op != OpAMD64TESTB {
 49216  				break
 49217  			}
 49218  			_ = v.Args[1]
 49219  			v_0 := v.Args[0]
 49220  			if v_0.Op != OpAMD64SETGE {
 49221  				break
 49222  			}
 49223  			cmp := v_0.Args[0]
 49224  			v_1 := v.Args[1]
 49225  			if v_1.Op != OpAMD64SETGE {
 49226  				break
 49227  			}
 49228  			if cmp != v_1.Args[0] {
 49229  				break
 49230  			}
 49231  			b.Kind = BlockAMD64GE
 49232  			b.SetControl(cmp)
 49233  			b.Aux = nil
 49234  			return true
 49235  		}
 49236  		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
 49237  		// cond:
 49238  		// result: (EQ cmp yes no)
 49239  		for {
 49240  			v := b.Control
 49241  			if v.Op != OpAMD64TESTB {
 49242  				break
 49243  			}
 49244  			_ = v.Args[1]
 49245  			v_0 := v.Args[0]
 49246  			if v_0.Op != OpAMD64SETEQ {
 49247  				break
 49248  			}
 49249  			cmp := v_0.Args[0]
 49250  			v_1 := v.Args[1]
 49251  			if v_1.Op != OpAMD64SETEQ {
 49252  				break
 49253  			}
 49254  			if cmp != v_1.Args[0] {
 49255  				break
 49256  			}
 49257  			b.Kind = BlockAMD64EQ
 49258  			b.SetControl(cmp)
 49259  			b.Aux = nil
 49260  			return true
 49261  		}
 49262  		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
 49263  		// cond:
 49264  		// result: (EQ cmp yes no)
 49265  		for {
 49266  			v := b.Control
 49267  			if v.Op != OpAMD64TESTB {
 49268  				break
 49269  			}
 49270  			_ = v.Args[1]
 49271  			v_0 := v.Args[0]
 49272  			if v_0.Op != OpAMD64SETEQ {
 49273  				break
 49274  			}
 49275  			cmp := v_0.Args[0]
 49276  			v_1 := v.Args[1]
 49277  			if v_1.Op != OpAMD64SETEQ {
 49278  				break
 49279  			}
 49280  			if cmp != v_1.Args[0] {
 49281  				break
 49282  			}
 49283  			b.Kind = BlockAMD64EQ
 49284  			b.SetControl(cmp)
 49285  			b.Aux = nil
 49286  			return true
 49287  		}
 49288  		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
 49289  		// cond:
 49290  		// result: (NE cmp yes no)
 49291  		for {
 49292  			v := b.Control
 49293  			if v.Op != OpAMD64TESTB {
 49294  				break
 49295  			}
 49296  			_ = v.Args[1]
 49297  			v_0 := v.Args[0]
 49298  			if v_0.Op != OpAMD64SETNE {
 49299  				break
 49300  			}
 49301  			cmp := v_0.Args[0]
 49302  			v_1 := v.Args[1]
 49303  			if v_1.Op != OpAMD64SETNE {
 49304  				break
 49305  			}
 49306  			if cmp != v_1.Args[0] {
 49307  				break
 49308  			}
 49309  			b.Kind = BlockAMD64NE
 49310  			b.SetControl(cmp)
 49311  			b.Aux = nil
 49312  			return true
 49313  		}
 49314  		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
 49315  		// cond:
 49316  		// result: (NE cmp yes no)
 49317  		for {
 49318  			v := b.Control
 49319  			if v.Op != OpAMD64TESTB {
 49320  				break
 49321  			}
 49322  			_ = v.Args[1]
 49323  			v_0 := v.Args[0]
 49324  			if v_0.Op != OpAMD64SETNE {
 49325  				break
 49326  			}
 49327  			cmp := v_0.Args[0]
 49328  			v_1 := v.Args[1]
 49329  			if v_1.Op != OpAMD64SETNE {
 49330  				break
 49331  			}
 49332  			if cmp != v_1.Args[0] {
 49333  				break
 49334  			}
 49335  			b.Kind = BlockAMD64NE
 49336  			b.SetControl(cmp)
 49337  			b.Aux = nil
 49338  			return true
 49339  		}
 49340  		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
 49341  		// cond:
 49342  		// result: (ULT cmp yes no)
 49343  		for {
 49344  			v := b.Control
 49345  			if v.Op != OpAMD64TESTB {
 49346  				break
 49347  			}
 49348  			_ = v.Args[1]
 49349  			v_0 := v.Args[0]
 49350  			if v_0.Op != OpAMD64SETB {
 49351  				break
 49352  			}
 49353  			cmp := v_0.Args[0]
 49354  			v_1 := v.Args[1]
 49355  			if v_1.Op != OpAMD64SETB {
 49356  				break
 49357  			}
 49358  			if cmp != v_1.Args[0] {
 49359  				break
 49360  			}
 49361  			b.Kind = BlockAMD64ULT
 49362  			b.SetControl(cmp)
 49363  			b.Aux = nil
 49364  			return true
 49365  		}
 49366  		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
 49367  		// cond:
 49368  		// result: (ULT cmp yes no)
 49369  		for {
 49370  			v := b.Control
 49371  			if v.Op != OpAMD64TESTB {
 49372  				break
 49373  			}
 49374  			_ = v.Args[1]
 49375  			v_0 := v.Args[0]
 49376  			if v_0.Op != OpAMD64SETB {
 49377  				break
 49378  			}
 49379  			cmp := v_0.Args[0]
 49380  			v_1 := v.Args[1]
 49381  			if v_1.Op != OpAMD64SETB {
 49382  				break
 49383  			}
 49384  			if cmp != v_1.Args[0] {
 49385  				break
 49386  			}
 49387  			b.Kind = BlockAMD64ULT
 49388  			b.SetControl(cmp)
 49389  			b.Aux = nil
 49390  			return true
 49391  		}
 49392  		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
 49393  		// cond:
 49394  		// result: (ULE cmp yes no)
 49395  		for {
 49396  			v := b.Control
 49397  			if v.Op != OpAMD64TESTB {
 49398  				break
 49399  			}
 49400  			_ = v.Args[1]
 49401  			v_0 := v.Args[0]
 49402  			if v_0.Op != OpAMD64SETBE {
 49403  				break
 49404  			}
 49405  			cmp := v_0.Args[0]
 49406  			v_1 := v.Args[1]
 49407  			if v_1.Op != OpAMD64SETBE {
 49408  				break
 49409  			}
 49410  			if cmp != v_1.Args[0] {
 49411  				break
 49412  			}
 49413  			b.Kind = BlockAMD64ULE
 49414  			b.SetControl(cmp)
 49415  			b.Aux = nil
 49416  			return true
 49417  		}
 49418  		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
 49419  		// cond:
 49420  		// result: (ULE cmp yes no)
 49421  		for {
 49422  			v := b.Control
 49423  			if v.Op != OpAMD64TESTB {
 49424  				break
 49425  			}
 49426  			_ = v.Args[1]
 49427  			v_0 := v.Args[0]
 49428  			if v_0.Op != OpAMD64SETBE {
 49429  				break
 49430  			}
 49431  			cmp := v_0.Args[0]
 49432  			v_1 := v.Args[1]
 49433  			if v_1.Op != OpAMD64SETBE {
 49434  				break
 49435  			}
 49436  			if cmp != v_1.Args[0] {
 49437  				break
 49438  			}
 49439  			b.Kind = BlockAMD64ULE
 49440  			b.SetControl(cmp)
 49441  			b.Aux = nil
 49442  			return true
 49443  		}
 49444  		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
 49445  		// cond:
 49446  		// result: (UGT cmp yes no)
 49447  		for {
 49448  			v := b.Control
 49449  			if v.Op != OpAMD64TESTB {
 49450  				break
 49451  			}
 49452  			_ = v.Args[1]
 49453  			v_0 := v.Args[0]
 49454  			if v_0.Op != OpAMD64SETA {
 49455  				break
 49456  			}
 49457  			cmp := v_0.Args[0]
 49458  			v_1 := v.Args[1]
 49459  			if v_1.Op != OpAMD64SETA {
 49460  				break
 49461  			}
 49462  			if cmp != v_1.Args[0] {
 49463  				break
 49464  			}
 49465  			b.Kind = BlockAMD64UGT
 49466  			b.SetControl(cmp)
 49467  			b.Aux = nil
 49468  			return true
 49469  		}
 49470  		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
 49471  		// cond:
 49472  		// result: (UGT cmp yes no)
 49473  		for {
 49474  			v := b.Control
 49475  			if v.Op != OpAMD64TESTB {
 49476  				break
 49477  			}
 49478  			_ = v.Args[1]
 49479  			v_0 := v.Args[0]
 49480  			if v_0.Op != OpAMD64SETA {
 49481  				break
 49482  			}
 49483  			cmp := v_0.Args[0]
 49484  			v_1 := v.Args[1]
 49485  			if v_1.Op != OpAMD64SETA {
 49486  				break
 49487  			}
 49488  			if cmp != v_1.Args[0] {
 49489  				break
 49490  			}
 49491  			b.Kind = BlockAMD64UGT
 49492  			b.SetControl(cmp)
 49493  			b.Aux = nil
 49494  			return true
 49495  		}
 49496  		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
 49497  		// cond:
 49498  		// result: (UGE cmp yes no)
 49499  		for {
 49500  			v := b.Control
 49501  			if v.Op != OpAMD64TESTB {
 49502  				break
 49503  			}
 49504  			_ = v.Args[1]
 49505  			v_0 := v.Args[0]
 49506  			if v_0.Op != OpAMD64SETAE {
 49507  				break
 49508  			}
 49509  			cmp := v_0.Args[0]
 49510  			v_1 := v.Args[1]
 49511  			if v_1.Op != OpAMD64SETAE {
 49512  				break
 49513  			}
 49514  			if cmp != v_1.Args[0] {
 49515  				break
 49516  			}
 49517  			b.Kind = BlockAMD64UGE
 49518  			b.SetControl(cmp)
 49519  			b.Aux = nil
 49520  			return true
 49521  		}
 49522  		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
 49523  		// cond:
 49524  		// result: (UGE cmp yes no)
 49525  		for {
 49526  			v := b.Control
 49527  			if v.Op != OpAMD64TESTB {
 49528  				break
 49529  			}
 49530  			_ = v.Args[1]
 49531  			v_0 := v.Args[0]
 49532  			if v_0.Op != OpAMD64SETAE {
 49533  				break
 49534  			}
 49535  			cmp := v_0.Args[0]
 49536  			v_1 := v.Args[1]
 49537  			if v_1.Op != OpAMD64SETAE {
 49538  				break
 49539  			}
 49540  			if cmp != v_1.Args[0] {
 49541  				break
 49542  			}
 49543  			b.Kind = BlockAMD64UGE
 49544  			b.SetControl(cmp)
 49545  			b.Aux = nil
 49546  			return true
 49547  		}
 49548  		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
 49549  		// cond: !config.nacl
 49550  		// result: (ULT (BTL x y))
 49551  		for {
 49552  			v := b.Control
 49553  			if v.Op != OpAMD64TESTL {
 49554  				break
 49555  			}
 49556  			_ = v.Args[1]
 49557  			v_0 := v.Args[0]
 49558  			if v_0.Op != OpAMD64SHLL {
 49559  				break
 49560  			}
 49561  			_ = v_0.Args[1]
 49562  			v_0_0 := v_0.Args[0]
 49563  			if v_0_0.Op != OpAMD64MOVLconst {
 49564  				break
 49565  			}
 49566  			if v_0_0.AuxInt != 1 {
 49567  				break
 49568  			}
 49569  			x := v_0.Args[1]
 49570  			y := v.Args[1]
 49571  			if !(!config.nacl) {
 49572  				break
 49573  			}
 49574  			b.Kind = BlockAMD64ULT
 49575  			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 49576  			v0.AddArg(x)
 49577  			v0.AddArg(y)
 49578  			b.SetControl(v0)
 49579  			b.Aux = nil
 49580  			return true
 49581  		}
 49582  		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
 49583  		// cond: !config.nacl
 49584  		// result: (ULT (BTL x y))
 49585  		for {
 49586  			v := b.Control
 49587  			if v.Op != OpAMD64TESTL {
 49588  				break
 49589  			}
 49590  			_ = v.Args[1]
 49591  			y := v.Args[0]
 49592  			v_1 := v.Args[1]
 49593  			if v_1.Op != OpAMD64SHLL {
 49594  				break
 49595  			}
 49596  			_ = v_1.Args[1]
 49597  			v_1_0 := v_1.Args[0]
 49598  			if v_1_0.Op != OpAMD64MOVLconst {
 49599  				break
 49600  			}
 49601  			if v_1_0.AuxInt != 1 {
 49602  				break
 49603  			}
 49604  			x := v_1.Args[1]
 49605  			if !(!config.nacl) {
 49606  				break
 49607  			}
 49608  			b.Kind = BlockAMD64ULT
 49609  			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
 49610  			v0.AddArg(x)
 49611  			v0.AddArg(y)
 49612  			b.SetControl(v0)
 49613  			b.Aux = nil
 49614  			return true
 49615  		}
 49616  		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
 49617  		// cond: !config.nacl
 49618  		// result: (ULT (BTQ x y))
 49619  		for {
 49620  			v := b.Control
 49621  			if v.Op != OpAMD64TESTQ {
 49622  				break
 49623  			}
 49624  			_ = v.Args[1]
 49625  			v_0 := v.Args[0]
 49626  			if v_0.Op != OpAMD64SHLQ {
 49627  				break
 49628  			}
 49629  			_ = v_0.Args[1]
 49630  			v_0_0 := v_0.Args[0]
 49631  			if v_0_0.Op != OpAMD64MOVQconst {
 49632  				break
 49633  			}
 49634  			if v_0_0.AuxInt != 1 {
 49635  				break
 49636  			}
 49637  			x := v_0.Args[1]
 49638  			y := v.Args[1]
 49639  			if !(!config.nacl) {
 49640  				break
 49641  			}
 49642  			b.Kind = BlockAMD64ULT
 49643  			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 49644  			v0.AddArg(x)
 49645  			v0.AddArg(y)
 49646  			b.SetControl(v0)
 49647  			b.Aux = nil
 49648  			return true
 49649  		}
 49650  		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
 49651  		// cond: !config.nacl
 49652  		// result: (ULT (BTQ x y))
 49653  		for {
 49654  			v := b.Control
 49655  			if v.Op != OpAMD64TESTQ {
 49656  				break
 49657  			}
 49658  			_ = v.Args[1]
 49659  			y := v.Args[0]
 49660  			v_1 := v.Args[1]
 49661  			if v_1.Op != OpAMD64SHLQ {
 49662  				break
 49663  			}
 49664  			_ = v_1.Args[1]
 49665  			v_1_0 := v_1.Args[0]
 49666  			if v_1_0.Op != OpAMD64MOVQconst {
 49667  				break
 49668  			}
 49669  			if v_1_0.AuxInt != 1 {
 49670  				break
 49671  			}
 49672  			x := v_1.Args[1]
 49673  			if !(!config.nacl) {
 49674  				break
 49675  			}
 49676  			b.Kind = BlockAMD64ULT
 49677  			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
 49678  			v0.AddArg(x)
 49679  			v0.AddArg(y)
 49680  			b.SetControl(v0)
 49681  			b.Aux = nil
 49682  			return true
 49683  		}
 49684  		// match: (NE (TESTLconst [c] x))
 49685  		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
 49686  		// result: (ULT (BTLconst [log2(c)] x))
 49687  		for {
 49688  			v := b.Control
 49689  			if v.Op != OpAMD64TESTLconst {
 49690  				break
 49691  			}
 49692  			c := v.AuxInt
 49693  			x := v.Args[0]
 49694  			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
 49695  				break
 49696  			}
 49697  			b.Kind = BlockAMD64ULT
 49698  			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
 49699  			v0.AuxInt = log2(c)
 49700  			v0.AddArg(x)
 49701  			b.SetControl(v0)
 49702  			b.Aux = nil
 49703  			return true
 49704  		}
 49705  		// match: (NE (TESTQconst [c] x))
 49706  		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 49707  		// result: (ULT (BTQconst [log2(c)] x))
 49708  		for {
 49709  			v := b.Control
 49710  			if v.Op != OpAMD64TESTQconst {
 49711  				break
 49712  			}
 49713  			c := v.AuxInt
 49714  			x := v.Args[0]
 49715  			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 49716  				break
 49717  			}
 49718  			b.Kind = BlockAMD64ULT
 49719  			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 49720  			v0.AuxInt = log2(c)
 49721  			v0.AddArg(x)
 49722  			b.SetControl(v0)
 49723  			b.Aux = nil
 49724  			return true
 49725  		}
 49726  		// match: (NE (TESTQ (MOVQconst [c]) x))
 49727  		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 49728  		// result: (ULT (BTQconst [log2(c)] x))
 49729  		for {
 49730  			v := b.Control
 49731  			if v.Op != OpAMD64TESTQ {
 49732  				break
 49733  			}
 49734  			_ = v.Args[1]
 49735  			v_0 := v.Args[0]
 49736  			if v_0.Op != OpAMD64MOVQconst {
 49737  				break
 49738  			}
 49739  			c := v_0.AuxInt
 49740  			x := v.Args[1]
 49741  			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 49742  				break
 49743  			}
 49744  			b.Kind = BlockAMD64ULT
 49745  			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 49746  			v0.AuxInt = log2(c)
 49747  			v0.AddArg(x)
 49748  			b.SetControl(v0)
 49749  			b.Aux = nil
 49750  			return true
 49751  		}
 49752  		// match: (NE (TESTQ x (MOVQconst [c])))
 49753  		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
 49754  		// result: (ULT (BTQconst [log2(c)] x))
 49755  		for {
 49756  			v := b.Control
 49757  			if v.Op != OpAMD64TESTQ {
 49758  				break
 49759  			}
 49760  			_ = v.Args[1]
 49761  			x := v.Args[0]
 49762  			v_1 := v.Args[1]
 49763  			if v_1.Op != OpAMD64MOVQconst {
 49764  				break
 49765  			}
 49766  			c := v_1.AuxInt
 49767  			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
 49768  				break
 49769  			}
 49770  			b.Kind = BlockAMD64ULT
 49771  			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
 49772  			v0.AuxInt = log2(c)
 49773  			v0.AddArg(x)
 49774  			b.SetControl(v0)
 49775  			b.Aux = nil
 49776  			return true
 49777  		}
 49778  		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
 49779  		// cond:
 49780  		// result: (UGT cmp yes no)
 49781  		for {
 49782  			v := b.Control
 49783  			if v.Op != OpAMD64TESTB {
 49784  				break
 49785  			}
 49786  			_ = v.Args[1]
 49787  			v_0 := v.Args[0]
 49788  			if v_0.Op != OpAMD64SETGF {
 49789  				break
 49790  			}
 49791  			cmp := v_0.Args[0]
 49792  			v_1 := v.Args[1]
 49793  			if v_1.Op != OpAMD64SETGF {
 49794  				break
 49795  			}
 49796  			if cmp != v_1.Args[0] {
 49797  				break
 49798  			}
 49799  			b.Kind = BlockAMD64UGT
 49800  			b.SetControl(cmp)
 49801  			b.Aux = nil
 49802  			return true
 49803  		}
 49804  		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
 49805  		// cond:
 49806  		// result: (UGT cmp yes no)
 49807  		for {
 49808  			v := b.Control
 49809  			if v.Op != OpAMD64TESTB {
 49810  				break
 49811  			}
 49812  			_ = v.Args[1]
 49813  			v_0 := v.Args[0]
 49814  			if v_0.Op != OpAMD64SETGF {
 49815  				break
 49816  			}
 49817  			cmp := v_0.Args[0]
 49818  			v_1 := v.Args[1]
 49819  			if v_1.Op != OpAMD64SETGF {
 49820  				break
 49821  			}
 49822  			if cmp != v_1.Args[0] {
 49823  				break
 49824  			}
 49825  			b.Kind = BlockAMD64UGT
 49826  			b.SetControl(cmp)
 49827  			b.Aux = nil
 49828  			return true
 49829  		}
 49830  		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
 49831  		// cond:
 49832  		// result: (UGE cmp yes no)
 49833  		for {
 49834  			v := b.Control
 49835  			if v.Op != OpAMD64TESTB {
 49836  				break
 49837  			}
 49838  			_ = v.Args[1]
 49839  			v_0 := v.Args[0]
 49840  			if v_0.Op != OpAMD64SETGEF {
 49841  				break
 49842  			}
 49843  			cmp := v_0.Args[0]
 49844  			v_1 := v.Args[1]
 49845  			if v_1.Op != OpAMD64SETGEF {
 49846  				break
 49847  			}
 49848  			if cmp != v_1.Args[0] {
 49849  				break
 49850  			}
 49851  			b.Kind = BlockAMD64UGE
 49852  			b.SetControl(cmp)
 49853  			b.Aux = nil
 49854  			return true
 49855  		}
 49856  		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
 49857  		// cond:
 49858  		// result: (UGE cmp yes no)
 49859  		for {
 49860  			v := b.Control
 49861  			if v.Op != OpAMD64TESTB {
 49862  				break
 49863  			}
 49864  			_ = v.Args[1]
 49865  			v_0 := v.Args[0]
 49866  			if v_0.Op != OpAMD64SETGEF {
 49867  				break
 49868  			}
 49869  			cmp := v_0.Args[0]
 49870  			v_1 := v.Args[1]
 49871  			if v_1.Op != OpAMD64SETGEF {
 49872  				break
 49873  			}
 49874  			if cmp != v_1.Args[0] {
 49875  				break
 49876  			}
 49877  			b.Kind = BlockAMD64UGE
 49878  			b.SetControl(cmp)
 49879  			b.Aux = nil
 49880  			return true
 49881  		}
 49882  		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
 49883  		// cond:
 49884  		// result: (EQF cmp yes no)
 49885  		for {
 49886  			v := b.Control
 49887  			if v.Op != OpAMD64TESTB {
 49888  				break
 49889  			}
 49890  			_ = v.Args[1]
 49891  			v_0 := v.Args[0]
 49892  			if v_0.Op != OpAMD64SETEQF {
 49893  				break
 49894  			}
 49895  			cmp := v_0.Args[0]
 49896  			v_1 := v.Args[1]
 49897  			if v_1.Op != OpAMD64SETEQF {
 49898  				break
 49899  			}
 49900  			if cmp != v_1.Args[0] {
 49901  				break
 49902  			}
 49903  			b.Kind = BlockAMD64EQF
 49904  			b.SetControl(cmp)
 49905  			b.Aux = nil
 49906  			return true
 49907  		}
 49908  		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
 49909  		// cond:
 49910  		// result: (EQF cmp yes no)
 49911  		for {
 49912  			v := b.Control
 49913  			if v.Op != OpAMD64TESTB {
 49914  				break
 49915  			}
 49916  			_ = v.Args[1]
 49917  			v_0 := v.Args[0]
 49918  			if v_0.Op != OpAMD64SETEQF {
 49919  				break
 49920  			}
 49921  			cmp := v_0.Args[0]
 49922  			v_1 := v.Args[1]
 49923  			if v_1.Op != OpAMD64SETEQF {
 49924  				break
 49925  			}
 49926  			if cmp != v_1.Args[0] {
 49927  				break
 49928  			}
 49929  			b.Kind = BlockAMD64EQF
 49930  			b.SetControl(cmp)
 49931  			b.Aux = nil
 49932  			return true
 49933  		}
 49934  		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
 49935  		// cond:
 49936  		// result: (NEF cmp yes no)
 49937  		for {
 49938  			v := b.Control
 49939  			if v.Op != OpAMD64TESTB {
 49940  				break
 49941  			}
 49942  			_ = v.Args[1]
 49943  			v_0 := v.Args[0]
 49944  			if v_0.Op != OpAMD64SETNEF {
 49945  				break
 49946  			}
 49947  			cmp := v_0.Args[0]
 49948  			v_1 := v.Args[1]
 49949  			if v_1.Op != OpAMD64SETNEF {
 49950  				break
 49951  			}
 49952  			if cmp != v_1.Args[0] {
 49953  				break
 49954  			}
 49955  			b.Kind = BlockAMD64NEF
 49956  			b.SetControl(cmp)
 49957  			b.Aux = nil
 49958  			return true
 49959  		}
 49960  		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
 49961  		// cond:
 49962  		// result: (NEF cmp yes no)
 49963  		for {
 49964  			v := b.Control
 49965  			if v.Op != OpAMD64TESTB {
 49966  				break
 49967  			}
 49968  			_ = v.Args[1]
 49969  			v_0 := v.Args[0]
 49970  			if v_0.Op != OpAMD64SETNEF {
 49971  				break
 49972  			}
 49973  			cmp := v_0.Args[0]
 49974  			v_1 := v.Args[1]
 49975  			if v_1.Op != OpAMD64SETNEF {
 49976  				break
 49977  			}
 49978  			if cmp != v_1.Args[0] {
 49979  				break
 49980  			}
 49981  			b.Kind = BlockAMD64NEF
 49982  			b.SetControl(cmp)
 49983  			b.Aux = nil
 49984  			return true
 49985  		}
 49986  		// match: (NE (InvertFlags cmp) yes no)
 49987  		// cond:
 49988  		// result: (NE cmp yes no)
 49989  		for {
 49990  			v := b.Control
 49991  			if v.Op != OpAMD64InvertFlags {
 49992  				break
 49993  			}
 49994  			cmp := v.Args[0]
 49995  			b.Kind = BlockAMD64NE
 49996  			b.SetControl(cmp)
 49997  			b.Aux = nil
 49998  			return true
 49999  		}
 50000  		// match: (NE (FlagEQ) yes no)
 50001  		// cond:
 50002  		// result: (First nil no yes)
 50003  		for {
 50004  			v := b.Control
 50005  			if v.Op != OpAMD64FlagEQ {
 50006  				break
 50007  			}
 50008  			b.Kind = BlockFirst
 50009  			b.SetControl(nil)
 50010  			b.Aux = nil
 50011  			b.swapSuccessors()
 50012  			return true
 50013  		}
 50014  		// match: (NE (FlagLT_ULT) yes no)
 50015  		// cond:
 50016  		// result: (First nil yes no)
 50017  		for {
 50018  			v := b.Control
 50019  			if v.Op != OpAMD64FlagLT_ULT {
 50020  				break
 50021  			}
 50022  			b.Kind = BlockFirst
 50023  			b.SetControl(nil)
 50024  			b.Aux = nil
 50025  			return true
 50026  		}
 50027  		// match: (NE (FlagLT_UGT) yes no)
 50028  		// cond:
 50029  		// result: (First nil yes no)
 50030  		for {
 50031  			v := b.Control
 50032  			if v.Op != OpAMD64FlagLT_UGT {
 50033  				break
 50034  			}
 50035  			b.Kind = BlockFirst
 50036  			b.SetControl(nil)
 50037  			b.Aux = nil
 50038  			return true
 50039  		}
 50040  		// match: (NE (FlagGT_ULT) yes no)
 50041  		// cond:
 50042  		// result: (First nil yes no)
 50043  		for {
 50044  			v := b.Control
 50045  			if v.Op != OpAMD64FlagGT_ULT {
 50046  				break
 50047  			}
 50048  			b.Kind = BlockFirst
 50049  			b.SetControl(nil)
 50050  			b.Aux = nil
 50051  			return true
 50052  		}
 50053  		// match: (NE (FlagGT_UGT) yes no)
 50054  		// cond:
 50055  		// result: (First nil yes no)
 50056  		for {
 50057  			v := b.Control
 50058  			if v.Op != OpAMD64FlagGT_UGT {
 50059  				break
 50060  			}
 50061  			b.Kind = BlockFirst
 50062  			b.SetControl(nil)
 50063  			b.Aux = nil
 50064  			return true
 50065  		}
 50066  	case BlockAMD64UGE:
 50067  		// match: (UGE (InvertFlags cmp) yes no)
 50068  		// cond:
 50069  		// result: (ULE cmp yes no)
 50070  		for {
 50071  			v := b.Control
 50072  			if v.Op != OpAMD64InvertFlags {
 50073  				break
 50074  			}
 50075  			cmp := v.Args[0]
 50076  			b.Kind = BlockAMD64ULE
 50077  			b.SetControl(cmp)
 50078  			b.Aux = nil
 50079  			return true
 50080  		}
 50081  		// match: (UGE (FlagEQ) yes no)
 50082  		// cond:
 50083  		// result: (First nil yes no)
 50084  		for {
 50085  			v := b.Control
 50086  			if v.Op != OpAMD64FlagEQ {
 50087  				break
 50088  			}
 50089  			b.Kind = BlockFirst
 50090  			b.SetControl(nil)
 50091  			b.Aux = nil
 50092  			return true
 50093  		}
 50094  		// match: (UGE (FlagLT_ULT) yes no)
 50095  		// cond:
 50096  		// result: (First nil no yes)
 50097  		for {
 50098  			v := b.Control
 50099  			if v.Op != OpAMD64FlagLT_ULT {
 50100  				break
 50101  			}
 50102  			b.Kind = BlockFirst
 50103  			b.SetControl(nil)
 50104  			b.Aux = nil
 50105  			b.swapSuccessors()
 50106  			return true
 50107  		}
 50108  		// match: (UGE (FlagLT_UGT) yes no)
 50109  		// cond:
 50110  		// result: (First nil yes no)
 50111  		for {
 50112  			v := b.Control
 50113  			if v.Op != OpAMD64FlagLT_UGT {
 50114  				break
 50115  			}
 50116  			b.Kind = BlockFirst
 50117  			b.SetControl(nil)
 50118  			b.Aux = nil
 50119  			return true
 50120  		}
 50121  		// match: (UGE (FlagGT_ULT) yes no)
 50122  		// cond:
 50123  		// result: (First nil no yes)
 50124  		for {
 50125  			v := b.Control
 50126  			if v.Op != OpAMD64FlagGT_ULT {
 50127  				break
 50128  			}
 50129  			b.Kind = BlockFirst
 50130  			b.SetControl(nil)
 50131  			b.Aux = nil
 50132  			b.swapSuccessors()
 50133  			return true
 50134  		}
 50135  		// match: (UGE (FlagGT_UGT) yes no)
 50136  		// cond:
 50137  		// result: (First nil yes no)
 50138  		for {
 50139  			v := b.Control
 50140  			if v.Op != OpAMD64FlagGT_UGT {
 50141  				break
 50142  			}
 50143  			b.Kind = BlockFirst
 50144  			b.SetControl(nil)
 50145  			b.Aux = nil
 50146  			return true
 50147  		}
 50148  	case BlockAMD64UGT:
 50149  		// match: (UGT (InvertFlags cmp) yes no)
 50150  		// cond:
 50151  		// result: (ULT cmp yes no)
 50152  		for {
 50153  			v := b.Control
 50154  			if v.Op != OpAMD64InvertFlags {
 50155  				break
 50156  			}
 50157  			cmp := v.Args[0]
 50158  			b.Kind = BlockAMD64ULT
 50159  			b.SetControl(cmp)
 50160  			b.Aux = nil
 50161  			return true
 50162  		}
 50163  		// match: (UGT (FlagEQ) yes no)
 50164  		// cond:
 50165  		// result: (First nil no yes)
 50166  		for {
 50167  			v := b.Control
 50168  			if v.Op != OpAMD64FlagEQ {
 50169  				break
 50170  			}
 50171  			b.Kind = BlockFirst
 50172  			b.SetControl(nil)
 50173  			b.Aux = nil
 50174  			b.swapSuccessors()
 50175  			return true
 50176  		}
 50177  		// match: (UGT (FlagLT_ULT) yes no)
 50178  		// cond:
 50179  		// result: (First nil no yes)
 50180  		for {
 50181  			v := b.Control
 50182  			if v.Op != OpAMD64FlagLT_ULT {
 50183  				break
 50184  			}
 50185  			b.Kind = BlockFirst
 50186  			b.SetControl(nil)
 50187  			b.Aux = nil
 50188  			b.swapSuccessors()
 50189  			return true
 50190  		}
 50191  		// match: (UGT (FlagLT_UGT) yes no)
 50192  		// cond:
 50193  		// result: (First nil yes no)
 50194  		for {
 50195  			v := b.Control
 50196  			if v.Op != OpAMD64FlagLT_UGT {
 50197  				break
 50198  			}
 50199  			b.Kind = BlockFirst
 50200  			b.SetControl(nil)
 50201  			b.Aux = nil
 50202  			return true
 50203  		}
 50204  		// match: (UGT (FlagGT_ULT) yes no)
 50205  		// cond:
 50206  		// result: (First nil no yes)
 50207  		for {
 50208  			v := b.Control
 50209  			if v.Op != OpAMD64FlagGT_ULT {
 50210  				break
 50211  			}
 50212  			b.Kind = BlockFirst
 50213  			b.SetControl(nil)
 50214  			b.Aux = nil
 50215  			b.swapSuccessors()
 50216  			return true
 50217  		}
 50218  		// match: (UGT (FlagGT_UGT) yes no)
 50219  		// cond:
 50220  		// result: (First nil yes no)
 50221  		for {
 50222  			v := b.Control
 50223  			if v.Op != OpAMD64FlagGT_UGT {
 50224  				break
 50225  			}
 50226  			b.Kind = BlockFirst
 50227  			b.SetControl(nil)
 50228  			b.Aux = nil
 50229  			return true
 50230  		}
 50231  	case BlockAMD64ULE:
 50232  		// match: (ULE (InvertFlags cmp) yes no)
 50233  		// cond:
 50234  		// result: (UGE cmp yes no)
 50235  		for {
 50236  			v := b.Control
 50237  			if v.Op != OpAMD64InvertFlags {
 50238  				break
 50239  			}
 50240  			cmp := v.Args[0]
 50241  			b.Kind = BlockAMD64UGE
 50242  			b.SetControl(cmp)
 50243  			b.Aux = nil
 50244  			return true
 50245  		}
 50246  		// match: (ULE (FlagEQ) yes no)
 50247  		// cond:
 50248  		// result: (First nil yes no)
 50249  		for {
 50250  			v := b.Control
 50251  			if v.Op != OpAMD64FlagEQ {
 50252  				break
 50253  			}
 50254  			b.Kind = BlockFirst
 50255  			b.SetControl(nil)
 50256  			b.Aux = nil
 50257  			return true
 50258  		}
 50259  		// match: (ULE (FlagLT_ULT) yes no)
 50260  		// cond:
 50261  		// result: (First nil yes no)
 50262  		for {
 50263  			v := b.Control
 50264  			if v.Op != OpAMD64FlagLT_ULT {
 50265  				break
 50266  			}
 50267  			b.Kind = BlockFirst
 50268  			b.SetControl(nil)
 50269  			b.Aux = nil
 50270  			return true
 50271  		}
 50272  		// match: (ULE (FlagLT_UGT) yes no)
 50273  		// cond:
 50274  		// result: (First nil no yes)
 50275  		for {
 50276  			v := b.Control
 50277  			if v.Op != OpAMD64FlagLT_UGT {
 50278  				break
 50279  			}
 50280  			b.Kind = BlockFirst
 50281  			b.SetControl(nil)
 50282  			b.Aux = nil
 50283  			b.swapSuccessors()
 50284  			return true
 50285  		}
 50286  		// match: (ULE (FlagGT_ULT) yes no)
 50287  		// cond:
 50288  		// result: (First nil yes no)
 50289  		for {
 50290  			v := b.Control
 50291  			if v.Op != OpAMD64FlagGT_ULT {
 50292  				break
 50293  			}
 50294  			b.Kind = BlockFirst
 50295  			b.SetControl(nil)
 50296  			b.Aux = nil
 50297  			return true
 50298  		}
 50299  		// match: (ULE (FlagGT_UGT) yes no)
 50300  		// cond:
 50301  		// result: (First nil no yes)
 50302  		for {
 50303  			v := b.Control
 50304  			if v.Op != OpAMD64FlagGT_UGT {
 50305  				break
 50306  			}
 50307  			b.Kind = BlockFirst
 50308  			b.SetControl(nil)
 50309  			b.Aux = nil
 50310  			b.swapSuccessors()
 50311  			return true
 50312  		}
 50313  	case BlockAMD64ULT:
 50314  		// match: (ULT (InvertFlags cmp) yes no)
 50315  		// cond:
 50316  		// result: (UGT cmp yes no)
 50317  		for {
 50318  			v := b.Control
 50319  			if v.Op != OpAMD64InvertFlags {
 50320  				break
 50321  			}
 50322  			cmp := v.Args[0]
 50323  			b.Kind = BlockAMD64UGT
 50324  			b.SetControl(cmp)
 50325  			b.Aux = nil
 50326  			return true
 50327  		}
 50328  		// match: (ULT (FlagEQ) yes no)
 50329  		// cond:
 50330  		// result: (First nil no yes)
 50331  		for {
 50332  			v := b.Control
 50333  			if v.Op != OpAMD64FlagEQ {
 50334  				break
 50335  			}
 50336  			b.Kind = BlockFirst
 50337  			b.SetControl(nil)
 50338  			b.Aux = nil
 50339  			b.swapSuccessors()
 50340  			return true
 50341  		}
 50342  		// match: (ULT (FlagLT_ULT) yes no)
 50343  		// cond:
 50344  		// result: (First nil yes no)
 50345  		for {
 50346  			v := b.Control
 50347  			if v.Op != OpAMD64FlagLT_ULT {
 50348  				break
 50349  			}
 50350  			b.Kind = BlockFirst
 50351  			b.SetControl(nil)
 50352  			b.Aux = nil
 50353  			return true
 50354  		}
 50355  		// match: (ULT (FlagLT_UGT) yes no)
 50356  		// cond:
 50357  		// result: (First nil no yes)
 50358  		for {
 50359  			v := b.Control
 50360  			if v.Op != OpAMD64FlagLT_UGT {
 50361  				break
 50362  			}
 50363  			b.Kind = BlockFirst
 50364  			b.SetControl(nil)
 50365  			b.Aux = nil
 50366  			b.swapSuccessors()
 50367  			return true
 50368  		}
 50369  		// match: (ULT (FlagGT_ULT) yes no)
 50370  		// cond:
 50371  		// result: (First nil yes no)
 50372  		for {
 50373  			v := b.Control
 50374  			if v.Op != OpAMD64FlagGT_ULT {
 50375  				break
 50376  			}
 50377  			b.Kind = BlockFirst
 50378  			b.SetControl(nil)
 50379  			b.Aux = nil
 50380  			return true
 50381  		}
 50382  		// match: (ULT (FlagGT_UGT) yes no)
 50383  		// cond:
 50384  		// result: (First nil no yes)
 50385  		for {
 50386  			v := b.Control
 50387  			if v.Op != OpAMD64FlagGT_UGT {
 50388  				break
 50389  			}
 50390  			b.Kind = BlockFirst
 50391  			b.SetControl(nil)
 50392  			b.Aux = nil
 50393  			b.swapSuccessors()
 50394  			return true
 50395  		}
 50396  	}
 50397  	return false
 50398  }