// github.com/liujq9674git/golang-src-1.7@v0.0.0-20230517174348-17f6ec47f3f8/src/cmd/compile/internal/ssa/rewriteAMD64.go

// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go
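//
// Each rewrite function in this file was generated from a rule in
// gen/AMD64.rules. A rule pairs a match pattern with an optional boolean
// condition and a replacement, written roughly as follows (an illustrative
// paraphrase of the rule-file syntax):
//
//	(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
//
// The "match:", "cond:", and "result:" comments inside each function echo
// the rule that produced the code following them.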

package ssa

import "math"

var _ = math.MinInt8 // in case not otherwise used
func rewriteValueAMD64(v *Value, config *Config) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v, config)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v, config)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16(v, config)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32(v, config)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F(v, config)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64(v, config)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F(v, config)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8(v, config)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr(v, config)
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v, config)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16(v, config)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32(v, config)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64(v, config)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8(v, config)
	case OpAndB:
		return rewriteValueAMD64_OpAndB(v, config)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u(v, config)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32(v, config)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64(v, config)
	case OpAMD64CMOVLEQconst:
		return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
	case OpAMD64CMOVQEQconst:
		return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
	case OpAMD64CMOVWEQconst:
		return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v, config)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v, config)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v, config)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall(v, config)
	case OpCom16:
		return rewriteValueAMD64_OpCom16(v, config)
	case OpCom32:
		return rewriteValueAMD64_OpCom32(v, config)
	case OpCom64:
		return rewriteValueAMD64_OpCom64(v, config)
	case OpCom8:
		return rewriteValueAMD64_OpCom8(v, config)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v, config)
	case OpConst32:
		return rewriteValueAMD64_OpConst32(v, config)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F(v, config)
	case OpConst64:
		return rewriteValueAMD64_OpConst64(v, config)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F(v, config)
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v, config)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v, config)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v, config)
	case OpConvert:
		return rewriteValueAMD64_OpConvert(v, config)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v, config)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v, config)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v, config)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32(v, config)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64(v, config)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F(v, config)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F(v, config)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F(v, config)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32(v, config)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F(v, config)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64(v, config)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F(v, config)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F(v, config)
	case OpDeferCall:
		return rewriteValueAMD64_OpDeferCall(v, config)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v, config)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v, config)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v, config)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F(v, config)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v, config)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v, config)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F(v, config)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v, config)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v, config)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v, config)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v, config)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v, config)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v, config)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v, config)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v, config)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v, config)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v, config)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v, config)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16(v, config)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U(v, config)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32(v, config)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F(v, config)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U(v, config)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64(v, config)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F(v, config)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U(v, config)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8(v, config)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U(v, config)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr(v, config)
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v, config)
	case OpGoCall:
		return rewriteValueAMD64_OpGoCall(v, config)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16(v, config)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U(v, config)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32(v, config)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F(v, config)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U(v, config)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64(v, config)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F(v, config)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U(v, config)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8(v, config)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U(v, config)
	case OpHmul16:
		return rewriteValueAMD64_OpHmul16(v, config)
	case OpHmul16u:
		return rewriteValueAMD64_OpHmul16u(v, config)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32(v, config)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u(v, config)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64(v, config)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u(v, config)
	case OpHmul8:
		return rewriteValueAMD64_OpHmul8(v, config)
	case OpHmul8u:
		return rewriteValueAMD64_OpHmul8u(v, config)
	case OpITab:
		return rewriteValueAMD64_OpITab(v, config)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall(v, config)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v, config)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v, config)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v, config)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v, config)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v, config)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v, config)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v, config)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v, config)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v, config)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v, config)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v, config)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v, config)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v, config)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v, config)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v, config)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v, config)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v, config)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v, config)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v, config)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v, config)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v, config)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v, config)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v, config)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v, config)
	case OpLrot16:
		return rewriteValueAMD64_OpLrot16(v, config)
	case OpLrot32:
		return rewriteValueAMD64_OpLrot32(v, config)
	case OpLrot64:
		return rewriteValueAMD64_OpLrot64(v, config)
	case OpLrot8:
		return rewriteValueAMD64_OpLrot8(v, config)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v, config)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v, config)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v, config)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v, config)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v, config)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v, config)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v, config)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v, config)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v, config)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v, config)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v, config)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v, config)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v, config)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v, config)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v, config)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v, config)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v, config)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v, config)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v, config)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v, config)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v, config)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v, config)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v, config)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v, config)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v, config)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v, config)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v, config)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v, config)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v, config)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v, config)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v, config)
	case OpMove:
		return rewriteValueAMD64_OpMove(v, config)
	case OpMul16:
		return rewriteValueAMD64_OpMul16(v, config)
	case OpMul32:
		return rewriteValueAMD64_OpMul32(v, config)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F(v, config)
	case OpMul64:
		return rewriteValueAMD64_OpMul64(v, config)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F(v, config)
	case OpMul8:
		return rewriteValueAMD64_OpMul8(v, config)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v, config)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v, config)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v, config)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v, config)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16(v, config)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32(v, config)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v, config)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64(v, config)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v, config)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8(v, config)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v, config)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v, config)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v, config)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v, config)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v, config)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v, config)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v, config)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v, config)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck(v, config)
	case OpNot:
		return rewriteValueAMD64_OpNot(v, config)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v, config)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v, config)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v, config)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v, config)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v, config)
	case OpOr16:
		return rewriteValueAMD64_OpOr16(v, config)
	case OpOr32:
		return rewriteValueAMD64_OpOr32(v, config)
	case OpOr64:
		return rewriteValueAMD64_OpOr64(v, config)
	case OpOr8:
		return rewriteValueAMD64_OpOr8(v, config)
	case OpOrB:
		return rewriteValueAMD64_OpOrB(v, config)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v, config)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v, config)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v, config)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v, config)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v, config)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v, config)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v, config)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v, config)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v, config)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v, config)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v, config)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v, config)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v, config)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v, config)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v, config)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v, config)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v, config)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v, config)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v, config)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v, config)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v, config)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v, config)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v, config)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v, config)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v, config)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v, config)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v, config)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v, config)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v, config)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v, config)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v, config)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v, config)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v, config)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v, config)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v, config)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v, config)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v, config)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v, config)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v, config)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v, config)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v, config)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v, config)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v, config)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v, config)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v, config)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v, config)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v, config)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v, config)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v, config)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v, config)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v, config)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v, config)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v, config)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v, config)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v, config)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v, config)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v, config)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v, config)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32(v, config)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64(v, config)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64(v, config)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16(v, config)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32(v, config)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64(v, config)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt(v, config)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall(v, config)
	case OpStore:
		return rewriteValueAMD64_OpStore(v, config)
	case OpSub16:
		return rewriteValueAMD64_OpSub16(v, config)
	case OpSub32:
		return rewriteValueAMD64_OpSub32(v, config)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F(v, config)
	case OpSub64:
		return rewriteValueAMD64_OpSub64(v, config)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F(v, config)
	case OpSub8:
		return rewriteValueAMD64_OpSub8(v, config)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr(v, config)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8(v, config)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16(v, config)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8(v, config)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16(v, config)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32(v, config)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8(v, config)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v, config)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v, config)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v, config)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v, config)
	case OpXor16:
		return rewriteValueAMD64_OpXor16(v, config)
	case OpXor32:
		return rewriteValueAMD64_OpXor32(v, config)
	case OpXor64:
		return rewriteValueAMD64_OpXor64(v, config)
	case OpXor8:
		return rewriteValueAMD64_OpXor8(v, config)
	case OpZero:
		return rewriteValueAMD64_OpZero(v, config)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32(v, config)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64(v, config)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64(v, config)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16(v, config)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32(v, config)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64(v, config)
	}
	return false
}
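// Every rewriteValueAMD64_Op* function below has the same generated shape:
// each "for" block encodes one rule. The block matches v's operands against
// the rule's pattern, checks the rule's condition (if any), and on success
// rewrites v in place via v.reset(newOp) plus v.AuxInt/v.Aux/v.AddArg and
// returns true. A "break" abandons the current rule and falls through to the
// next block; reaching "return false" means no rule applied and v is left
// unchanged.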
func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
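// As a concrete (hypothetical) example of the last ADDL rule, an int32
// expression of the form
//
//	c := a + (-b)
//
// first lowers Neg32 to (NEGL b), and the rule then collapses the resulting
// (ADDL a (NEGL b)) into a single (SUBL a b) instruction.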
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	return false
}
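// The int64(int32(c+d)) in the two folding rules above truncates the sum to
// 32 bits and then sign-extends it again: AuxInt is an int64 field that
// stores 32-bit constants in sign-extended form, so the fold must
// renormalize after a possible 32-bit overflow.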
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQconst [c] y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} x) y)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
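// The SHLQconst, ADDQ, and ADDQconst rules above are strength reductions
// onto x86-64 addressing modes: an add of a scaled index or of a small
// constant becomes a single LEAQ. Illustrative examples:
//
//	p + 8*i   // (ADDQ p (SHLQconst [3] i)) -> (LEAQ8 p i), i.e. LEAQ (p)(i*8), dst
//	x + y + c // (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y)
//
// The x.Op != OpSB conditions keep the pseudo static-base register SB out of
// operand positions it cannot legally occupy in an addressing mode.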
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
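// All the is32Bit(c+d) conditions above guard the same encoding limit:
// x86-64 immediates and addressing-mode displacements are signed 32-bit
// fields, so two constants may only be folded into one LEAQ*/ADDQconst when
// their sum still fits in 32 bits.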
func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
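// The int32(c)==0 and int32(c)==-1 conditions above look only at the low 32
// bits of the mask, since that is all a 32-bit AND uses: an all-ones mask is
// a no-op (the rule returns x itself), and an all-zeros mask always yields
// the constant 0 regardless of x.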
func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
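// Masking with exactly 0xFF, 0xFFFF, or 0xFFFFFFFF is rewritten above into a
// zero-extending move instead of an AND. A hypothetical example:
//
//	var x uint64
//	y := x & 0xFFFF // (ANDQconst [0xFFFF] x) -> (MOVWQZX x), a MOVZX-style move
//
// The move needs no immediate operand at all; in particular 0xFFFFFFFF would
// not even fit in ANDQ's signed 32-bit immediate.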
func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add16  x y)
	// cond:
	// result: (ADDL  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add32  x y)
	// cond:
	// result: (ADDL  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add64  x y)
	// cond:
	// result: (ADDQ  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add8   x y)
	// cond:
	// result: (ADDL  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
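// Note that Add8, Add16, and Add32 all lower to the same 32-bit ADDL: the
// SSA backend permits the bits above a value's type width to be junk, so a
// sub-word add can use the full-width instruction, with any truncation or
// extension inserted separately at the points where the result is consumed.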
func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AddPtr x y)
	// cond:
	// result: (ADDQ  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Addr {sym} base)
	// cond:
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
}
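// Addr is the generic address-of-symbol op; on amd64 it becomes a LEAQ of
// the symbol relative to the base register supplied as the argument
// (typically the pseudo-registers SB for globals or SP for locals), e.g.
// LEAQ sym(SB), dst.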
  1648  func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
  1649  	b := v.Block
  1650  	_ = b
  1651  	// match: (And16 x y)
  1652  	// cond:
  1653  	// result: (ANDL x y)
  1654  	for {
  1655  		x := v.Args[0]
  1656  		y := v.Args[1]
  1657  		v.reset(OpAMD64ANDL)
  1658  		v.AddArg(x)
  1659  		v.AddArg(y)
  1660  		return true
  1661  	}
  1662  }
  1663  func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
  1664  	b := v.Block
  1665  	_ = b
  1666  	// match: (And32 x y)
  1667  	// cond:
  1668  	// result: (ANDL x y)
  1669  	for {
  1670  		x := v.Args[0]
  1671  		y := v.Args[1]
  1672  		v.reset(OpAMD64ANDL)
  1673  		v.AddArg(x)
  1674  		v.AddArg(y)
  1675  		return true
  1676  	}
  1677  }
  1678  func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
  1679  	b := v.Block
  1680  	_ = b
  1681  	// match: (And64 x y)
  1682  	// cond:
  1683  	// result: (ANDQ x y)
  1684  	for {
  1685  		x := v.Args[0]
  1686  		y := v.Args[1]
  1687  		v.reset(OpAMD64ANDQ)
  1688  		v.AddArg(x)
  1689  		v.AddArg(y)
  1690  		return true
  1691  	}
  1692  }
  1693  func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
  1694  	b := v.Block
  1695  	_ = b
  1696  	// match: (And8  x y)
  1697  	// cond:
  1698  	// result: (ANDL x y)
  1699  	for {
  1700  		x := v.Args[0]
  1701  		y := v.Args[1]
  1702  		v.reset(OpAMD64ANDL)
  1703  		v.AddArg(x)
  1704  		v.AddArg(y)
  1705  		return true
  1706  	}
  1707  }
  1708  func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
  1709  	b := v.Block
  1710  	_ = b
  1711  	// match: (AndB x y)
  1712  	// cond:
  1713  	// result: (ANDL x y)
  1714  	for {
  1715  		x := v.Args[0]
  1716  		y := v.Args[1]
  1717  		v.reset(OpAMD64ANDL)
  1718  		v.AddArg(x)
  1719  		v.AddArg(y)
  1720  		return true
  1721  	}
  1722  }
  1723  func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
  1724  	b := v.Block
  1725  	_ = b
  1726  	// match: (Avg64u x y)
  1727  	// cond:
  1728  	// result: (AVGQU x y)
  1729  	for {
  1730  		x := v.Args[0]
  1731  		y := v.Args[1]
  1732  		v.reset(OpAMD64AVGQU)
  1733  		v.AddArg(x)
  1734  		v.AddArg(y)
  1735  		return true
  1736  	}
  1737  }
  1738  func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
  1739  	b := v.Block
  1740  	_ = b
  1741  	// match: (Bswap32 x)
  1742  	// cond:
  1743  	// result: (BSWAPL x)
  1744  	for {
  1745  		x := v.Args[0]
  1746  		v.reset(OpAMD64BSWAPL)
  1747  		v.AddArg(x)
  1748  		return true
  1749  	}
  1750  }
  1751  func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
  1752  	b := v.Block
  1753  	_ = b
  1754  	// match: (Bswap64 x)
  1755  	// cond:
  1756  	// result: (BSWAPQ x)
  1757  	for {
  1758  		x := v.Args[0]
  1759  		v.reset(OpAMD64BSWAPQ)
  1760  		v.AddArg(x)
  1761  		return true
  1762  	}
  1763  }
  1764  func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
  1765  	b := v.Block
  1766  	_ = b
  1767  	// match: (CMOVLEQconst x (InvertFlags y) [c])
  1768  	// cond:
  1769  	// result: (CMOVLNEconst x y [c])
  1770  	for {
  1771  		x := v.Args[0]
  1772  		v_1 := v.Args[1]
  1773  		if v_1.Op != OpAMD64InvertFlags {
  1774  			break
  1775  		}
  1776  		y := v_1.Args[0]
  1777  		c := v.AuxInt
  1778  		v.reset(OpAMD64CMOVLNEconst)
  1779  		v.AddArg(x)
  1780  		v.AddArg(y)
  1781  		v.AuxInt = c
  1782  		return true
  1783  	}
  1784  	// match: (CMOVLEQconst _ (FlagEQ) [c])
  1785  	// cond:
  1786  	// result: (Const32 [c])
  1787  	for {
  1788  		v_1 := v.Args[1]
  1789  		if v_1.Op != OpAMD64FlagEQ {
  1790  			break
  1791  		}
  1792  		c := v.AuxInt
  1793  		v.reset(OpConst32)
  1794  		v.AuxInt = c
  1795  		return true
  1796  	}
  1797  	// match: (CMOVLEQconst x (FlagLT_ULT))
  1798  	// cond:
  1799  	// result: x
  1800  	for {
  1801  		x := v.Args[0]
  1802  		v_1 := v.Args[1]
  1803  		if v_1.Op != OpAMD64FlagLT_ULT {
  1804  			break
  1805  		}
  1806  		v.reset(OpCopy)
  1807  		v.Type = x.Type
  1808  		v.AddArg(x)
  1809  		return true
  1810  	}
  1811  	// match: (CMOVLEQconst x (FlagLT_UGT))
  1812  	// cond:
  1813  	// result: x
  1814  	for {
  1815  		x := v.Args[0]
  1816  		v_1 := v.Args[1]
  1817  		if v_1.Op != OpAMD64FlagLT_UGT {
  1818  			break
  1819  		}
  1820  		v.reset(OpCopy)
  1821  		v.Type = x.Type
  1822  		v.AddArg(x)
  1823  		return true
  1824  	}
  1825  	// match: (CMOVLEQconst x (FlagGT_ULT))
  1826  	// cond:
  1827  	// result: x
  1828  	for {
  1829  		x := v.Args[0]
  1830  		v_1 := v.Args[1]
  1831  		if v_1.Op != OpAMD64FlagGT_ULT {
  1832  			break
  1833  		}
  1834  		v.reset(OpCopy)
  1835  		v.Type = x.Type
  1836  		v.AddArg(x)
  1837  		return true
  1838  	}
  1839  	// match: (CMOVLEQconst x (FlagGT_UGT))
  1840  	// cond:
  1841  	// result: x
  1842  	for {
  1843  		x := v.Args[0]
  1844  		v_1 := v.Args[1]
  1845  		if v_1.Op != OpAMD64FlagGT_UGT {
  1846  			break
  1847  		}
  1848  		v.reset(OpCopy)
  1849  		v.Type = x.Type
  1850  		v.AddArg(x)
  1851  		return true
  1852  	}
  1853  	return false
  1854  }
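        // CMOVLEQconst x flags [c] evaluates to the constant c when the flags
        // say "equal" and to x otherwise, which is exactly what the rules above
        // encode: a compile-time (FlagEQ) folds to (Const32 [c]), and each
        // not-equal flag constant folds to a plain Copy of x. Flags wrapped in
        // InvertFlags (a compare whose operands were swapped) are unwrapped by
        // switching to the NE variant of the conditional move. CMOVQEQconst and
        // CMOVWEQconst below repeat the pattern at 64- and 16-bit widths.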
  1855  func rewriteValueAMD64_OpAMD64CMOVQEQconst(v *Value, config *Config) bool {
  1856  	b := v.Block
  1857  	_ = b
  1858  	// match: (CMOVQEQconst x (InvertFlags y) [c])
  1859  	// cond:
  1860  	// result: (CMOVQNEconst x y [c])
  1861  	for {
  1862  		x := v.Args[0]
  1863  		v_1 := v.Args[1]
  1864  		if v_1.Op != OpAMD64InvertFlags {
  1865  			break
  1866  		}
  1867  		y := v_1.Args[0]
  1868  		c := v.AuxInt
  1869  		v.reset(OpAMD64CMOVQNEconst)
  1870  		v.AddArg(x)
  1871  		v.AddArg(y)
  1872  		v.AuxInt = c
  1873  		return true
  1874  	}
  1875  	// match: (CMOVQEQconst _ (FlagEQ) [c])
  1876  	// cond:
  1877  	// result: (Const64 [c])
  1878  	for {
  1879  		v_1 := v.Args[1]
  1880  		if v_1.Op != OpAMD64FlagEQ {
  1881  			break
  1882  		}
  1883  		c := v.AuxInt
  1884  		v.reset(OpConst64)
  1885  		v.AuxInt = c
  1886  		return true
  1887  	}
  1888  	// match: (CMOVQEQconst x (FlagLT_ULT))
  1889  	// cond:
  1890  	// result: x
  1891  	for {
  1892  		x := v.Args[0]
  1893  		v_1 := v.Args[1]
  1894  		if v_1.Op != OpAMD64FlagLT_ULT {
  1895  			break
  1896  		}
  1897  		v.reset(OpCopy)
  1898  		v.Type = x.Type
  1899  		v.AddArg(x)
  1900  		return true
  1901  	}
  1902  	// match: (CMOVQEQconst x (FlagLT_UGT))
  1903  	// cond:
  1904  	// result: x
  1905  	for {
  1906  		x := v.Args[0]
  1907  		v_1 := v.Args[1]
  1908  		if v_1.Op != OpAMD64FlagLT_UGT {
  1909  			break
  1910  		}
  1911  		v.reset(OpCopy)
  1912  		v.Type = x.Type
  1913  		v.AddArg(x)
  1914  		return true
  1915  	}
  1916  	// match: (CMOVQEQconst x (FlagGT_ULT))
  1917  	// cond:
  1918  	// result: x
  1919  	for {
  1920  		x := v.Args[0]
  1921  		v_1 := v.Args[1]
  1922  		if v_1.Op != OpAMD64FlagGT_ULT {
  1923  			break
  1924  		}
  1925  		v.reset(OpCopy)
  1926  		v.Type = x.Type
  1927  		v.AddArg(x)
  1928  		return true
  1929  	}
  1930  	// match: (CMOVQEQconst x (FlagGT_UGT))
  1931  	// cond:
  1932  	// result: x
  1933  	for {
  1934  		x := v.Args[0]
  1935  		v_1 := v.Args[1]
  1936  		if v_1.Op != OpAMD64FlagGT_UGT {
  1937  			break
  1938  		}
  1939  		v.reset(OpCopy)
  1940  		v.Type = x.Type
  1941  		v.AddArg(x)
  1942  		return true
  1943  	}
  1944  	return false
  1945  }
  1946  func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool {
  1947  	b := v.Block
  1948  	_ = b
  1949  	// match: (CMOVWEQconst x (InvertFlags y) [c])
  1950  	// cond:
  1951  	// result: (CMOVWNEconst x y [c])
  1952  	for {
  1953  		x := v.Args[0]
  1954  		v_1 := v.Args[1]
  1955  		if v_1.Op != OpAMD64InvertFlags {
  1956  			break
  1957  		}
  1958  		y := v_1.Args[0]
  1959  		c := v.AuxInt
  1960  		v.reset(OpAMD64CMOVWNEconst)
  1961  		v.AddArg(x)
  1962  		v.AddArg(y)
  1963  		v.AuxInt = c
  1964  		return true
  1965  	}
  1966  	// match: (CMOVWEQconst _ (FlagEQ) [c])
  1967  	// cond:
  1968  	// result: (Const16 [c])
  1969  	for {
  1970  		v_1 := v.Args[1]
  1971  		if v_1.Op != OpAMD64FlagEQ {
  1972  			break
  1973  		}
  1974  		c := v.AuxInt
  1975  		v.reset(OpConst16)
  1976  		v.AuxInt = c
  1977  		return true
  1978  	}
  1979  	// match: (CMOVWEQconst x (FlagLT_ULT))
  1980  	// cond:
  1981  	// result: x
  1982  	for {
  1983  		x := v.Args[0]
  1984  		v_1 := v.Args[1]
  1985  		if v_1.Op != OpAMD64FlagLT_ULT {
  1986  			break
  1987  		}
  1988  		v.reset(OpCopy)
  1989  		v.Type = x.Type
  1990  		v.AddArg(x)
  1991  		return true
  1992  	}
  1993  	// match: (CMOVWEQconst x (FlagLT_UGT))
  1994  	// cond:
  1995  	// result: x
  1996  	for {
  1997  		x := v.Args[0]
  1998  		v_1 := v.Args[1]
  1999  		if v_1.Op != OpAMD64FlagLT_UGT {
  2000  			break
  2001  		}
  2002  		v.reset(OpCopy)
  2003  		v.Type = x.Type
  2004  		v.AddArg(x)
  2005  		return true
  2006  	}
  2007  	// match: (CMOVWEQconst x (FlagGT_ULT))
  2008  	// cond:
  2009  	// result: x
  2010  	for {
  2011  		x := v.Args[0]
  2012  		v_1 := v.Args[1]
  2013  		if v_1.Op != OpAMD64FlagGT_ULT {
  2014  			break
  2015  		}
  2016  		v.reset(OpCopy)
  2017  		v.Type = x.Type
  2018  		v.AddArg(x)
  2019  		return true
  2020  	}
  2021  	// match: (CMOVWEQconst x (FlagGT_UGT))
  2022  	// cond:
  2023  	// result: x
  2024  	for {
  2025  		x := v.Args[0]
  2026  		v_1 := v.Args[1]
  2027  		if v_1.Op != OpAMD64FlagGT_UGT {
  2028  			break
  2029  		}
  2030  		v.reset(OpCopy)
  2031  		v.Type = x.Type
  2032  		v.AddArg(x)
  2033  		return true
  2034  	}
  2035  	return false
  2036  }
  2037  func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
  2038  	b := v.Block
  2039  	_ = b
  2040  	// match: (CMPB x (MOVLconst [c]))
  2041  	// cond:
  2042  	// result: (CMPBconst x [int64(int8(c))])
  2043  	for {
  2044  		x := v.Args[0]
  2045  		v_1 := v.Args[1]
  2046  		if v_1.Op != OpAMD64MOVLconst {
  2047  			break
  2048  		}
  2049  		c := v_1.AuxInt
  2050  		v.reset(OpAMD64CMPBconst)
  2051  		v.AddArg(x)
  2052  		v.AuxInt = int64(int8(c))
  2053  		return true
  2054  	}
  2055  	// match: (CMPB (MOVLconst [c]) x)
  2056  	// cond:
  2057  	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
  2058  	for {
  2059  		v_0 := v.Args[0]
  2060  		if v_0.Op != OpAMD64MOVLconst {
  2061  			break
  2062  		}
  2063  		c := v_0.AuxInt
  2064  		x := v.Args[1]
  2065  		v.reset(OpAMD64InvertFlags)
  2066  		v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
  2067  		v0.AddArg(x)
  2068  		v0.AuxInt = int64(int8(c))
  2069  		v.AddArg(v0)
  2070  		return true
  2071  	}
  2072  	return false
  2073  }
  2074  func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
  2075  	b := v.Block
  2076  	_ = b
  2077  	// match: (CMPBconst (MOVLconst [x]) [y])
  2078  	// cond: int8(x)==int8(y)
  2079  	// result: (FlagEQ)
  2080  	for {
  2081  		v_0 := v.Args[0]
  2082  		if v_0.Op != OpAMD64MOVLconst {
  2083  			break
  2084  		}
  2085  		x := v_0.AuxInt
  2086  		y := v.AuxInt
  2087  		if !(int8(x) == int8(y)) {
  2088  			break
  2089  		}
  2090  		v.reset(OpAMD64FlagEQ)
  2091  		return true
  2092  	}
  2093  	// match: (CMPBconst (MOVLconst [x]) [y])
  2094  	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
  2095  	// result: (FlagLT_ULT)
  2096  	for {
  2097  		v_0 := v.Args[0]
  2098  		if v_0.Op != OpAMD64MOVLconst {
  2099  			break
  2100  		}
  2101  		x := v_0.AuxInt
  2102  		y := v.AuxInt
  2103  		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
  2104  			break
  2105  		}
  2106  		v.reset(OpAMD64FlagLT_ULT)
  2107  		return true
  2108  	}
  2109  	// match: (CMPBconst (MOVLconst [x]) [y])
  2110  	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
  2111  	// result: (FlagLT_UGT)
  2112  	for {
  2113  		v_0 := v.Args[0]
  2114  		if v_0.Op != OpAMD64MOVLconst {
  2115  			break
  2116  		}
  2117  		x := v_0.AuxInt
  2118  		y := v.AuxInt
  2119  		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
  2120  			break
  2121  		}
  2122  		v.reset(OpAMD64FlagLT_UGT)
  2123  		return true
  2124  	}
  2125  	// match: (CMPBconst (MOVLconst [x]) [y])
  2126  	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
  2127  	// result: (FlagGT_ULT)
  2128  	for {
  2129  		v_0 := v.Args[0]
  2130  		if v_0.Op != OpAMD64MOVLconst {
  2131  			break
  2132  		}
  2133  		x := v_0.AuxInt
  2134  		y := v.AuxInt
  2135  		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
  2136  			break
  2137  		}
  2138  		v.reset(OpAMD64FlagGT_ULT)
  2139  		return true
  2140  	}
  2141  	// match: (CMPBconst (MOVLconst [x]) [y])
  2142  	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
  2143  	// result: (FlagGT_UGT)
  2144  	for {
  2145  		v_0 := v.Args[0]
  2146  		if v_0.Op != OpAMD64MOVLconst {
  2147  			break
  2148  		}
  2149  		x := v_0.AuxInt
  2150  		y := v.AuxInt
  2151  		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
  2152  			break
  2153  		}
  2154  		v.reset(OpAMD64FlagGT_UGT)
  2155  		return true
  2156  	}
  2157  	// match: (CMPBconst (ANDLconst _ [m]) [n])
  2158  	// cond: 0 <= int8(m) && int8(m) < int8(n)
  2159  	// result: (FlagLT_ULT)
  2160  	for {
  2161  		v_0 := v.Args[0]
  2162  		if v_0.Op != OpAMD64ANDLconst {
  2163  			break
  2164  		}
  2165  		m := v_0.AuxInt
  2166  		n := v.AuxInt
  2167  		if !(0 <= int8(m) && int8(m) < int8(n)) {
  2168  			break
  2169  		}
  2170  		v.reset(OpAMD64FlagLT_ULT)
  2171  		return true
  2172  	}
  2173  	// match: (CMPBconst (ANDL x y) [0])
  2174  	// cond:
  2175  	// result: (TESTB x y)
  2176  	for {
  2177  		v_0 := v.Args[0]
  2178  		if v_0.Op != OpAMD64ANDL {
  2179  			break
  2180  		}
  2181  		x := v_0.Args[0]
  2182  		y := v_0.Args[1]
  2183  		if v.AuxInt != 0 {
  2184  			break
  2185  		}
  2186  		v.reset(OpAMD64TESTB)
  2187  		v.AddArg(x)
  2188  		v.AddArg(y)
  2189  		return true
  2190  	}
  2191  	// match: (CMPBconst (ANDLconst [c] x) [0])
  2192  	// cond:
  2193  	// result: (TESTBconst [int64(int8(c))] x)
  2194  	for {
  2195  		v_0 := v.Args[0]
  2196  		if v_0.Op != OpAMD64ANDLconst {
  2197  			break
  2198  		}
  2199  		c := v_0.AuxInt
  2200  		x := v_0.Args[0]
  2201  		if v.AuxInt != 0 {
  2202  			break
  2203  		}
  2204  		v.reset(OpAMD64TESTBconst)
  2205  		v.AuxInt = int64(int8(c))
  2206  		v.AddArg(x)
  2207  		return true
  2208  	}
  2209  	// match: (CMPBconst x [0])
  2210  	// cond:
  2211  	// result: (TESTB x x)
  2212  	for {
  2213  		x := v.Args[0]
  2214  		if v.AuxInt != 0 {
  2215  			break
  2216  		}
  2217  		v.reset(OpAMD64TESTB)
  2218  		v.AddArg(x)
  2219  		v.AddArg(x)
  2220  		return true
  2221  	}
  2222  	return false
  2223  }
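        // A byte compare of two constants folds to one of five flag constants
        // (FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT), each
        // recording the signed and the unsigned ordering at once so any later
        // SETcc or branch can be decided statically. The trailing rules are
        // peepholes: comparing an AND result against zero becomes TESTB (an AND
        // that only sets flags), and comparing x itself against zero becomes
        // TESTB x x, saving an immediate byte in the encoding.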
  2224  func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
  2225  	b := v.Block
  2226  	_ = b
  2227  	// match: (CMPL x (MOVLconst [c]))
  2228  	// cond:
  2229  	// result: (CMPLconst x [c])
  2230  	for {
  2231  		x := v.Args[0]
  2232  		v_1 := v.Args[1]
  2233  		if v_1.Op != OpAMD64MOVLconst {
  2234  			break
  2235  		}
  2236  		c := v_1.AuxInt
  2237  		v.reset(OpAMD64CMPLconst)
  2238  		v.AddArg(x)
  2239  		v.AuxInt = c
  2240  		return true
  2241  	}
  2242  	// match: (CMPL (MOVLconst [c]) x)
  2243  	// cond:
  2244  	// result: (InvertFlags (CMPLconst x [c]))
  2245  	for {
  2246  		v_0 := v.Args[0]
  2247  		if v_0.Op != OpAMD64MOVLconst {
  2248  			break
  2249  		}
  2250  		c := v_0.AuxInt
  2251  		x := v.Args[1]
  2252  		v.reset(OpAMD64InvertFlags)
  2253  		v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
  2254  		v0.AddArg(x)
  2255  		v0.AuxInt = c
  2256  		v.AddArg(v0)
  2257  		return true
  2258  	}
  2259  	return false
  2260  }
  2261  func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
  2262  	b := v.Block
  2263  	_ = b
  2264  	// match: (CMPLconst (MOVLconst [x]) [y])
  2265  	// cond: int32(x)==int32(y)
  2266  	// result: (FlagEQ)
  2267  	for {
  2268  		v_0 := v.Args[0]
  2269  		if v_0.Op != OpAMD64MOVLconst {
  2270  			break
  2271  		}
  2272  		x := v_0.AuxInt
  2273  		y := v.AuxInt
  2274  		if !(int32(x) == int32(y)) {
  2275  			break
  2276  		}
  2277  		v.reset(OpAMD64FlagEQ)
  2278  		return true
  2279  	}
  2280  	// match: (CMPLconst (MOVLconst [x]) [y])
  2281  	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
  2282  	// result: (FlagLT_ULT)
  2283  	for {
  2284  		v_0 := v.Args[0]
  2285  		if v_0.Op != OpAMD64MOVLconst {
  2286  			break
  2287  		}
  2288  		x := v_0.AuxInt
  2289  		y := v.AuxInt
  2290  		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
  2291  			break
  2292  		}
  2293  		v.reset(OpAMD64FlagLT_ULT)
  2294  		return true
  2295  	}
  2296  	// match: (CMPLconst (MOVLconst [x]) [y])
  2297  	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
  2298  	// result: (FlagLT_UGT)
  2299  	for {
  2300  		v_0 := v.Args[0]
  2301  		if v_0.Op != OpAMD64MOVLconst {
  2302  			break
  2303  		}
  2304  		x := v_0.AuxInt
  2305  		y := v.AuxInt
  2306  		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
  2307  			break
  2308  		}
  2309  		v.reset(OpAMD64FlagLT_UGT)
  2310  		return true
  2311  	}
  2312  	// match: (CMPLconst (MOVLconst [x]) [y])
  2313  	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
  2314  	// result: (FlagGT_ULT)
  2315  	for {
  2316  		v_0 := v.Args[0]
  2317  		if v_0.Op != OpAMD64MOVLconst {
  2318  			break
  2319  		}
  2320  		x := v_0.AuxInt
  2321  		y := v.AuxInt
  2322  		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
  2323  			break
  2324  		}
  2325  		v.reset(OpAMD64FlagGT_ULT)
  2326  		return true
  2327  	}
  2328  	// match: (CMPLconst (MOVLconst [x]) [y])
  2329  	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
  2330  	// result: (FlagGT_UGT)
  2331  	for {
  2332  		v_0 := v.Args[0]
  2333  		if v_0.Op != OpAMD64MOVLconst {
  2334  			break
  2335  		}
  2336  		x := v_0.AuxInt
  2337  		y := v.AuxInt
  2338  		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
  2339  			break
  2340  		}
  2341  		v.reset(OpAMD64FlagGT_UGT)
  2342  		return true
  2343  	}
  2344  	// match: (CMPLconst (SHRLconst _ [c]) [n])
  2345  	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
  2346  	// result: (FlagLT_ULT)
  2347  	for {
  2348  		v_0 := v.Args[0]
  2349  		if v_0.Op != OpAMD64SHRLconst {
  2350  			break
  2351  		}
  2352  		c := v_0.AuxInt
  2353  		n := v.AuxInt
  2354  		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
  2355  			break
  2356  		}
  2357  		v.reset(OpAMD64FlagLT_ULT)
  2358  		return true
  2359  	}
  2360  	// match: (CMPLconst (ANDLconst _ [m]) [n])
  2361  	// cond: 0 <= int32(m) && int32(m) < int32(n)
  2362  	// result: (FlagLT_ULT)
  2363  	for {
  2364  		v_0 := v.Args[0]
  2365  		if v_0.Op != OpAMD64ANDLconst {
  2366  			break
  2367  		}
  2368  		m := v_0.AuxInt
  2369  		n := v.AuxInt
  2370  		if !(0 <= int32(m) && int32(m) < int32(n)) {
  2371  			break
  2372  		}
  2373  		v.reset(OpAMD64FlagLT_ULT)
  2374  		return true
  2375  	}
  2376  	// match: (CMPLconst (ANDL x y) [0])
  2377  	// cond:
  2378  	// result: (TESTL x y)
  2379  	for {
  2380  		v_0 := v.Args[0]
  2381  		if v_0.Op != OpAMD64ANDL {
  2382  			break
  2383  		}
  2384  		x := v_0.Args[0]
  2385  		y := v_0.Args[1]
  2386  		if v.AuxInt != 0 {
  2387  			break
  2388  		}
  2389  		v.reset(OpAMD64TESTL)
  2390  		v.AddArg(x)
  2391  		v.AddArg(y)
  2392  		return true
  2393  	}
  2394  	// match: (CMPLconst (ANDLconst [c] x) [0])
  2395  	// cond:
  2396  	// result: (TESTLconst [c] x)
  2397  	for {
  2398  		v_0 := v.Args[0]
  2399  		if v_0.Op != OpAMD64ANDLconst {
  2400  			break
  2401  		}
  2402  		c := v_0.AuxInt
  2403  		x := v_0.Args[0]
  2404  		if v.AuxInt != 0 {
  2405  			break
  2406  		}
  2407  		v.reset(OpAMD64TESTLconst)
  2408  		v.AuxInt = c
  2409  		v.AddArg(x)
  2410  		return true
  2411  	}
  2412  	// match: (CMPLconst x [0])
  2413  	// cond:
  2414  	// result: (TESTL x x)
  2415  	for {
  2416  		x := v.Args[0]
  2417  		if v.AuxInt != 0 {
  2418  			break
  2419  		}
  2420  		v.reset(OpAMD64TESTL)
  2421  		v.AddArg(x)
  2422  		v.AddArg(x)
  2423  		return true
  2424  	}
  2425  	return false
  2426  }
  2427  func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
  2428  	b := v.Block
  2429  	_ = b
  2430  	// match: (CMPQ x (MOVQconst [c]))
  2431  	// cond: is32Bit(c)
  2432  	// result: (CMPQconst x [c])
  2433  	for {
  2434  		x := v.Args[0]
  2435  		v_1 := v.Args[1]
  2436  		if v_1.Op != OpAMD64MOVQconst {
  2437  			break
  2438  		}
  2439  		c := v_1.AuxInt
  2440  		if !(is32Bit(c)) {
  2441  			break
  2442  		}
  2443  		v.reset(OpAMD64CMPQconst)
  2444  		v.AddArg(x)
  2445  		v.AuxInt = c
  2446  		return true
  2447  	}
  2448  	// match: (CMPQ (MOVQconst [c]) x)
  2449  	// cond: is32Bit(c)
  2450  	// result: (InvertFlags (CMPQconst x [c]))
  2451  	for {
  2452  		v_0 := v.Args[0]
  2453  		if v_0.Op != OpAMD64MOVQconst {
  2454  			break
  2455  		}
  2456  		c := v_0.AuxInt
  2457  		x := v.Args[1]
  2458  		if !(is32Bit(c)) {
  2459  			break
  2460  		}
  2461  		v.reset(OpAMD64InvertFlags)
  2462  		v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
  2463  		v0.AddArg(x)
  2464  		v0.AuxInt = c
  2465  		v.AddArg(v0)
  2466  		return true
  2467  	}
  2468  	return false
  2469  }
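        // Unlike the narrower compares, CMPQ folds a constant operand only when
        // is32Bit(c) holds: x86-64 compare immediates are at most 32 bits and
        // are sign-extended to 64, so a constant outside that range has to stay
        // in a register.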
  2470  func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
  2471  	b := v.Block
  2472  	_ = b
  2473  	// match: (CMPQconst (MOVQconst [x]) [y])
  2474  	// cond: x==y
  2475  	// result: (FlagEQ)
  2476  	for {
  2477  		v_0 := v.Args[0]
  2478  		if v_0.Op != OpAMD64MOVQconst {
  2479  			break
  2480  		}
  2481  		x := v_0.AuxInt
  2482  		y := v.AuxInt
  2483  		if !(x == y) {
  2484  			break
  2485  		}
  2486  		v.reset(OpAMD64FlagEQ)
  2487  		return true
  2488  	}
  2489  	// match: (CMPQconst (MOVQconst [x]) [y])
  2490  	// cond: x<y && uint64(x)<uint64(y)
  2491  	// result: (FlagLT_ULT)
  2492  	for {
  2493  		v_0 := v.Args[0]
  2494  		if v_0.Op != OpAMD64MOVQconst {
  2495  			break
  2496  		}
  2497  		x := v_0.AuxInt
  2498  		y := v.AuxInt
  2499  		if !(x < y && uint64(x) < uint64(y)) {
  2500  			break
  2501  		}
  2502  		v.reset(OpAMD64FlagLT_ULT)
  2503  		return true
  2504  	}
  2505  	// match: (CMPQconst (MOVQconst [x]) [y])
  2506  	// cond: x<y && uint64(x)>uint64(y)
  2507  	// result: (FlagLT_UGT)
  2508  	for {
  2509  		v_0 := v.Args[0]
  2510  		if v_0.Op != OpAMD64MOVQconst {
  2511  			break
  2512  		}
  2513  		x := v_0.AuxInt
  2514  		y := v.AuxInt
  2515  		if !(x < y && uint64(x) > uint64(y)) {
  2516  			break
  2517  		}
  2518  		v.reset(OpAMD64FlagLT_UGT)
  2519  		return true
  2520  	}
  2521  	// match: (CMPQconst (MOVQconst [x]) [y])
  2522  	// cond: x>y && uint64(x)<uint64(y)
  2523  	// result: (FlagGT_ULT)
  2524  	for {
  2525  		v_0 := v.Args[0]
  2526  		if v_0.Op != OpAMD64MOVQconst {
  2527  			break
  2528  		}
  2529  		x := v_0.AuxInt
  2530  		y := v.AuxInt
  2531  		if !(x > y && uint64(x) < uint64(y)) {
  2532  			break
  2533  		}
  2534  		v.reset(OpAMD64FlagGT_ULT)
  2535  		return true
  2536  	}
  2537  	// match: (CMPQconst (MOVQconst [x]) [y])
  2538  	// cond: x>y && uint64(x)>uint64(y)
  2539  	// result: (FlagGT_UGT)
  2540  	for {
  2541  		v_0 := v.Args[0]
  2542  		if v_0.Op != OpAMD64MOVQconst {
  2543  			break
  2544  		}
  2545  		x := v_0.AuxInt
  2546  		y := v.AuxInt
  2547  		if !(x > y && uint64(x) > uint64(y)) {
  2548  			break
  2549  		}
  2550  		v.reset(OpAMD64FlagGT_UGT)
  2551  		return true
  2552  	}
  2553  	// match: (CMPQconst (MOVBQZX _) [c])
  2554  	// cond: 0xFF < c
  2555  	// result: (FlagLT_ULT)
  2556  	for {
  2557  		v_0 := v.Args[0]
  2558  		if v_0.Op != OpAMD64MOVBQZX {
  2559  			break
  2560  		}
  2561  		c := v.AuxInt
  2562  		if !(0xFF < c) {
  2563  			break
  2564  		}
  2565  		v.reset(OpAMD64FlagLT_ULT)
  2566  		return true
  2567  	}
  2568  	// match: (CMPQconst (MOVWQZX _) [c])
  2569  	// cond: 0xFFFF < c
  2570  	// result: (FlagLT_ULT)
  2571  	for {
  2572  		v_0 := v.Args[0]
  2573  		if v_0.Op != OpAMD64MOVWQZX {
  2574  			break
  2575  		}
  2576  		c := v.AuxInt
  2577  		if !(0xFFFF < c) {
  2578  			break
  2579  		}
  2580  		v.reset(OpAMD64FlagLT_ULT)
  2581  		return true
  2582  	}
  2583  	// match: (CMPQconst (MOVLQZX _) [c])
  2584  	// cond: 0xFFFFFFFF < c
  2585  	// result: (FlagLT_ULT)
  2586  	for {
  2587  		v_0 := v.Args[0]
  2588  		if v_0.Op != OpAMD64MOVLQZX {
  2589  			break
  2590  		}
  2591  		c := v.AuxInt
  2592  		if !(0xFFFFFFFF < c) {
  2593  			break
  2594  		}
  2595  		v.reset(OpAMD64FlagLT_ULT)
  2596  		return true
  2597  	}
  2598  	// match: (CMPQconst (SHRQconst _ [c]) [n])
  2599  	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
  2600  	// result: (FlagLT_ULT)
  2601  	for {
  2602  		v_0 := v.Args[0]
  2603  		if v_0.Op != OpAMD64SHRQconst {
  2604  			break
  2605  		}
  2606  		c := v_0.AuxInt
  2607  		n := v.AuxInt
  2608  		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
  2609  			break
  2610  		}
  2611  		v.reset(OpAMD64FlagLT_ULT)
  2612  		return true
  2613  	}
  2614  	// match: (CMPQconst (ANDQconst _ [m]) [n])
  2615  	// cond: 0 <= m && m < n
  2616  	// result: (FlagLT_ULT)
  2617  	for {
  2618  		v_0 := v.Args[0]
  2619  		if v_0.Op != OpAMD64ANDQconst {
  2620  			break
  2621  		}
  2622  		m := v_0.AuxInt
  2623  		n := v.AuxInt
  2624  		if !(0 <= m && m < n) {
  2625  			break
  2626  		}
  2627  		v.reset(OpAMD64FlagLT_ULT)
  2628  		return true
  2629  	}
  2630  	// match: (CMPQconst (ANDQ x y) [0])
  2631  	// cond:
  2632  	// result: (TESTQ x y)
  2633  	for {
  2634  		v_0 := v.Args[0]
  2635  		if v_0.Op != OpAMD64ANDQ {
  2636  			break
  2637  		}
  2638  		x := v_0.Args[0]
  2639  		y := v_0.Args[1]
  2640  		if v.AuxInt != 0 {
  2641  			break
  2642  		}
  2643  		v.reset(OpAMD64TESTQ)
  2644  		v.AddArg(x)
  2645  		v.AddArg(y)
  2646  		return true
  2647  	}
  2648  	// match: (CMPQconst (ANDQconst [c] x) [0])
  2649  	// cond:
  2650  	// result: (TESTQconst [c] x)
  2651  	for {
  2652  		v_0 := v.Args[0]
  2653  		if v_0.Op != OpAMD64ANDQconst {
  2654  			break
  2655  		}
  2656  		c := v_0.AuxInt
  2657  		x := v_0.Args[0]
  2658  		if v.AuxInt != 0 {
  2659  			break
  2660  		}
  2661  		v.reset(OpAMD64TESTQconst)
  2662  		v.AuxInt = c
  2663  		v.AddArg(x)
  2664  		return true
  2665  	}
  2666  	// match: (CMPQconst x [0])
  2667  	// cond:
  2668  	// result: (TESTQ x x)
  2669  	for {
  2670  		x := v.Args[0]
  2671  		if v.AuxInt != 0 {
  2672  			break
  2673  		}
  2674  		v.reset(OpAMD64TESTQ)
  2675  		v.AddArg(x)
  2676  		v.AddArg(x)
  2677  		return true
  2678  	}
  2679  	return false
  2680  }
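        // The MOVBQZX/MOVWQZX/MOVLQZX rules are range arguments: a value
        // zero-extended from 8, 16 or 32 bits is at most 0xFF, 0xFFFF or
        // 0xFFFFFFFF, so comparing it against any larger constant is known to
        // come out below, signed and unsigned (FlagLT_ULT). The SHRQconst and
        // ANDQconst rules bound the value the same way, via the shift count or
        // the mask.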
  2681  func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
  2682  	b := v.Block
  2683  	_ = b
  2684  	// match: (CMPW x (MOVLconst [c]))
  2685  	// cond:
  2686  	// result: (CMPWconst x [int64(int16(c))])
  2687  	for {
  2688  		x := v.Args[0]
  2689  		v_1 := v.Args[1]
  2690  		if v_1.Op != OpAMD64MOVLconst {
  2691  			break
  2692  		}
  2693  		c := v_1.AuxInt
  2694  		v.reset(OpAMD64CMPWconst)
  2695  		v.AddArg(x)
  2696  		v.AuxInt = int64(int16(c))
  2697  		return true
  2698  	}
  2699  	// match: (CMPW (MOVLconst [c]) x)
  2700  	// cond:
  2701  	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
  2702  	for {
  2703  		v_0 := v.Args[0]
  2704  		if v_0.Op != OpAMD64MOVLconst {
  2705  			break
  2706  		}
  2707  		c := v_0.AuxInt
  2708  		x := v.Args[1]
  2709  		v.reset(OpAMD64InvertFlags)
  2710  		v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
  2711  		v0.AddArg(x)
  2712  		v0.AuxInt = int64(int16(c))
  2713  		v.AddArg(v0)
  2714  		return true
  2715  	}
  2716  	return false
  2717  }
  2718  func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
  2719  	b := v.Block
  2720  	_ = b
  2721  	// match: (CMPWconst (MOVLconst [x]) [y])
  2722  	// cond: int16(x)==int16(y)
  2723  	// result: (FlagEQ)
  2724  	for {
  2725  		v_0 := v.Args[0]
  2726  		if v_0.Op != OpAMD64MOVLconst {
  2727  			break
  2728  		}
  2729  		x := v_0.AuxInt
  2730  		y := v.AuxInt
  2731  		if !(int16(x) == int16(y)) {
  2732  			break
  2733  		}
  2734  		v.reset(OpAMD64FlagEQ)
  2735  		return true
  2736  	}
  2737  	// match: (CMPWconst (MOVLconst [x]) [y])
  2738  	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
  2739  	// result: (FlagLT_ULT)
  2740  	for {
  2741  		v_0 := v.Args[0]
  2742  		if v_0.Op != OpAMD64MOVLconst {
  2743  			break
  2744  		}
  2745  		x := v_0.AuxInt
  2746  		y := v.AuxInt
  2747  		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
  2748  			break
  2749  		}
  2750  		v.reset(OpAMD64FlagLT_ULT)
  2751  		return true
  2752  	}
  2753  	// match: (CMPWconst (MOVLconst [x]) [y])
  2754  	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
  2755  	// result: (FlagLT_UGT)
  2756  	for {
  2757  		v_0 := v.Args[0]
  2758  		if v_0.Op != OpAMD64MOVLconst {
  2759  			break
  2760  		}
  2761  		x := v_0.AuxInt
  2762  		y := v.AuxInt
  2763  		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
  2764  			break
  2765  		}
  2766  		v.reset(OpAMD64FlagLT_UGT)
  2767  		return true
  2768  	}
  2769  	// match: (CMPWconst (MOVLconst [x]) [y])
  2770  	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
  2771  	// result: (FlagGT_ULT)
  2772  	for {
  2773  		v_0 := v.Args[0]
  2774  		if v_0.Op != OpAMD64MOVLconst {
  2775  			break
  2776  		}
  2777  		x := v_0.AuxInt
  2778  		y := v.AuxInt
  2779  		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
  2780  			break
  2781  		}
  2782  		v.reset(OpAMD64FlagGT_ULT)
  2783  		return true
  2784  	}
  2785  	// match: (CMPWconst (MOVLconst [x]) [y])
  2786  	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
  2787  	// result: (FlagGT_UGT)
  2788  	for {
  2789  		v_0 := v.Args[0]
  2790  		if v_0.Op != OpAMD64MOVLconst {
  2791  			break
  2792  		}
  2793  		x := v_0.AuxInt
  2794  		y := v.AuxInt
  2795  		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
  2796  			break
  2797  		}
  2798  		v.reset(OpAMD64FlagGT_UGT)
  2799  		return true
  2800  	}
  2801  	// match: (CMPWconst (ANDLconst _ [m]) [n])
  2802  	// cond: 0 <= int16(m) && int16(m) < int16(n)
  2803  	// result: (FlagLT_ULT)
  2804  	for {
  2805  		v_0 := v.Args[0]
  2806  		if v_0.Op != OpAMD64ANDLconst {
  2807  			break
  2808  		}
  2809  		m := v_0.AuxInt
  2810  		n := v.AuxInt
  2811  		if !(0 <= int16(m) && int16(m) < int16(n)) {
  2812  			break
  2813  		}
  2814  		v.reset(OpAMD64FlagLT_ULT)
  2815  		return true
  2816  	}
  2817  	// match: (CMPWconst (ANDL x y) [0])
  2818  	// cond:
  2819  	// result: (TESTW x y)
  2820  	for {
  2821  		v_0 := v.Args[0]
  2822  		if v_0.Op != OpAMD64ANDL {
  2823  			break
  2824  		}
  2825  		x := v_0.Args[0]
  2826  		y := v_0.Args[1]
  2827  		if v.AuxInt != 0 {
  2828  			break
  2829  		}
  2830  		v.reset(OpAMD64TESTW)
  2831  		v.AddArg(x)
  2832  		v.AddArg(y)
  2833  		return true
  2834  	}
  2835  	// match: (CMPWconst (ANDLconst [c] x) [0])
  2836  	// cond:
  2837  	// result: (TESTWconst [int64(int16(c))] x)
  2838  	for {
  2839  		v_0 := v.Args[0]
  2840  		if v_0.Op != OpAMD64ANDLconst {
  2841  			break
  2842  		}
  2843  		c := v_0.AuxInt
  2844  		x := v_0.Args[0]
  2845  		if v.AuxInt != 0 {
  2846  			break
  2847  		}
  2848  		v.reset(OpAMD64TESTWconst)
  2849  		v.AuxInt = int64(int16(c))
  2850  		v.AddArg(x)
  2851  		return true
  2852  	}
  2853  	// match: (CMPWconst x [0])
  2854  	// cond:
  2855  	// result: (TESTW x x)
  2856  	for {
  2857  		x := v.Args[0]
  2858  		if v.AuxInt != 0 {
  2859  			break
  2860  		}
  2861  		v.reset(OpAMD64TESTW)
  2862  		v.AddArg(x)
  2863  		v.AddArg(x)
  2864  		return true
  2865  	}
  2866  	return false
  2867  }
  2868  func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
  2869  	b := v.Block
  2870  	_ = b
  2871  	// match: (ClosureCall [argwid] entry closure mem)
  2872  	// cond:
  2873  	// result: (CALLclosure [argwid] entry closure mem)
  2874  	for {
  2875  		argwid := v.AuxInt
  2876  		entry := v.Args[0]
  2877  		closure := v.Args[1]
  2878  		mem := v.Args[2]
  2879  		v.reset(OpAMD64CALLclosure)
  2880  		v.AuxInt = argwid
  2881  		v.AddArg(entry)
  2882  		v.AddArg(closure)
  2883  		v.AddArg(mem)
  2884  		return true
  2885  	}
  2886  }
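        // Call lowering is mechanical: ClosureCall here, and DeferCall and
        // GoCall further down, become CALLclosure, CALLdefer and CALLgo, with
        // AuxInt carrying argwid (the byte size of the argument area) and the
        // memory argument threaded through unchanged.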
  2887  func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
  2888  	b := v.Block
  2889  	_ = b
  2890  	// match: (Com16 x)
  2891  	// cond:
  2892  	// result: (NOTL x)
  2893  	for {
  2894  		x := v.Args[0]
  2895  		v.reset(OpAMD64NOTL)
  2896  		v.AddArg(x)
  2897  		return true
  2898  	}
  2899  }
  2900  func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
  2901  	b := v.Block
  2902  	_ = b
  2903  	// match: (Com32 x)
  2904  	// cond:
  2905  	// result: (NOTL x)
  2906  	for {
  2907  		x := v.Args[0]
  2908  		v.reset(OpAMD64NOTL)
  2909  		v.AddArg(x)
  2910  		return true
  2911  	}
  2912  }
  2913  func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
  2914  	b := v.Block
  2915  	_ = b
  2916  	// match: (Com64 x)
  2917  	// cond:
  2918  	// result: (NOTQ x)
  2919  	for {
  2920  		x := v.Args[0]
  2921  		v.reset(OpAMD64NOTQ)
  2922  		v.AddArg(x)
  2923  		return true
  2924  	}
  2925  }
  2926  func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
  2927  	b := v.Block
  2928  	_ = b
  2929  	// match: (Com8  x)
  2930  	// cond:
  2931  	// result: (NOTL x)
  2932  	for {
  2933  		x := v.Args[0]
  2934  		v.reset(OpAMD64NOTL)
  2935  		v.AddArg(x)
  2936  		return true
  2937  	}
  2938  }
  2939  func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
  2940  	b := v.Block
  2941  	_ = b
  2942  	// match: (Const16  [val])
  2943  	// cond:
  2944  	// result: (MOVLconst [val])
  2945  	for {
  2946  		val := v.AuxInt
  2947  		v.reset(OpAMD64MOVLconst)
  2948  		v.AuxInt = val
  2949  		return true
  2950  	}
  2951  }
  2952  func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
  2953  	b := v.Block
  2954  	_ = b
  2955  	// match: (Const32  [val])
  2956  	// cond:
  2957  	// result: (MOVLconst [val])
  2958  	for {
  2959  		val := v.AuxInt
  2960  		v.reset(OpAMD64MOVLconst)
  2961  		v.AuxInt = val
  2962  		return true
  2963  	}
  2964  }
  2965  func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
  2966  	b := v.Block
  2967  	_ = b
  2968  	// match: (Const32F [val])
  2969  	// cond:
  2970  	// result: (MOVSSconst [val])
  2971  	for {
  2972  		val := v.AuxInt
  2973  		v.reset(OpAMD64MOVSSconst)
  2974  		v.AuxInt = val
  2975  		return true
  2976  	}
  2977  }
  2978  func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
  2979  	b := v.Block
  2980  	_ = b
  2981  	// match: (Const64  [val])
  2982  	// cond:
  2983  	// result: (MOVQconst [val])
  2984  	for {
  2985  		val := v.AuxInt
  2986  		v.reset(OpAMD64MOVQconst)
  2987  		v.AuxInt = val
  2988  		return true
  2989  	}
  2990  }
  2991  func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
  2992  	b := v.Block
  2993  	_ = b
  2994  	// match: (Const64F [val])
  2995  	// cond:
  2996  	// result: (MOVSDconst [val])
  2997  	for {
  2998  		val := v.AuxInt
  2999  		v.reset(OpAMD64MOVSDconst)
  3000  		v.AuxInt = val
  3001  		return true
  3002  	}
  3003  }
  3004  func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
  3005  	b := v.Block
  3006  	_ = b
  3007  	// match: (Const8   [val])
  3008  	// cond:
  3009  	// result: (MOVLconst [val])
  3010  	for {
  3011  		val := v.AuxInt
  3012  		v.reset(OpAMD64MOVLconst)
  3013  		v.AuxInt = val
  3014  		return true
  3015  	}
  3016  }
  3017  func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
  3018  	b := v.Block
  3019  	_ = b
  3020  	// match: (ConstBool [b])
  3021  	// cond:
  3022  	// result: (MOVLconst [b])
  3023  	for {
  3024  		b := v.AuxInt
  3025  		v.reset(OpAMD64MOVLconst)
  3026  		v.AuxInt = b
  3027  		return true
  3028  	}
  3029  }
  3030  func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
  3031  	b := v.Block
  3032  	_ = b
  3033  	// match: (ConstNil)
  3034  	// cond:
  3035  	// result: (MOVQconst [0])
  3036  	for {
  3037  		v.reset(OpAMD64MOVQconst)
  3038  		v.AuxInt = 0
  3039  		return true
  3040  	}
  3041  }
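        // Constant materialization picks the narrowest safe move: Const8/16/32
        // and ConstBool all become MOVLconst, since writing a 32-bit register
        // implicitly zeroes its upper half, while Const64 and the nil pointer
        // need MOVQconst; float constants use MOVSSconst and MOVSDconst.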
  3042  func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
  3043  	b := v.Block
  3044  	_ = b
  3045  	// match: (Convert <t> x mem)
  3046  	// cond:
  3047  	// result: (MOVQconvert <t> x mem)
  3048  	for {
  3049  		t := v.Type
  3050  		x := v.Args[0]
  3051  		mem := v.Args[1]
  3052  		v.reset(OpAMD64MOVQconvert)
  3053  		v.Type = t
  3054  		v.AddArg(x)
  3055  		v.AddArg(mem)
  3056  		return true
  3057  	}
  3058  }
  3059  func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
  3060  	b := v.Block
  3061  	_ = b
  3062  	// match: (Ctz16 <t> x)
  3063  	// cond:
  3064  	// result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
  3065  	for {
  3066  		t := v.Type
  3067  		x := v.Args[0]
  3068  		v.reset(OpAMD64CMOVWEQconst)
  3069  		v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
  3070  		v0.AddArg(x)
  3071  		v.AddArg(v0)
  3072  		v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
  3073  		v1.AddArg(x)
  3074  		v1.AuxInt = 0
  3075  		v.AddArg(v1)
  3076  		v.AuxInt = 16
  3077  		return true
  3078  	}
  3079  }
  3080  func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
  3081  	b := v.Block
  3082  	_ = b
  3083  	// match: (Ctz32 <t> x)
  3084  	// cond:
  3085  	// result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
  3086  	for {
  3087  		t := v.Type
  3088  		x := v.Args[0]
  3089  		v.reset(OpAMD64CMOVLEQconst)
  3090  		v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
  3091  		v0.AddArg(x)
  3092  		v.AddArg(v0)
  3093  		v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
  3094  		v1.AddArg(x)
  3095  		v1.AuxInt = 0
  3096  		v.AddArg(v1)
  3097  		v.AuxInt = 32
  3098  		return true
  3099  	}
  3100  }
  3101  func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
  3102  	b := v.Block
  3103  	_ = b
  3104  	// match: (Ctz64 <t> x)
  3105  	// cond:
  3106  	// result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
  3107  	for {
  3108  		t := v.Type
  3109  		x := v.Args[0]
  3110  		v.reset(OpAMD64CMOVQEQconst)
  3111  		v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
  3112  		v0.AddArg(x)
  3113  		v.AddArg(v0)
  3114  		v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
  3115  		v1.AddArg(x)
  3116  		v1.AuxInt = 0
  3117  		v.AddArg(v1)
  3118  		v.AuxInt = 64
  3119  		return true
  3120  	}
  3121  }
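        // BSF leaves its destination undefined when the source is zero, so the
        // count-trailing-zeros lowering pairs it with a conditional move:
        // (Ctz64 x) becomes (CMOVQEQconst (BSFQ x) (CMPQconst x [0]) [64]),
        // i.e. BSFQ's answer for nonzero x, and the operand width (64) when
        // x == 0, matching the generic op's definition of Ctz on zero.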
  3122  func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
  3123  	b := v.Block
  3124  	_ = b
  3125  	// match: (Cvt32Fto32 x)
  3126  	// cond:
  3127  	// result: (CVTTSS2SL x)
  3128  	for {
  3129  		x := v.Args[0]
  3130  		v.reset(OpAMD64CVTTSS2SL)
  3131  		v.AddArg(x)
  3132  		return true
  3133  	}
  3134  }
  3135  func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
  3136  	b := v.Block
  3137  	_ = b
  3138  	// match: (Cvt32Fto64 x)
  3139  	// cond:
  3140  	// result: (CVTTSS2SQ x)
  3141  	for {
  3142  		x := v.Args[0]
  3143  		v.reset(OpAMD64CVTTSS2SQ)
  3144  		v.AddArg(x)
  3145  		return true
  3146  	}
  3147  }
  3148  func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
  3149  	b := v.Block
  3150  	_ = b
  3151  	// match: (Cvt32Fto64F x)
  3152  	// cond:
  3153  	// result: (CVTSS2SD x)
  3154  	for {
  3155  		x := v.Args[0]
  3156  		v.reset(OpAMD64CVTSS2SD)
  3157  		v.AddArg(x)
  3158  		return true
  3159  	}
  3160  }
  3161  func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
  3162  	b := v.Block
  3163  	_ = b
  3164  	// match: (Cvt32to32F x)
  3165  	// cond:
  3166  	// result: (CVTSL2SS x)
  3167  	for {
  3168  		x := v.Args[0]
  3169  		v.reset(OpAMD64CVTSL2SS)
  3170  		v.AddArg(x)
  3171  		return true
  3172  	}
  3173  }
  3174  func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
  3175  	b := v.Block
  3176  	_ = b
  3177  	// match: (Cvt32to64F x)
  3178  	// cond:
  3179  	// result: (CVTSL2SD x)
  3180  	for {
  3181  		x := v.Args[0]
  3182  		v.reset(OpAMD64CVTSL2SD)
  3183  		v.AddArg(x)
  3184  		return true
  3185  	}
  3186  }
  3187  func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
  3188  	b := v.Block
  3189  	_ = b
  3190  	// match: (Cvt64Fto32 x)
  3191  	// cond:
  3192  	// result: (CVTTSD2SL x)
  3193  	for {
  3194  		x := v.Args[0]
  3195  		v.reset(OpAMD64CVTTSD2SL)
  3196  		v.AddArg(x)
  3197  		return true
  3198  	}
  3199  }
  3200  func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
  3201  	b := v.Block
  3202  	_ = b
  3203  	// match: (Cvt64Fto32F x)
  3204  	// cond:
  3205  	// result: (CVTSD2SS x)
  3206  	for {
  3207  		x := v.Args[0]
  3208  		v.reset(OpAMD64CVTSD2SS)
  3209  		v.AddArg(x)
  3210  		return true
  3211  	}
  3212  }
  3213  func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
  3214  	b := v.Block
  3215  	_ = b
  3216  	// match: (Cvt64Fto64 x)
  3217  	// cond:
  3218  	// result: (CVTTSD2SQ x)
  3219  	for {
  3220  		x := v.Args[0]
  3221  		v.reset(OpAMD64CVTTSD2SQ)
  3222  		v.AddArg(x)
  3223  		return true
  3224  	}
  3225  }
  3226  func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
  3227  	b := v.Block
  3228  	_ = b
  3229  	// match: (Cvt64to32F x)
  3230  	// cond:
  3231  	// result: (CVTSQ2SS x)
  3232  	for {
  3233  		x := v.Args[0]
  3234  		v.reset(OpAMD64CVTSQ2SS)
  3235  		v.AddArg(x)
  3236  		return true
  3237  	}
  3238  }
  3239  func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
  3240  	b := v.Block
  3241  	_ = b
  3242  	// match: (Cvt64to64F x)
  3243  	// cond:
  3244  	// result: (CVTSQ2SD x)
  3245  	for {
  3246  		x := v.Args[0]
  3247  		v.reset(OpAMD64CVTSQ2SD)
  3248  		v.AddArg(x)
  3249  		return true
  3250  	}
  3251  }
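        // The conversion mnemonics follow SSE naming in Go assembler spelling
        // (SL = 32-bit int, SQ = 64-bit int): the CVTT* forms truncate toward
        // zero, matching Go's float-to-int conversion semantics, while the
        // plain CVTS* forms are value conversions (int to float and between
        // float widths).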
  3252  func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
  3253  	b := v.Block
  3254  	_ = b
  3255  	// match: (DeferCall [argwid] mem)
  3256  	// cond:
  3257  	// result: (CALLdefer [argwid] mem)
  3258  	for {
  3259  		argwid := v.AuxInt
  3260  		mem := v.Args[0]
  3261  		v.reset(OpAMD64CALLdefer)
  3262  		v.AuxInt = argwid
  3263  		v.AddArg(mem)
  3264  		return true
  3265  	}
  3266  }
  3267  func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
  3268  	b := v.Block
  3269  	_ = b
  3270  	// match: (Div16  x y)
  3271  	// cond:
  3272  	// result: (DIVW  x y)
  3273  	for {
  3274  		x := v.Args[0]
  3275  		y := v.Args[1]
  3276  		v.reset(OpAMD64DIVW)
  3277  		v.AddArg(x)
  3278  		v.AddArg(y)
  3279  		return true
  3280  	}
  3281  }
  3282  func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
  3283  	b := v.Block
  3284  	_ = b
  3285  	// match: (Div16u x y)
  3286  	// cond:
  3287  	// result: (DIVWU x y)
  3288  	for {
  3289  		x := v.Args[0]
  3290  		y := v.Args[1]
  3291  		v.reset(OpAMD64DIVWU)
  3292  		v.AddArg(x)
  3293  		v.AddArg(y)
  3294  		return true
  3295  	}
  3296  }
  3297  func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
  3298  	b := v.Block
  3299  	_ = b
  3300  	// match: (Div32  x y)
  3301  	// cond:
  3302  	// result: (DIVL  x y)
  3303  	for {
  3304  		x := v.Args[0]
  3305  		y := v.Args[1]
  3306  		v.reset(OpAMD64DIVL)
  3307  		v.AddArg(x)
  3308  		v.AddArg(y)
  3309  		return true
  3310  	}
  3311  }
  3312  func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
  3313  	b := v.Block
  3314  	_ = b
  3315  	// match: (Div32F x y)
  3316  	// cond:
  3317  	// result: (DIVSS x y)
  3318  	for {
  3319  		x := v.Args[0]
  3320  		y := v.Args[1]
  3321  		v.reset(OpAMD64DIVSS)
  3322  		v.AddArg(x)
  3323  		v.AddArg(y)
  3324  		return true
  3325  	}
  3326  }
  3327  func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
  3328  	b := v.Block
  3329  	_ = b
  3330  	// match: (Div32u x y)
  3331  	// cond:
  3332  	// result: (DIVLU x y)
  3333  	for {
  3334  		x := v.Args[0]
  3335  		y := v.Args[1]
  3336  		v.reset(OpAMD64DIVLU)
  3337  		v.AddArg(x)
  3338  		v.AddArg(y)
  3339  		return true
  3340  	}
  3341  }
  3342  func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
  3343  	b := v.Block
  3344  	_ = b
  3345  	// match: (Div64  x y)
  3346  	// cond:
  3347  	// result: (DIVQ  x y)
  3348  	for {
  3349  		x := v.Args[0]
  3350  		y := v.Args[1]
  3351  		v.reset(OpAMD64DIVQ)
  3352  		v.AddArg(x)
  3353  		v.AddArg(y)
  3354  		return true
  3355  	}
  3356  }
  3357  func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
  3358  	b := v.Block
  3359  	_ = b
  3360  	// match: (Div64F x y)
  3361  	// cond:
  3362  	// result: (DIVSD x y)
  3363  	for {
  3364  		x := v.Args[0]
  3365  		y := v.Args[1]
  3366  		v.reset(OpAMD64DIVSD)
  3367  		v.AddArg(x)
  3368  		v.AddArg(y)
  3369  		return true
  3370  	}
  3371  }
  3372  func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
  3373  	b := v.Block
  3374  	_ = b
  3375  	// match: (Div64u x y)
  3376  	// cond:
  3377  	// result: (DIVQU x y)
  3378  	for {
  3379  		x := v.Args[0]
  3380  		y := v.Args[1]
  3381  		v.reset(OpAMD64DIVQU)
  3382  		v.AddArg(x)
  3383  		v.AddArg(y)
  3384  		return true
  3385  	}
  3386  }
  3387  func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
  3388  	b := v.Block
  3389  	_ = b
  3390  	// match: (Div8   x y)
  3391  	// cond:
  3392  	// result: (DIVW  (SignExt8to16 x) (SignExt8to16 y))
  3393  	for {
  3394  		x := v.Args[0]
  3395  		y := v.Args[1]
  3396  		v.reset(OpAMD64DIVW)
  3397  		v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
  3398  		v0.AddArg(x)
  3399  		v.AddArg(v0)
  3400  		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
  3401  		v1.AddArg(y)
  3402  		v.AddArg(v1)
  3403  		return true
  3404  	}
  3405  }
  3406  func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
  3407  	b := v.Block
  3408  	_ = b
  3409  	// match: (Div8u  x y)
  3410  	// cond:
  3411  	// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
  3412  	for {
  3413  		x := v.Args[0]
  3414  		y := v.Args[1]
  3415  		v.reset(OpAMD64DIVWU)
  3416  		v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
  3417  		v0.AddArg(x)
  3418  		v.AddArg(v0)
  3419  		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
  3420  		v1.AddArg(y)
  3421  		v.AddArg(v1)
  3422  		return true
  3423  	}
  3424  }
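        // There is deliberately no 8-bit divide here: Div8 and Div8u widen both
        // operands to 16 bits (sign- or zero-extending to match signedness) and
        // reuse DIVW/DIVWU, sidestepping the awkward AH:AL register convention
        // of the x86 byte divide.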
  3425  func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
  3426  	b := v.Block
  3427  	_ = b
  3428  	// match: (Eq16  x y)
  3429  	// cond:
  3430  	// result: (SETEQ (CMPW x y))
  3431  	for {
  3432  		x := v.Args[0]
  3433  		y := v.Args[1]
  3434  		v.reset(OpAMD64SETEQ)
  3435  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  3436  		v0.AddArg(x)
  3437  		v0.AddArg(y)
  3438  		v.AddArg(v0)
  3439  		return true
  3440  	}
  3441  }
  3442  func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
  3443  	b := v.Block
  3444  	_ = b
  3445  	// match: (Eq32  x y)
  3446  	// cond:
  3447  	// result: (SETEQ (CMPL x y))
  3448  	for {
  3449  		x := v.Args[0]
  3450  		y := v.Args[1]
  3451  		v.reset(OpAMD64SETEQ)
  3452  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  3453  		v0.AddArg(x)
  3454  		v0.AddArg(y)
  3455  		v.AddArg(v0)
  3456  		return true
  3457  	}
  3458  }
  3459  func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
  3460  	b := v.Block
  3461  	_ = b
  3462  	// match: (Eq32F x y)
  3463  	// cond:
  3464  	// result: (SETEQF (UCOMISS x y))
  3465  	for {
  3466  		x := v.Args[0]
  3467  		y := v.Args[1]
  3468  		v.reset(OpAMD64SETEQF)
  3469  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
  3470  		v0.AddArg(x)
  3471  		v0.AddArg(y)
  3472  		v.AddArg(v0)
  3473  		return true
  3474  	}
  3475  }
  3476  func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
  3477  	b := v.Block
  3478  	_ = b
  3479  	// match: (Eq64  x y)
  3480  	// cond:
  3481  	// result: (SETEQ (CMPQ x y))
  3482  	for {
  3483  		x := v.Args[0]
  3484  		y := v.Args[1]
  3485  		v.reset(OpAMD64SETEQ)
  3486  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  3487  		v0.AddArg(x)
  3488  		v0.AddArg(y)
  3489  		v.AddArg(v0)
  3490  		return true
  3491  	}
  3492  }
  3493  func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
  3494  	b := v.Block
  3495  	_ = b
  3496  	// match: (Eq64F x y)
  3497  	// cond:
  3498  	// result: (SETEQF (UCOMISD x y))
  3499  	for {
  3500  		x := v.Args[0]
  3501  		y := v.Args[1]
  3502  		v.reset(OpAMD64SETEQF)
  3503  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
  3504  		v0.AddArg(x)
  3505  		v0.AddArg(y)
  3506  		v.AddArg(v0)
  3507  		return true
  3508  	}
  3509  }
  3510  func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
  3511  	b := v.Block
  3512  	_ = b
  3513  	// match: (Eq8   x y)
  3514  	// cond:
  3515  	// result: (SETEQ (CMPB x y))
  3516  	for {
  3517  		x := v.Args[0]
  3518  		y := v.Args[1]
  3519  		v.reset(OpAMD64SETEQ)
  3520  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  3521  		v0.AddArg(x)
  3522  		v0.AddArg(y)
  3523  		v.AddArg(v0)
  3524  		return true
  3525  	}
  3526  }
  3527  func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
  3528  	b := v.Block
  3529  	_ = b
  3530  	// match: (EqB   x y)
  3531  	// cond:
  3532  	// result: (SETEQ (CMPB x y))
  3533  	for {
  3534  		x := v.Args[0]
  3535  		y := v.Args[1]
  3536  		v.reset(OpAMD64SETEQ)
  3537  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  3538  		v0.AddArg(x)
  3539  		v0.AddArg(y)
  3540  		v.AddArg(v0)
  3541  		return true
  3542  	}
  3543  }
  3544  func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
  3545  	b := v.Block
  3546  	_ = b
  3547  	// match: (EqPtr x y)
  3548  	// cond:
  3549  	// result: (SETEQ (CMPQ x y))
  3550  	for {
  3551  		x := v.Args[0]
  3552  		y := v.Args[1]
  3553  		v.reset(OpAMD64SETEQ)
  3554  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  3555  		v0.AddArg(x)
  3556  		v0.AddArg(y)
  3557  		v.AddArg(v0)
  3558  		return true
  3559  	}
  3560  }
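        // All comparisons lower to the same shape: a width-appropriate CMP to
        // set the flags, then a SETcc into a byte. The floating-point variants
        // go through UCOMISS/UCOMISD and the SETEQF/SETGEF/SETGF pseudo-ops,
        // which also consult the parity flag so that a comparison involving a
        // NaN comes out unordered (in particular, Eq on NaN yields false).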
  3561  func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
  3562  	b := v.Block
  3563  	_ = b
  3564  	// match: (Geq16  x y)
  3565  	// cond:
  3566  	// result: (SETGE (CMPW x y))
  3567  	for {
  3568  		x := v.Args[0]
  3569  		y := v.Args[1]
  3570  		v.reset(OpAMD64SETGE)
  3571  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  3572  		v0.AddArg(x)
  3573  		v0.AddArg(y)
  3574  		v.AddArg(v0)
  3575  		return true
  3576  	}
  3577  }
  3578  func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
  3579  	b := v.Block
  3580  	_ = b
  3581  	// match: (Geq16U x y)
  3582  	// cond:
  3583  	// result: (SETAE (CMPW x y))
  3584  	for {
  3585  		x := v.Args[0]
  3586  		y := v.Args[1]
  3587  		v.reset(OpAMD64SETAE)
  3588  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  3589  		v0.AddArg(x)
  3590  		v0.AddArg(y)
  3591  		v.AddArg(v0)
  3592  		return true
  3593  	}
  3594  }
  3595  func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
  3596  	b := v.Block
  3597  	_ = b
  3598  	// match: (Geq32  x y)
  3599  	// cond:
  3600  	// result: (SETGE (CMPL x y))
  3601  	for {
  3602  		x := v.Args[0]
  3603  		y := v.Args[1]
  3604  		v.reset(OpAMD64SETGE)
  3605  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  3606  		v0.AddArg(x)
  3607  		v0.AddArg(y)
  3608  		v.AddArg(v0)
  3609  		return true
  3610  	}
  3611  }
  3612  func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
  3613  	b := v.Block
  3614  	_ = b
  3615  	// match: (Geq32F x y)
  3616  	// cond:
  3617  	// result: (SETGEF (UCOMISS x y))
  3618  	for {
  3619  		x := v.Args[0]
  3620  		y := v.Args[1]
  3621  		v.reset(OpAMD64SETGEF)
  3622  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
  3623  		v0.AddArg(x)
  3624  		v0.AddArg(y)
  3625  		v.AddArg(v0)
  3626  		return true
  3627  	}
  3628  }
  3629  func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
  3630  	b := v.Block
  3631  	_ = b
  3632  	// match: (Geq32U x y)
  3633  	// cond:
  3634  	// result: (SETAE (CMPL x y))
  3635  	for {
  3636  		x := v.Args[0]
  3637  		y := v.Args[1]
  3638  		v.reset(OpAMD64SETAE)
  3639  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  3640  		v0.AddArg(x)
  3641  		v0.AddArg(y)
  3642  		v.AddArg(v0)
  3643  		return true
  3644  	}
  3645  }
  3646  func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
  3647  	b := v.Block
  3648  	_ = b
  3649  	// match: (Geq64  x y)
  3650  	// cond:
  3651  	// result: (SETGE (CMPQ x y))
  3652  	for {
  3653  		x := v.Args[0]
  3654  		y := v.Args[1]
  3655  		v.reset(OpAMD64SETGE)
  3656  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  3657  		v0.AddArg(x)
  3658  		v0.AddArg(y)
  3659  		v.AddArg(v0)
  3660  		return true
  3661  	}
  3662  }
  3663  func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
  3664  	b := v.Block
  3665  	_ = b
  3666  	// match: (Geq64F x y)
  3667  	// cond:
  3668  	// result: (SETGEF (UCOMISD x y))
  3669  	for {
  3670  		x := v.Args[0]
  3671  		y := v.Args[1]
  3672  		v.reset(OpAMD64SETGEF)
  3673  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
  3674  		v0.AddArg(x)
  3675  		v0.AddArg(y)
  3676  		v.AddArg(v0)
  3677  		return true
  3678  	}
  3679  }
  3680  func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
  3681  	b := v.Block
  3682  	_ = b
  3683  	// match: (Geq64U x y)
  3684  	// cond:
  3685  	// result: (SETAE (CMPQ x y))
  3686  	for {
  3687  		x := v.Args[0]
  3688  		y := v.Args[1]
  3689  		v.reset(OpAMD64SETAE)
  3690  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  3691  		v0.AddArg(x)
  3692  		v0.AddArg(y)
  3693  		v.AddArg(v0)
  3694  		return true
  3695  	}
  3696  }
  3697  func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
  3698  	b := v.Block
  3699  	_ = b
  3700  	// match: (Geq8   x y)
  3701  	// cond:
  3702  	// result: (SETGE (CMPB x y))
  3703  	for {
  3704  		x := v.Args[0]
  3705  		y := v.Args[1]
  3706  		v.reset(OpAMD64SETGE)
  3707  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  3708  		v0.AddArg(x)
  3709  		v0.AddArg(y)
  3710  		v.AddArg(v0)
  3711  		return true
  3712  	}
  3713  }
  3714  func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
  3715  	b := v.Block
  3716  	_ = b
  3717  	// match: (Geq8U  x y)
  3718  	// cond:
  3719  	// result: (SETAE (CMPB x y))
  3720  	for {
  3721  		x := v.Args[0]
  3722  		y := v.Args[1]
  3723  		v.reset(OpAMD64SETAE)
  3724  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  3725  		v0.AddArg(x)
  3726  		v0.AddArg(y)
  3727  		v.AddArg(v0)
  3728  		return true
  3729  	}
  3730  }
  3731  func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
  3732  	b := v.Block
  3733  	_ = b
  3734  	// match: (GetClosurePtr)
  3735  	// cond:
  3736  	// result: (LoweredGetClosurePtr)
  3737  	for {
  3738  		v.reset(OpAMD64LoweredGetClosurePtr)
  3739  		return true
  3740  	}
  3741  }
  3742  func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
  3743  	b := v.Block
  3744  	_ = b
  3745  	// match: (GetG mem)
  3746  	// cond:
  3747  	// result: (LoweredGetG mem)
  3748  	for {
  3749  		mem := v.Args[0]
  3750  		v.reset(OpAMD64LoweredGetG)
  3751  		v.AddArg(mem)
  3752  		return true
  3753  	}
  3754  }
  3755  func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
  3756  	b := v.Block
  3757  	_ = b
  3758  	// match: (GoCall [argwid] mem)
  3759  	// cond:
  3760  	// result: (CALLgo [argwid] mem)
  3761  	for {
  3762  		argwid := v.AuxInt
  3763  		mem := v.Args[0]
  3764  		v.reset(OpAMD64CALLgo)
  3765  		v.AuxInt = argwid
  3766  		v.AddArg(mem)
  3767  		return true
  3768  	}
  3769  }
  3770  func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
  3771  	b := v.Block
  3772  	_ = b
  3773  	// match: (Greater16  x y)
  3774  	// cond:
  3775  	// result: (SETG (CMPW x y))
  3776  	for {
  3777  		x := v.Args[0]
  3778  		y := v.Args[1]
  3779  		v.reset(OpAMD64SETG)
  3780  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  3781  		v0.AddArg(x)
  3782  		v0.AddArg(y)
  3783  		v.AddArg(v0)
  3784  		return true
  3785  	}
  3786  }
  3787  func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
  3788  	b := v.Block
  3789  	_ = b
  3790  	// match: (Greater16U x y)
  3791  	// cond:
  3792  	// result: (SETA (CMPW x y))
  3793  	for {
  3794  		x := v.Args[0]
  3795  		y := v.Args[1]
  3796  		v.reset(OpAMD64SETA)
  3797  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  3798  		v0.AddArg(x)
  3799  		v0.AddArg(y)
  3800  		v.AddArg(v0)
  3801  		return true
  3802  	}
  3803  }
  3804  func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
  3805  	b := v.Block
  3806  	_ = b
  3807  	// match: (Greater32  x y)
  3808  	// cond:
  3809  	// result: (SETG (CMPL x y))
  3810  	for {
  3811  		x := v.Args[0]
  3812  		y := v.Args[1]
  3813  		v.reset(OpAMD64SETG)
  3814  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  3815  		v0.AddArg(x)
  3816  		v0.AddArg(y)
  3817  		v.AddArg(v0)
  3818  		return true
  3819  	}
  3820  }
  3821  func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
  3822  	b := v.Block
  3823  	_ = b
  3824  	// match: (Greater32F x y)
  3825  	// cond:
  3826  	// result: (SETGF (UCOMISS x y))
  3827  	for {
  3828  		x := v.Args[0]
  3829  		y := v.Args[1]
  3830  		v.reset(OpAMD64SETGF)
  3831  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
  3832  		v0.AddArg(x)
  3833  		v0.AddArg(y)
  3834  		v.AddArg(v0)
  3835  		return true
  3836  	}
  3837  }
  3838  func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
  3839  	b := v.Block
  3840  	_ = b
  3841  	// match: (Greater32U x y)
  3842  	// cond:
  3843  	// result: (SETA (CMPL x y))
  3844  	for {
  3845  		x := v.Args[0]
  3846  		y := v.Args[1]
  3847  		v.reset(OpAMD64SETA)
  3848  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  3849  		v0.AddArg(x)
  3850  		v0.AddArg(y)
  3851  		v.AddArg(v0)
  3852  		return true
  3853  	}
  3854  }
  3855  func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
  3856  	b := v.Block
  3857  	_ = b
  3858  	// match: (Greater64  x y)
  3859  	// cond:
  3860  	// result: (SETG (CMPQ x y))
  3861  	for {
  3862  		x := v.Args[0]
  3863  		y := v.Args[1]
  3864  		v.reset(OpAMD64SETG)
  3865  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  3866  		v0.AddArg(x)
  3867  		v0.AddArg(y)
  3868  		v.AddArg(v0)
  3869  		return true
  3870  	}
  3871  }
  3872  func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
  3873  	b := v.Block
  3874  	_ = b
  3875  	// match: (Greater64F x y)
  3876  	// cond:
  3877  	// result: (SETGF (UCOMISD x y))
  3878  	for {
  3879  		x := v.Args[0]
  3880  		y := v.Args[1]
  3881  		v.reset(OpAMD64SETGF)
  3882  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
  3883  		v0.AddArg(x)
  3884  		v0.AddArg(y)
  3885  		v.AddArg(v0)
  3886  		return true
  3887  	}
  3888  }
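// Editorial note (a sketch, not part of the generated rules): the
// floating-point variants compare with UCOMISS/UCOMISD, which perform an
// unordered compare, and then use the SETGF pseudo-op. "Greater" is false
// whenever either operand is NaN, matching Go's semantics for > on floats;
// the Less/Leq float rules further below get the same effect by swapping
// the compare operands.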
  3889  func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
  3890  	b := v.Block
  3891  	_ = b
  3892  	// match: (Greater64U x y)
  3893  	// cond:
  3894  	// result: (SETA (CMPQ x y))
  3895  	for {
  3896  		x := v.Args[0]
  3897  		y := v.Args[1]
  3898  		v.reset(OpAMD64SETA)
  3899  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  3900  		v0.AddArg(x)
  3901  		v0.AddArg(y)
  3902  		v.AddArg(v0)
  3903  		return true
  3904  	}
  3905  }
  3906  func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
  3907  	b := v.Block
  3908  	_ = b
  3909  	// match: (Greater8   x y)
  3910  	// cond:
  3911  	// result: (SETG (CMPB x y))
  3912  	for {
  3913  		x := v.Args[0]
  3914  		y := v.Args[1]
  3915  		v.reset(OpAMD64SETG)
  3916  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  3917  		v0.AddArg(x)
  3918  		v0.AddArg(y)
  3919  		v.AddArg(v0)
  3920  		return true
  3921  	}
  3922  }
  3923  func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
  3924  	b := v.Block
  3925  	_ = b
  3926  	// match: (Greater8U  x y)
  3927  	// cond:
  3928  	// result: (SETA (CMPB x y))
  3929  	for {
  3930  		x := v.Args[0]
  3931  		y := v.Args[1]
  3932  		v.reset(OpAMD64SETA)
  3933  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  3934  		v0.AddArg(x)
  3935  		v0.AddArg(y)
  3936  		v.AddArg(v0)
  3937  		return true
  3938  	}
  3939  }
  3940  func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
  3941  	b := v.Block
  3942  	_ = b
  3943  	// match: (Hmul16  x y)
  3944  	// cond:
  3945  	// result: (HMULW  x y)
  3946  	for {
  3947  		x := v.Args[0]
  3948  		y := v.Args[1]
  3949  		v.reset(OpAMD64HMULW)
  3950  		v.AddArg(x)
  3951  		v.AddArg(y)
  3952  		return true
  3953  	}
  3954  }
  3955  func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
  3956  	b := v.Block
  3957  	_ = b
  3958  	// match: (Hmul16u x y)
  3959  	// cond:
  3960  	// result: (HMULWU x y)
  3961  	for {
  3962  		x := v.Args[0]
  3963  		y := v.Args[1]
  3964  		v.reset(OpAMD64HMULWU)
  3965  		v.AddArg(x)
  3966  		v.AddArg(y)
  3967  		return true
  3968  	}
  3969  }
  3970  func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
  3971  	b := v.Block
  3972  	_ = b
  3973  	// match: (Hmul32  x y)
  3974  	// cond:
  3975  	// result: (HMULL  x y)
  3976  	for {
  3977  		x := v.Args[0]
  3978  		y := v.Args[1]
  3979  		v.reset(OpAMD64HMULL)
  3980  		v.AddArg(x)
  3981  		v.AddArg(y)
  3982  		return true
  3983  	}
  3984  }
  3985  func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
  3986  	b := v.Block
  3987  	_ = b
  3988  	// match: (Hmul32u x y)
  3989  	// cond:
  3990  	// result: (HMULLU x y)
  3991  	for {
  3992  		x := v.Args[0]
  3993  		y := v.Args[1]
  3994  		v.reset(OpAMD64HMULLU)
  3995  		v.AddArg(x)
  3996  		v.AddArg(y)
  3997  		return true
  3998  	}
  3999  }
  4000  func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
  4001  	b := v.Block
  4002  	_ = b
  4003  	// match: (Hmul64  x y)
  4004  	// cond:
  4005  	// result: (HMULQ  x y)
  4006  	for {
  4007  		x := v.Args[0]
  4008  		y := v.Args[1]
  4009  		v.reset(OpAMD64HMULQ)
  4010  		v.AddArg(x)
  4011  		v.AddArg(y)
  4012  		return true
  4013  	}
  4014  }
  4015  func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
  4016  	b := v.Block
  4017  	_ = b
  4018  	// match: (Hmul64u x y)
  4019  	// cond:
  4020  	// result: (HMULQU x y)
  4021  	for {
  4022  		x := v.Args[0]
  4023  		y := v.Args[1]
  4024  		v.reset(OpAMD64HMULQU)
  4025  		v.AddArg(x)
  4026  		v.AddArg(y)
  4027  		return true
  4028  	}
  4029  }
  4030  func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
  4031  	b := v.Block
  4032  	_ = b
  4033  	// match: (Hmul8   x y)
  4034  	// cond:
  4035  	// result: (HMULB  x y)
  4036  	for {
  4037  		x := v.Args[0]
  4038  		y := v.Args[1]
  4039  		v.reset(OpAMD64HMULB)
  4040  		v.AddArg(x)
  4041  		v.AddArg(y)
  4042  		return true
  4043  	}
  4044  }
  4045  func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
  4046  	b := v.Block
  4047  	_ = b
  4048  	// match: (Hmul8u  x y)
  4049  	// cond:
  4050  	// result: (HMULBU x y)
  4051  	for {
  4052  		x := v.Args[0]
  4053  		y := v.Args[1]
  4054  		v.reset(OpAMD64HMULBU)
  4055  		v.AddArg(x)
  4056  		v.AddArg(y)
  4057  		return true
  4058  	}
  4059  }
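// Editorial note (a sketch, not part of the generated rules): the Hmul ops
// produce the high half of the double-width product, which AMD64 computes
// for free in its widening multiply forms (lowered here as HMULx/HMULxU).
// Source-level meaning, with a hypothetical helper name:
//
//	func hmul32(x, y int32) int32 { return int32((int64(x) * int64(y)) >> 32) } // (Hmul32 x y)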
  4060  func rewriteValueAMD64_OpITab(v *Value, config *Config) bool {
  4061  	b := v.Block
  4062  	_ = b
  4063  	// match: (ITab (Load ptr mem))
  4064  	// cond:
  4065  	// result: (MOVQload ptr mem)
  4066  	for {
  4067  		v_0 := v.Args[0]
  4068  		if v_0.Op != OpLoad {
  4069  			break
  4070  		}
  4071  		ptr := v_0.Args[0]
  4072  		mem := v_0.Args[1]
  4073  		v.reset(OpAMD64MOVQload)
  4074  		v.AddArg(ptr)
  4075  		v.AddArg(mem)
  4076  		return true
  4077  	}
  4078  	return false
  4079  }
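// Editorial note (a sketch, not part of the generated rules): an interface
// value is laid out as (itab/type pointer, data pointer), so the ITab of a
// freshly loaded interface only needs the first word. The rule above reuses
// the load's pointer and memory arguments and reads that word directly with
// MOVQload instead of loading the whole interface.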
  4080  func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
  4081  	b := v.Block
  4082  	_ = b
  4083  	// match: (InterCall [argwid] entry mem)
  4084  	// cond:
  4085  	// result: (CALLinter [argwid] entry mem)
  4086  	for {
  4087  		argwid := v.AuxInt
  4088  		entry := v.Args[0]
  4089  		mem := v.Args[1]
  4090  		v.reset(OpAMD64CALLinter)
  4091  		v.AuxInt = argwid
  4092  		v.AddArg(entry)
  4093  		v.AddArg(mem)
  4094  		return true
  4095  	}
  4096  }
  4097  func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
  4098  	b := v.Block
  4099  	_ = b
  4100  	// match: (IsInBounds idx len)
  4101  	// cond:
  4102  	// result: (SETB (CMPQ idx len))
  4103  	for {
  4104  		idx := v.Args[0]
  4105  		len := v.Args[1]
  4106  		v.reset(OpAMD64SETB)
  4107  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  4108  		v0.AddArg(idx)
  4109  		v0.AddArg(len)
  4110  		v.AddArg(v0)
  4111  		return true
  4112  	}
  4113  }
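// Editorial note (a sketch, not part of the generated rules): a single
// unsigned compare implements the two-sided bounds check, because a negative
// index reinterpreted as unsigned exceeds any valid length. Equivalent Go,
// with a hypothetical helper name:
//
//	func inBounds(idx, len int64) bool { return uint64(idx) < uint64(len) } // SETB (CMPQ idx len)
//
// IsSliceInBounds below is the same trick with SETBE, since slicing permits
// idx == len.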
  4114  func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
  4115  	b := v.Block
  4116  	_ = b
  4117  	// match: (IsNonNil p)
  4118  	// cond:
  4119  	// result: (SETNE (TESTQ p p))
  4120  	for {
  4121  		p := v.Args[0]
  4122  		v.reset(OpAMD64SETNE)
  4123  		v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
  4124  		v0.AddArg(p)
  4125  		v0.AddArg(p)
  4126  		v.AddArg(v0)
  4127  		return true
  4128  	}
  4129  }
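// Editorial note (a sketch, not part of the generated rules): TESTQ p p
// ANDs the pointer with itself purely to set the zero flag, so SETNE
// materializes the boolean p != nil without needing a constant-zero operand.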
  4130  func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
  4131  	b := v.Block
  4132  	_ = b
  4133  	// match: (IsSliceInBounds idx len)
  4134  	// cond:
  4135  	// result: (SETBE (CMPQ idx len))
  4136  	for {
  4137  		idx := v.Args[0]
  4138  		len := v.Args[1]
  4139  		v.reset(OpAMD64SETBE)
  4140  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  4141  		v0.AddArg(idx)
  4142  		v0.AddArg(len)
  4143  		v.AddArg(v0)
  4144  		return true
  4145  	}
  4146  }
  4147  func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
  4148  	b := v.Block
  4149  	_ = b
  4150  	// match: (LEAQ [c] {s} (ADDQconst [d] x))
  4151  	// cond: is32Bit(c+d)
  4152  	// result: (LEAQ [c+d] {s} x)
  4153  	for {
  4154  		c := v.AuxInt
  4155  		s := v.Aux
  4156  		v_0 := v.Args[0]
  4157  		if v_0.Op != OpAMD64ADDQconst {
  4158  			break
  4159  		}
  4160  		d := v_0.AuxInt
  4161  		x := v_0.Args[0]
  4162  		if !(is32Bit(c + d)) {
  4163  			break
  4164  		}
  4165  		v.reset(OpAMD64LEAQ)
  4166  		v.AuxInt = c + d
  4167  		v.Aux = s
  4168  		v.AddArg(x)
  4169  		return true
  4170  	}
  4171  	// match: (LEAQ [c] {s} (ADDQ x y))
  4172  	// cond: x.Op != OpSB && y.Op != OpSB
  4173  	// result: (LEAQ1 [c] {s} x y)
  4174  	for {
  4175  		c := v.AuxInt
  4176  		s := v.Aux
  4177  		v_0 := v.Args[0]
  4178  		if v_0.Op != OpAMD64ADDQ {
  4179  			break
  4180  		}
  4181  		x := v_0.Args[0]
  4182  		y := v_0.Args[1]
  4183  		if !(x.Op != OpSB && y.Op != OpSB) {
  4184  			break
  4185  		}
  4186  		v.reset(OpAMD64LEAQ1)
  4187  		v.AuxInt = c
  4188  		v.Aux = s
  4189  		v.AddArg(x)
  4190  		v.AddArg(y)
  4191  		return true
  4192  	}
  4193  	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
  4194  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4195  	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  4196  	for {
  4197  		off1 := v.AuxInt
  4198  		sym1 := v.Aux
  4199  		v_0 := v.Args[0]
  4200  		if v_0.Op != OpAMD64LEAQ {
  4201  			break
  4202  		}
  4203  		off2 := v_0.AuxInt
  4204  		sym2 := v_0.Aux
  4205  		x := v_0.Args[0]
  4206  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4207  			break
  4208  		}
  4209  		v.reset(OpAMD64LEAQ)
  4210  		v.AuxInt = off1 + off2
  4211  		v.Aux = mergeSym(sym1, sym2)
  4212  		v.AddArg(x)
  4213  		return true
  4214  	}
  4215  	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
  4216  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4217  	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4218  	for {
  4219  		off1 := v.AuxInt
  4220  		sym1 := v.Aux
  4221  		v_0 := v.Args[0]
  4222  		if v_0.Op != OpAMD64LEAQ1 {
  4223  			break
  4224  		}
  4225  		off2 := v_0.AuxInt
  4226  		sym2 := v_0.Aux
  4227  		x := v_0.Args[0]
  4228  		y := v_0.Args[1]
  4229  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4230  			break
  4231  		}
  4232  		v.reset(OpAMD64LEAQ1)
  4233  		v.AuxInt = off1 + off2
  4234  		v.Aux = mergeSym(sym1, sym2)
  4235  		v.AddArg(x)
  4236  		v.AddArg(y)
  4237  		return true
  4238  	}
  4239  	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
  4240  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4241  	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4242  	for {
  4243  		off1 := v.AuxInt
  4244  		sym1 := v.Aux
  4245  		v_0 := v.Args[0]
  4246  		if v_0.Op != OpAMD64LEAQ2 {
  4247  			break
  4248  		}
  4249  		off2 := v_0.AuxInt
  4250  		sym2 := v_0.Aux
  4251  		x := v_0.Args[0]
  4252  		y := v_0.Args[1]
  4253  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4254  			break
  4255  		}
  4256  		v.reset(OpAMD64LEAQ2)
  4257  		v.AuxInt = off1 + off2
  4258  		v.Aux = mergeSym(sym1, sym2)
  4259  		v.AddArg(x)
  4260  		v.AddArg(y)
  4261  		return true
  4262  	}
  4263  	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
  4264  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4265  	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4266  	for {
  4267  		off1 := v.AuxInt
  4268  		sym1 := v.Aux
  4269  		v_0 := v.Args[0]
  4270  		if v_0.Op != OpAMD64LEAQ4 {
  4271  			break
  4272  		}
  4273  		off2 := v_0.AuxInt
  4274  		sym2 := v_0.Aux
  4275  		x := v_0.Args[0]
  4276  		y := v_0.Args[1]
  4277  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4278  			break
  4279  		}
  4280  		v.reset(OpAMD64LEAQ4)
  4281  		v.AuxInt = off1 + off2
  4282  		v.Aux = mergeSym(sym1, sym2)
  4283  		v.AddArg(x)
  4284  		v.AddArg(y)
  4285  		return true
  4286  	}
  4287  	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
  4288  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4289  	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4290  	for {
  4291  		off1 := v.AuxInt
  4292  		sym1 := v.Aux
  4293  		v_0 := v.Args[0]
  4294  		if v_0.Op != OpAMD64LEAQ8 {
  4295  			break
  4296  		}
  4297  		off2 := v_0.AuxInt
  4298  		sym2 := v_0.Aux
  4299  		x := v_0.Args[0]
  4300  		y := v_0.Args[1]
  4301  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4302  			break
  4303  		}
  4304  		v.reset(OpAMD64LEAQ8)
  4305  		v.AuxInt = off1 + off2
  4306  		v.Aux = mergeSym(sym1, sym2)
  4307  		v.AddArg(x)
  4308  		v.AddArg(y)
  4309  		return true
  4310  	}
  4311  	return false
  4312  }
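// Editorial note (a sketch, not part of the generated rules): every LEAQ
// fold above is guarded by is32Bit because an x86-64 addressing mode carries
// only a signed 32-bit displacement. The helper (defined elsewhere in this
// package, in rewrite.go) is effectively:
//
//	func is32Bit(n int64) bool { return n == int64(int32(n)) }
//
// The x.Op != OpSB guards exist because the pseudo-register SB (the static
// base for global symbols) may appear only as the base of an address, never
// as an index, so a rewrite must not move it into an index slot.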
  4313  func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
  4314  	b := v.Block
  4315  	_ = b
  4316  	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
  4317  	// cond: is32Bit(c+d)   && x.Op != OpSB
  4318  	// result: (LEAQ1 [c+d] {s} x y)
  4319  	for {
  4320  		c := v.AuxInt
  4321  		s := v.Aux
  4322  		v_0 := v.Args[0]
  4323  		if v_0.Op != OpAMD64ADDQconst {
  4324  			break
  4325  		}
  4326  		d := v_0.AuxInt
  4327  		x := v_0.Args[0]
  4328  		y := v.Args[1]
  4329  		if !(is32Bit(c+d) && x.Op != OpSB) {
  4330  			break
  4331  		}
  4332  		v.reset(OpAMD64LEAQ1)
  4333  		v.AuxInt = c + d
  4334  		v.Aux = s
  4335  		v.AddArg(x)
  4336  		v.AddArg(y)
  4337  		return true
  4338  	}
  4339  	// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
  4340  	// cond: is32Bit(c+d)   && y.Op != OpSB
  4341  	// result: (LEAQ1 [c+d] {s} x y)
  4342  	for {
  4343  		c := v.AuxInt
  4344  		s := v.Aux
  4345  		x := v.Args[0]
  4346  		v_1 := v.Args[1]
  4347  		if v_1.Op != OpAMD64ADDQconst {
  4348  			break
  4349  		}
  4350  		d := v_1.AuxInt
  4351  		y := v_1.Args[0]
  4352  		if !(is32Bit(c+d) && y.Op != OpSB) {
  4353  			break
  4354  		}
  4355  		v.reset(OpAMD64LEAQ1)
  4356  		v.AuxInt = c + d
  4357  		v.Aux = s
  4358  		v.AddArg(x)
  4359  		v.AddArg(y)
  4360  		return true
  4361  	}
  4362  	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
  4363  	// cond:
  4364  	// result: (LEAQ2 [c] {s} x y)
  4365  	for {
  4366  		c := v.AuxInt
  4367  		s := v.Aux
  4368  		x := v.Args[0]
  4369  		v_1 := v.Args[1]
  4370  		if v_1.Op != OpAMD64SHLQconst {
  4371  			break
  4372  		}
  4373  		if v_1.AuxInt != 1 {
  4374  			break
  4375  		}
  4376  		y := v_1.Args[0]
  4377  		v.reset(OpAMD64LEAQ2)
  4378  		v.AuxInt = c
  4379  		v.Aux = s
  4380  		v.AddArg(x)
  4381  		v.AddArg(y)
  4382  		return true
  4383  	}
  4384  	// match: (LEAQ1 [c] {s} (SHLQconst [1] x) y)
  4385  	// cond:
  4386  	// result: (LEAQ2 [c] {s} y x)
  4387  	for {
  4388  		c := v.AuxInt
  4389  		s := v.Aux
  4390  		v_0 := v.Args[0]
  4391  		if v_0.Op != OpAMD64SHLQconst {
  4392  			break
  4393  		}
  4394  		if v_0.AuxInt != 1 {
  4395  			break
  4396  		}
  4397  		x := v_0.Args[0]
  4398  		y := v.Args[1]
  4399  		v.reset(OpAMD64LEAQ2)
  4400  		v.AuxInt = c
  4401  		v.Aux = s
  4402  		v.AddArg(y)
  4403  		v.AddArg(x)
  4404  		return true
  4405  	}
  4406  	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
  4407  	// cond:
  4408  	// result: (LEAQ4 [c] {s} x y)
  4409  	for {
  4410  		c := v.AuxInt
  4411  		s := v.Aux
  4412  		x := v.Args[0]
  4413  		v_1 := v.Args[1]
  4414  		if v_1.Op != OpAMD64SHLQconst {
  4415  			break
  4416  		}
  4417  		if v_1.AuxInt != 2 {
  4418  			break
  4419  		}
  4420  		y := v_1.Args[0]
  4421  		v.reset(OpAMD64LEAQ4)
  4422  		v.AuxInt = c
  4423  		v.Aux = s
  4424  		v.AddArg(x)
  4425  		v.AddArg(y)
  4426  		return true
  4427  	}
  4428  	// match: (LEAQ1 [c] {s} (SHLQconst [2] x) y)
  4429  	// cond:
  4430  	// result: (LEAQ4 [c] {s} y x)
  4431  	for {
  4432  		c := v.AuxInt
  4433  		s := v.Aux
  4434  		v_0 := v.Args[0]
  4435  		if v_0.Op != OpAMD64SHLQconst {
  4436  			break
  4437  		}
  4438  		if v_0.AuxInt != 2 {
  4439  			break
  4440  		}
  4441  		x := v_0.Args[0]
  4442  		y := v.Args[1]
  4443  		v.reset(OpAMD64LEAQ4)
  4444  		v.AuxInt = c
  4445  		v.Aux = s
  4446  		v.AddArg(y)
  4447  		v.AddArg(x)
  4448  		return true
  4449  	}
  4450  	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
  4451  	// cond:
  4452  	// result: (LEAQ8 [c] {s} x y)
  4453  	for {
  4454  		c := v.AuxInt
  4455  		s := v.Aux
  4456  		x := v.Args[0]
  4457  		v_1 := v.Args[1]
  4458  		if v_1.Op != OpAMD64SHLQconst {
  4459  			break
  4460  		}
  4461  		if v_1.AuxInt != 3 {
  4462  			break
  4463  		}
  4464  		y := v_1.Args[0]
  4465  		v.reset(OpAMD64LEAQ8)
  4466  		v.AuxInt = c
  4467  		v.Aux = s
  4468  		v.AddArg(x)
  4469  		v.AddArg(y)
  4470  		return true
  4471  	}
  4472  	// match: (LEAQ1 [c] {s} (SHLQconst [3] x) y)
  4473  	// cond:
  4474  	// result: (LEAQ8 [c] {s} y x)
  4475  	for {
  4476  		c := v.AuxInt
  4477  		s := v.Aux
  4478  		v_0 := v.Args[0]
  4479  		if v_0.Op != OpAMD64SHLQconst {
  4480  			break
  4481  		}
  4482  		if v_0.AuxInt != 3 {
  4483  			break
  4484  		}
  4485  		x := v_0.Args[0]
  4486  		y := v.Args[1]
  4487  		v.reset(OpAMD64LEAQ8)
  4488  		v.AuxInt = c
  4489  		v.Aux = s
  4490  		v.AddArg(y)
  4491  		v.AddArg(x)
  4492  		return true
  4493  	}
  4494  	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
  4495  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
  4496  	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4497  	for {
  4498  		off1 := v.AuxInt
  4499  		sym1 := v.Aux
  4500  		v_0 := v.Args[0]
  4501  		if v_0.Op != OpAMD64LEAQ {
  4502  			break
  4503  		}
  4504  		off2 := v_0.AuxInt
  4505  		sym2 := v_0.Aux
  4506  		x := v_0.Args[0]
  4507  		y := v.Args[1]
  4508  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
  4509  			break
  4510  		}
  4511  		v.reset(OpAMD64LEAQ1)
  4512  		v.AuxInt = off1 + off2
  4513  		v.Aux = mergeSym(sym1, sym2)
  4514  		v.AddArg(x)
  4515  		v.AddArg(y)
  4516  		return true
  4517  	}
  4518  	// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
  4519  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
  4520  	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4521  	for {
  4522  		off1 := v.AuxInt
  4523  		sym1 := v.Aux
  4524  		x := v.Args[0]
  4525  		v_1 := v.Args[1]
  4526  		if v_1.Op != OpAMD64LEAQ {
  4527  			break
  4528  		}
  4529  		off2 := v_1.AuxInt
  4530  		sym2 := v_1.Aux
  4531  		y := v_1.Args[0]
  4532  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
  4533  			break
  4534  		}
  4535  		v.reset(OpAMD64LEAQ1)
  4536  		v.AuxInt = off1 + off2
  4537  		v.Aux = mergeSym(sym1, sym2)
  4538  		v.AddArg(x)
  4539  		v.AddArg(y)
  4540  		return true
  4541  	}
  4542  	return false
  4543  }
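// Editorial note (a sketch, not part of the generated rules): the SHLQconst
// rules upgrade LEAQ1 to the scaled addressing forms: a left shift by k
// turns the shifted operand into an index with scale 1<<k (LEAQ2, LEAQ4,
// LEAQ8). When the shifted operand is on the left, the result swaps the
// arguments so the scaled value lands in the index position:
// (x<<1) + y + c is rebracketed as y + 2*x + c.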
  4544  func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
  4545  	b := v.Block
  4546  	_ = b
  4547  	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
  4548  	// cond: is32Bit(c+d)   && x.Op != OpSB
  4549  	// result: (LEAQ2 [c+d] {s} x y)
  4550  	for {
  4551  		c := v.AuxInt
  4552  		s := v.Aux
  4553  		v_0 := v.Args[0]
  4554  		if v_0.Op != OpAMD64ADDQconst {
  4555  			break
  4556  		}
  4557  		d := v_0.AuxInt
  4558  		x := v_0.Args[0]
  4559  		y := v.Args[1]
  4560  		if !(is32Bit(c+d) && x.Op != OpSB) {
  4561  			break
  4562  		}
  4563  		v.reset(OpAMD64LEAQ2)
  4564  		v.AuxInt = c + d
  4565  		v.Aux = s
  4566  		v.AddArg(x)
  4567  		v.AddArg(y)
  4568  		return true
  4569  	}
  4570  	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
  4571  	// cond: is32Bit(c+2*d) && y.Op != OpSB
  4572  	// result: (LEAQ2 [c+2*d] {s} x y)
  4573  	for {
  4574  		c := v.AuxInt
  4575  		s := v.Aux
  4576  		x := v.Args[0]
  4577  		v_1 := v.Args[1]
  4578  		if v_1.Op != OpAMD64ADDQconst {
  4579  			break
  4580  		}
  4581  		d := v_1.AuxInt
  4582  		y := v_1.Args[0]
  4583  		if !(is32Bit(c+2*d) && y.Op != OpSB) {
  4584  			break
  4585  		}
  4586  		v.reset(OpAMD64LEAQ2)
  4587  		v.AuxInt = c + 2*d
  4588  		v.Aux = s
  4589  		v.AddArg(x)
  4590  		v.AddArg(y)
  4591  		return true
  4592  	}
  4593  	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
  4594  	// cond:
  4595  	// result: (LEAQ4 [c] {s} x y)
  4596  	for {
  4597  		c := v.AuxInt
  4598  		s := v.Aux
  4599  		x := v.Args[0]
  4600  		v_1 := v.Args[1]
  4601  		if v_1.Op != OpAMD64SHLQconst {
  4602  			break
  4603  		}
  4604  		if v_1.AuxInt != 1 {
  4605  			break
  4606  		}
  4607  		y := v_1.Args[0]
  4608  		v.reset(OpAMD64LEAQ4)
  4609  		v.AuxInt = c
  4610  		v.Aux = s
  4611  		v.AddArg(x)
  4612  		v.AddArg(y)
  4613  		return true
  4614  	}
  4615  	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
  4616  	// cond:
  4617  	// result: (LEAQ8 [c] {s} x y)
  4618  	for {
  4619  		c := v.AuxInt
  4620  		s := v.Aux
  4621  		x := v.Args[0]
  4622  		v_1 := v.Args[1]
  4623  		if v_1.Op != OpAMD64SHLQconst {
  4624  			break
  4625  		}
  4626  		if v_1.AuxInt != 2 {
  4627  			break
  4628  		}
  4629  		y := v_1.Args[0]
  4630  		v.reset(OpAMD64LEAQ8)
  4631  		v.AuxInt = c
  4632  		v.Aux = s
  4633  		v.AddArg(x)
  4634  		v.AddArg(y)
  4635  		return true
  4636  	}
  4637  	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
  4638  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
  4639  	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4640  	for {
  4641  		off1 := v.AuxInt
  4642  		sym1 := v.Aux
  4643  		v_0 := v.Args[0]
  4644  		if v_0.Op != OpAMD64LEAQ {
  4645  			break
  4646  		}
  4647  		off2 := v_0.AuxInt
  4648  		sym2 := v_0.Aux
  4649  		x := v_0.Args[0]
  4650  		y := v.Args[1]
  4651  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
  4652  			break
  4653  		}
  4654  		v.reset(OpAMD64LEAQ2)
  4655  		v.AuxInt = off1 + off2
  4656  		v.Aux = mergeSym(sym1, sym2)
  4657  		v.AddArg(x)
  4658  		v.AddArg(y)
  4659  		return true
  4660  	}
  4661  	return false
  4662  }
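// Editorial note (a sketch, not part of the generated rules): when an
// ADDQconst is folded out of the *index* operand of a scaled LEAQ, the
// constant is scaled along with it:
//
//	x + 2*(y+d) + c = x + 2*y + (c + 2*d)
//
// hence the c+2*d in both the condition and the result above. The LEAQ4 and
// LEAQ8 rules below use c+4*d and c+8*d for the same reason, while folding
// out of the unscaled base operand keeps the plain c+d.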
  4663  func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
  4664  	b := v.Block
  4665  	_ = b
  4666  	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
  4667  	// cond: is32Bit(c+d)   && x.Op != OpSB
  4668  	// result: (LEAQ4 [c+d] {s} x y)
  4669  	for {
  4670  		c := v.AuxInt
  4671  		s := v.Aux
  4672  		v_0 := v.Args[0]
  4673  		if v_0.Op != OpAMD64ADDQconst {
  4674  			break
  4675  		}
  4676  		d := v_0.AuxInt
  4677  		x := v_0.Args[0]
  4678  		y := v.Args[1]
  4679  		if !(is32Bit(c+d) && x.Op != OpSB) {
  4680  			break
  4681  		}
  4682  		v.reset(OpAMD64LEAQ4)
  4683  		v.AuxInt = c + d
  4684  		v.Aux = s
  4685  		v.AddArg(x)
  4686  		v.AddArg(y)
  4687  		return true
  4688  	}
  4689  	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
  4690  	// cond: is32Bit(c+4*d) && y.Op != OpSB
  4691  	// result: (LEAQ4 [c+4*d] {s} x y)
  4692  	for {
  4693  		c := v.AuxInt
  4694  		s := v.Aux
  4695  		x := v.Args[0]
  4696  		v_1 := v.Args[1]
  4697  		if v_1.Op != OpAMD64ADDQconst {
  4698  			break
  4699  		}
  4700  		d := v_1.AuxInt
  4701  		y := v_1.Args[0]
  4702  		if !(is32Bit(c+4*d) && y.Op != OpSB) {
  4703  			break
  4704  		}
  4705  		v.reset(OpAMD64LEAQ4)
  4706  		v.AuxInt = c + 4*d
  4707  		v.Aux = s
  4708  		v.AddArg(x)
  4709  		v.AddArg(y)
  4710  		return true
  4711  	}
  4712  	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
  4713  	// cond:
  4714  	// result: (LEAQ8 [c] {s} x y)
  4715  	for {
  4716  		c := v.AuxInt
  4717  		s := v.Aux
  4718  		x := v.Args[0]
  4719  		v_1 := v.Args[1]
  4720  		if v_1.Op != OpAMD64SHLQconst {
  4721  			break
  4722  		}
  4723  		if v_1.AuxInt != 1 {
  4724  			break
  4725  		}
  4726  		y := v_1.Args[0]
  4727  		v.reset(OpAMD64LEAQ8)
  4728  		v.AuxInt = c
  4729  		v.Aux = s
  4730  		v.AddArg(x)
  4731  		v.AddArg(y)
  4732  		return true
  4733  	}
  4734  	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
  4735  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
  4736  	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4737  	for {
  4738  		off1 := v.AuxInt
  4739  		sym1 := v.Aux
  4740  		v_0 := v.Args[0]
  4741  		if v_0.Op != OpAMD64LEAQ {
  4742  			break
  4743  		}
  4744  		off2 := v_0.AuxInt
  4745  		sym2 := v_0.Aux
  4746  		x := v_0.Args[0]
  4747  		y := v.Args[1]
  4748  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
  4749  			break
  4750  		}
  4751  		v.reset(OpAMD64LEAQ4)
  4752  		v.AuxInt = off1 + off2
  4753  		v.Aux = mergeSym(sym1, sym2)
  4754  		v.AddArg(x)
  4755  		v.AddArg(y)
  4756  		return true
  4757  	}
  4758  	return false
  4759  }
  4760  func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
  4761  	b := v.Block
  4762  	_ = b
  4763  	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
  4764  	// cond: is32Bit(c+d)   && x.Op != OpSB
  4765  	// result: (LEAQ8 [c+d] {s} x y)
  4766  	for {
  4767  		c := v.AuxInt
  4768  		s := v.Aux
  4769  		v_0 := v.Args[0]
  4770  		if v_0.Op != OpAMD64ADDQconst {
  4771  			break
  4772  		}
  4773  		d := v_0.AuxInt
  4774  		x := v_0.Args[0]
  4775  		y := v.Args[1]
  4776  		if !(is32Bit(c+d) && x.Op != OpSB) {
  4777  			break
  4778  		}
  4779  		v.reset(OpAMD64LEAQ8)
  4780  		v.AuxInt = c + d
  4781  		v.Aux = s
  4782  		v.AddArg(x)
  4783  		v.AddArg(y)
  4784  		return true
  4785  	}
  4786  	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
  4787  	// cond: is32Bit(c+8*d) && y.Op != OpSB
  4788  	// result: (LEAQ8 [c+8*d] {s} x y)
  4789  	for {
  4790  		c := v.AuxInt
  4791  		s := v.Aux
  4792  		x := v.Args[0]
  4793  		v_1 := v.Args[1]
  4794  		if v_1.Op != OpAMD64ADDQconst {
  4795  			break
  4796  		}
  4797  		d := v_1.AuxInt
  4798  		y := v_1.Args[0]
  4799  		if !(is32Bit(c+8*d) && y.Op != OpSB) {
  4800  			break
  4801  		}
  4802  		v.reset(OpAMD64LEAQ8)
  4803  		v.AuxInt = c + 8*d
  4804  		v.Aux = s
  4805  		v.AddArg(x)
  4806  		v.AddArg(y)
  4807  		return true
  4808  	}
  4809  	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
  4810  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
  4811  	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  4812  	for {
  4813  		off1 := v.AuxInt
  4814  		sym1 := v.Aux
  4815  		v_0 := v.Args[0]
  4816  		if v_0.Op != OpAMD64LEAQ {
  4817  			break
  4818  		}
  4819  		off2 := v_0.AuxInt
  4820  		sym2 := v_0.Aux
  4821  		x := v_0.Args[0]
  4822  		y := v.Args[1]
  4823  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
  4824  			break
  4825  		}
  4826  		v.reset(OpAMD64LEAQ8)
  4827  		v.AuxInt = off1 + off2
  4828  		v.Aux = mergeSym(sym1, sym2)
  4829  		v.AddArg(x)
  4830  		v.AddArg(y)
  4831  		return true
  4832  	}
  4833  	return false
  4834  }
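// Editorial note (a sketch, not part of the generated rules): the
// canMergeSym guard on the LEAQ rules means at most one of the two offsets
// may carry a symbol, and mergeSym then keeps whichever one is set. The
// helpers' contract, sketched under that assumption:
//
//	func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }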
  4835  func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
  4836  	b := v.Block
  4837  	_ = b
  4838  	// match: (Leq16  x y)
  4839  	// cond:
  4840  	// result: (SETLE (CMPW x y))
  4841  	for {
  4842  		x := v.Args[0]
  4843  		y := v.Args[1]
  4844  		v.reset(OpAMD64SETLE)
  4845  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  4846  		v0.AddArg(x)
  4847  		v0.AddArg(y)
  4848  		v.AddArg(v0)
  4849  		return true
  4850  	}
  4851  }
  4852  func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
  4853  	b := v.Block
  4854  	_ = b
  4855  	// match: (Leq16U x y)
  4856  	// cond:
  4857  	// result: (SETBE (CMPW x y))
  4858  	for {
  4859  		x := v.Args[0]
  4860  		y := v.Args[1]
  4861  		v.reset(OpAMD64SETBE)
  4862  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  4863  		v0.AddArg(x)
  4864  		v0.AddArg(y)
  4865  		v.AddArg(v0)
  4866  		return true
  4867  	}
  4868  }
  4869  func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
  4870  	b := v.Block
  4871  	_ = b
  4872  	// match: (Leq32  x y)
  4873  	// cond:
  4874  	// result: (SETLE (CMPL x y))
  4875  	for {
  4876  		x := v.Args[0]
  4877  		y := v.Args[1]
  4878  		v.reset(OpAMD64SETLE)
  4879  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  4880  		v0.AddArg(x)
  4881  		v0.AddArg(y)
  4882  		v.AddArg(v0)
  4883  		return true
  4884  	}
  4885  }
  4886  func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
  4887  	b := v.Block
  4888  	_ = b
  4889  	// match: (Leq32F x y)
  4890  	// cond:
  4891  	// result: (SETGEF (UCOMISS y x))
  4892  	for {
  4893  		x := v.Args[0]
  4894  		y := v.Args[1]
  4895  		v.reset(OpAMD64SETGEF)
  4896  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
  4897  		v0.AddArg(y)
  4898  		v0.AddArg(x)
  4899  		v.AddArg(v0)
  4900  		return true
  4901  	}
  4902  }
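// Editorial note (a sketch, not part of the generated rules): the float
// Leq/Less rules swap the compare operands and use the GE/G conditions
// (x <= y becomes y >= x) rather than a direct "less" test. After
// UCOMISS/UCOMISD a NaN operand sets the parity flag, which the
// "below"-style conditions would misread as true; the reversed
// SETGEF/SETGF forms stay false on NaN, matching Go's semantics.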
  4903  func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
  4904  	b := v.Block
  4905  	_ = b
  4906  	// match: (Leq32U x y)
  4907  	// cond:
  4908  	// result: (SETBE (CMPL x y))
  4909  	for {
  4910  		x := v.Args[0]
  4911  		y := v.Args[1]
  4912  		v.reset(OpAMD64SETBE)
  4913  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  4914  		v0.AddArg(x)
  4915  		v0.AddArg(y)
  4916  		v.AddArg(v0)
  4917  		return true
  4918  	}
  4919  }
  4920  func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
  4921  	b := v.Block
  4922  	_ = b
  4923  	// match: (Leq64  x y)
  4924  	// cond:
  4925  	// result: (SETLE (CMPQ x y))
  4926  	for {
  4927  		x := v.Args[0]
  4928  		y := v.Args[1]
  4929  		v.reset(OpAMD64SETLE)
  4930  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  4931  		v0.AddArg(x)
  4932  		v0.AddArg(y)
  4933  		v.AddArg(v0)
  4934  		return true
  4935  	}
  4936  }
  4937  func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
  4938  	b := v.Block
  4939  	_ = b
  4940  	// match: (Leq64F x y)
  4941  	// cond:
  4942  	// result: (SETGEF (UCOMISD y x))
  4943  	for {
  4944  		x := v.Args[0]
  4945  		y := v.Args[1]
  4946  		v.reset(OpAMD64SETGEF)
  4947  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
  4948  		v0.AddArg(y)
  4949  		v0.AddArg(x)
  4950  		v.AddArg(v0)
  4951  		return true
  4952  	}
  4953  }
  4954  func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
  4955  	b := v.Block
  4956  	_ = b
  4957  	// match: (Leq64U x y)
  4958  	// cond:
  4959  	// result: (SETBE (CMPQ x y))
  4960  	for {
  4961  		x := v.Args[0]
  4962  		y := v.Args[1]
  4963  		v.reset(OpAMD64SETBE)
  4964  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  4965  		v0.AddArg(x)
  4966  		v0.AddArg(y)
  4967  		v.AddArg(v0)
  4968  		return true
  4969  	}
  4970  }
  4971  func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
  4972  	b := v.Block
  4973  	_ = b
  4974  	// match: (Leq8   x y)
  4975  	// cond:
  4976  	// result: (SETLE (CMPB x y))
  4977  	for {
  4978  		x := v.Args[0]
  4979  		y := v.Args[1]
  4980  		v.reset(OpAMD64SETLE)
  4981  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  4982  		v0.AddArg(x)
  4983  		v0.AddArg(y)
  4984  		v.AddArg(v0)
  4985  		return true
  4986  	}
  4987  }
  4988  func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
  4989  	b := v.Block
  4990  	_ = b
  4991  	// match: (Leq8U  x y)
  4992  	// cond:
  4993  	// result: (SETBE (CMPB x y))
  4994  	for {
  4995  		x := v.Args[0]
  4996  		y := v.Args[1]
  4997  		v.reset(OpAMD64SETBE)
  4998  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  4999  		v0.AddArg(x)
  5000  		v0.AddArg(y)
  5001  		v.AddArg(v0)
  5002  		return true
  5003  	}
  5004  }
  5005  func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
  5006  	b := v.Block
  5007  	_ = b
  5008  	// match: (Less16  x y)
  5009  	// cond:
  5010  	// result: (SETL (CMPW x y))
  5011  	for {
  5012  		x := v.Args[0]
  5013  		y := v.Args[1]
  5014  		v.reset(OpAMD64SETL)
  5015  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  5016  		v0.AddArg(x)
  5017  		v0.AddArg(y)
  5018  		v.AddArg(v0)
  5019  		return true
  5020  	}
  5021  }
  5022  func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
  5023  	b := v.Block
  5024  	_ = b
  5025  	// match: (Less16U x y)
  5026  	// cond:
  5027  	// result: (SETB (CMPW x y))
  5028  	for {
  5029  		x := v.Args[0]
  5030  		y := v.Args[1]
  5031  		v.reset(OpAMD64SETB)
  5032  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
  5033  		v0.AddArg(x)
  5034  		v0.AddArg(y)
  5035  		v.AddArg(v0)
  5036  		return true
  5037  	}
  5038  }
  5039  func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
  5040  	b := v.Block
  5041  	_ = b
  5042  	// match: (Less32  x y)
  5043  	// cond:
  5044  	// result: (SETL (CMPL x y))
  5045  	for {
  5046  		x := v.Args[0]
  5047  		y := v.Args[1]
  5048  		v.reset(OpAMD64SETL)
  5049  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  5050  		v0.AddArg(x)
  5051  		v0.AddArg(y)
  5052  		v.AddArg(v0)
  5053  		return true
  5054  	}
  5055  }
  5056  func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
  5057  	b := v.Block
  5058  	_ = b
  5059  	// match: (Less32F x y)
  5060  	// cond:
  5061  	// result: (SETGF (UCOMISS y x))
  5062  	for {
  5063  		x := v.Args[0]
  5064  		y := v.Args[1]
  5065  		v.reset(OpAMD64SETGF)
  5066  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
  5067  		v0.AddArg(y)
  5068  		v0.AddArg(x)
  5069  		v.AddArg(v0)
  5070  		return true
  5071  	}
  5072  }
  5073  func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
  5074  	b := v.Block
  5075  	_ = b
  5076  	// match: (Less32U x y)
  5077  	// cond:
  5078  	// result: (SETB (CMPL x y))
  5079  	for {
  5080  		x := v.Args[0]
  5081  		y := v.Args[1]
  5082  		v.reset(OpAMD64SETB)
  5083  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
  5084  		v0.AddArg(x)
  5085  		v0.AddArg(y)
  5086  		v.AddArg(v0)
  5087  		return true
  5088  	}
  5089  }
  5090  func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
  5091  	b := v.Block
  5092  	_ = b
  5093  	// match: (Less64  x y)
  5094  	// cond:
  5095  	// result: (SETL (CMPQ x y))
  5096  	for {
  5097  		x := v.Args[0]
  5098  		y := v.Args[1]
  5099  		v.reset(OpAMD64SETL)
  5100  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  5101  		v0.AddArg(x)
  5102  		v0.AddArg(y)
  5103  		v.AddArg(v0)
  5104  		return true
  5105  	}
  5106  }
  5107  func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
  5108  	b := v.Block
  5109  	_ = b
  5110  	// match: (Less64F x y)
  5111  	// cond:
  5112  	// result: (SETGF (UCOMISD y x))
  5113  	for {
  5114  		x := v.Args[0]
  5115  		y := v.Args[1]
  5116  		v.reset(OpAMD64SETGF)
  5117  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
  5118  		v0.AddArg(y)
  5119  		v0.AddArg(x)
  5120  		v.AddArg(v0)
  5121  		return true
  5122  	}
  5123  }
  5124  func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
  5125  	b := v.Block
  5126  	_ = b
  5127  	// match: (Less64U x y)
  5128  	// cond:
  5129  	// result: (SETB (CMPQ x y))
  5130  	for {
  5131  		x := v.Args[0]
  5132  		y := v.Args[1]
  5133  		v.reset(OpAMD64SETB)
  5134  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
  5135  		v0.AddArg(x)
  5136  		v0.AddArg(y)
  5137  		v.AddArg(v0)
  5138  		return true
  5139  	}
  5140  }
  5141  func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
  5142  	b := v.Block
  5143  	_ = b
  5144  	// match: (Less8   x y)
  5145  	// cond:
  5146  	// result: (SETL (CMPB x y))
  5147  	for {
  5148  		x := v.Args[0]
  5149  		y := v.Args[1]
  5150  		v.reset(OpAMD64SETL)
  5151  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  5152  		v0.AddArg(x)
  5153  		v0.AddArg(y)
  5154  		v.AddArg(v0)
  5155  		return true
  5156  	}
  5157  }
  5158  func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
  5159  	b := v.Block
  5160  	_ = b
  5161  	// match: (Less8U  x y)
  5162  	// cond:
  5163  	// result: (SETB (CMPB x y))
  5164  	for {
  5165  		x := v.Args[0]
  5166  		y := v.Args[1]
  5167  		v.reset(OpAMD64SETB)
  5168  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
  5169  		v0.AddArg(x)
  5170  		v0.AddArg(y)
  5171  		v.AddArg(v0)
  5172  		return true
  5173  	}
  5174  }
  5175  func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
  5176  	b := v.Block
  5177  	_ = b
  5178  	// match: (Load <t> ptr mem)
  5179  	// cond: (is64BitInt(t) || isPtr(t))
  5180  	// result: (MOVQload ptr mem)
  5181  	for {
  5182  		t := v.Type
  5183  		ptr := v.Args[0]
  5184  		mem := v.Args[1]
  5185  		if !(is64BitInt(t) || isPtr(t)) {
  5186  			break
  5187  		}
  5188  		v.reset(OpAMD64MOVQload)
  5189  		v.AddArg(ptr)
  5190  		v.AddArg(mem)
  5191  		return true
  5192  	}
  5193  	// match: (Load <t> ptr mem)
  5194  	// cond: is32BitInt(t)
  5195  	// result: (MOVLload ptr mem)
  5196  	for {
  5197  		t := v.Type
  5198  		ptr := v.Args[0]
  5199  		mem := v.Args[1]
  5200  		if !(is32BitInt(t)) {
  5201  			break
  5202  		}
  5203  		v.reset(OpAMD64MOVLload)
  5204  		v.AddArg(ptr)
  5205  		v.AddArg(mem)
  5206  		return true
  5207  	}
  5208  	// match: (Load <t> ptr mem)
  5209  	// cond: is16BitInt(t)
  5210  	// result: (MOVWload ptr mem)
  5211  	for {
  5212  		t := v.Type
  5213  		ptr := v.Args[0]
  5214  		mem := v.Args[1]
  5215  		if !(is16BitInt(t)) {
  5216  			break
  5217  		}
  5218  		v.reset(OpAMD64MOVWload)
  5219  		v.AddArg(ptr)
  5220  		v.AddArg(mem)
  5221  		return true
  5222  	}
  5223  	// match: (Load <t> ptr mem)
  5224  	// cond: (t.IsBoolean() || is8BitInt(t))
  5225  	// result: (MOVBload ptr mem)
  5226  	for {
  5227  		t := v.Type
  5228  		ptr := v.Args[0]
  5229  		mem := v.Args[1]
  5230  		if !(t.IsBoolean() || is8BitInt(t)) {
  5231  			break
  5232  		}
  5233  		v.reset(OpAMD64MOVBload)
  5234  		v.AddArg(ptr)
  5235  		v.AddArg(mem)
  5236  		return true
  5237  	}
  5238  	// match: (Load <t> ptr mem)
  5239  	// cond: is32BitFloat(t)
  5240  	// result: (MOVSSload ptr mem)
  5241  	for {
  5242  		t := v.Type
  5243  		ptr := v.Args[0]
  5244  		mem := v.Args[1]
  5245  		if !(is32BitFloat(t)) {
  5246  			break
  5247  		}
  5248  		v.reset(OpAMD64MOVSSload)
  5249  		v.AddArg(ptr)
  5250  		v.AddArg(mem)
  5251  		return true
  5252  	}
  5253  	// match: (Load <t> ptr mem)
  5254  	// cond: is64BitFloat(t)
  5255  	// result: (MOVSDload ptr mem)
  5256  	for {
  5257  		t := v.Type
  5258  		ptr := v.Args[0]
  5259  		mem := v.Args[1]
  5260  		if !(is64BitFloat(t)) {
  5261  			break
  5262  		}
  5263  		v.reset(OpAMD64MOVSDload)
  5264  		v.AddArg(ptr)
  5265  		v.AddArg(mem)
  5266  		return true
  5267  	}
  5268  	return false
  5269  }
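// Editorial note (a sketch, not part of the generated rules): OpLoad
// dispatches purely on the type of the loaded value. Summarizing the rules
// above:
//
//	64-bit int or pointer -> MOVQload
//	32-bit int            -> MOVLload
//	16-bit int            -> MOVWload
//	bool or 8-bit int     -> MOVBload
//	32-bit float          -> MOVSSload
//	64-bit float          -> MOVSDload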
  5270  func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
  5271  	b := v.Block
  5272  	_ = b
  5273  	// match: (Lrot16 <t> x [c])
  5274  	// cond:
  5275  	// result: (ROLWconst <t> [c&15] x)
  5276  	for {
  5277  		t := v.Type
  5278  		x := v.Args[0]
  5279  		c := v.AuxInt
  5280  		v.reset(OpAMD64ROLWconst)
  5281  		v.Type = t
  5282  		v.AuxInt = c & 15
  5283  		v.AddArg(x)
  5284  		return true
  5285  	}
  5286  }
  5287  func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
  5288  	b := v.Block
  5289  	_ = b
  5290  	// match: (Lrot32 <t> x [c])
  5291  	// cond:
  5292  	// result: (ROLLconst <t> [c&31] x)
  5293  	for {
  5294  		t := v.Type
  5295  		x := v.Args[0]
  5296  		c := v.AuxInt
  5297  		v.reset(OpAMD64ROLLconst)
  5298  		v.Type = t
  5299  		v.AuxInt = c & 31
  5300  		v.AddArg(x)
  5301  		return true
  5302  	}
  5303  }
  5304  func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
  5305  	b := v.Block
  5306  	_ = b
  5307  	// match: (Lrot64 <t> x [c])
  5308  	// cond:
  5309  	// result: (ROLQconst <t> [c&63] x)
  5310  	for {
  5311  		t := v.Type
  5312  		x := v.Args[0]
  5313  		c := v.AuxInt
  5314  		v.reset(OpAMD64ROLQconst)
  5315  		v.Type = t
  5316  		v.AuxInt = c & 63
  5317  		v.AddArg(x)
  5318  		return true
  5319  	}
  5320  }
  5321  func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
  5322  	b := v.Block
  5323  	_ = b
  5324  	// match: (Lrot8  <t> x [c])
  5325  	// cond:
  5326  	// result: (ROLBconst <t> [c&7] x)
  5327  	for {
  5328  		t := v.Type
  5329  		x := v.Args[0]
  5330  		c := v.AuxInt
  5331  		v.reset(OpAMD64ROLBconst)
  5332  		v.Type = t
  5333  		v.AuxInt = c & 7
  5334  		v.AddArg(x)
  5335  		return true
  5336  	}
  5337  }
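// Editorial note (a sketch, not part of the generated rules): the rotate
// count is reduced modulo the operand width (c&63, c&31, c&15, c&7) because
// rotating an n-bit value by n is the identity, which matches what the ROL
// instructions do with their count. Source-level meaning of Lrot16, with a
// hypothetical helper name:
//
//	func lrot16(x uint16, c uint) uint16 { c &= 15; return x<<c | x>>(16-c) }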
  5338  func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
  5339  	b := v.Block
  5340  	_ = b
  5341  	// match: (Lsh16x16 <t> x y)
  5342  	// cond:
  5343  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
  5344  	for {
  5345  		t := v.Type
  5346  		x := v.Args[0]
  5347  		y := v.Args[1]
  5348  		v.reset(OpAMD64ANDL)
  5349  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5350  		v0.AddArg(x)
  5351  		v0.AddArg(y)
  5352  		v.AddArg(v0)
  5353  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5354  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
  5355  		v2.AddArg(y)
  5356  		v2.AuxInt = 32
  5357  		v1.AddArg(v2)
  5358  		v.AddArg(v1)
  5359  		return true
  5360  	}
  5361  }
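// Editorial note (a sketch, not part of the generated rules): the
// ANDL/SBBLcarrymask pattern exists because x86 shifts mask their count
// (SHLL uses only the low 5 bits) while Go defines oversized shifts to
// produce 0. CMPWconst y [32] sets the carry flag iff y < 32 unsigned, and
// SBBLcarrymask turns that carry into an all-ones or all-zeros mask
// (sbb r, r computes -CF). Comparing against 32 rather than 16 suffices for
// the 8- and 16-bit shifts, since truncating the shifted value back to 8 or
// 16 bits already zeroes counts in between. Equivalent Go, with a
// hypothetical helper name:
//
//	func lsh16x16(x, y uint16) uint16 {
//		shifted := uint32(x) << (y & 31) // SHLL: count masked to 5 bits
//		var mask uint32                  // SBBLcarrymask(CMPWconst y [32])
//		if uint32(y) < 32 {
//			mask = ^uint32(0)
//		}
//		return uint16(shifted & mask)
//	}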
  5362  func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
  5363  	b := v.Block
  5364  	_ = b
  5365  	// match: (Lsh16x32 <t> x y)
  5366  	// cond:
  5367  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
  5368  	for {
  5369  		t := v.Type
  5370  		x := v.Args[0]
  5371  		y := v.Args[1]
  5372  		v.reset(OpAMD64ANDL)
  5373  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5374  		v0.AddArg(x)
  5375  		v0.AddArg(y)
  5376  		v.AddArg(v0)
  5377  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5378  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
  5379  		v2.AddArg(y)
  5380  		v2.AuxInt = 32
  5381  		v1.AddArg(v2)
  5382  		v.AddArg(v1)
  5383  		return true
  5384  	}
  5385  }
  5386  func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
  5387  	b := v.Block
  5388  	_ = b
  5389  	// match: (Lsh16x64 <t> x y)
  5390  	// cond:
  5391  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
  5392  	for {
  5393  		t := v.Type
  5394  		x := v.Args[0]
  5395  		y := v.Args[1]
  5396  		v.reset(OpAMD64ANDL)
  5397  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5398  		v0.AddArg(x)
  5399  		v0.AddArg(y)
  5400  		v.AddArg(v0)
  5401  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5402  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
  5403  		v2.AddArg(y)
  5404  		v2.AuxInt = 32
  5405  		v1.AddArg(v2)
  5406  		v.AddArg(v1)
  5407  		return true
  5408  	}
  5409  }
  5410  func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
  5411  	b := v.Block
  5412  	_ = b
  5413  	// match: (Lsh16x8  <t> x y)
  5414  	// cond:
  5415  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
  5416  	for {
  5417  		t := v.Type
  5418  		x := v.Args[0]
  5419  		y := v.Args[1]
  5420  		v.reset(OpAMD64ANDL)
  5421  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5422  		v0.AddArg(x)
  5423  		v0.AddArg(y)
  5424  		v.AddArg(v0)
  5425  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5426  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
  5427  		v2.AddArg(y)
  5428  		v2.AuxInt = 32
  5429  		v1.AddArg(v2)
  5430  		v.AddArg(v1)
  5431  		return true
  5432  	}
  5433  }
  5434  func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
  5435  	b := v.Block
  5436  	_ = b
  5437  	// match: (Lsh32x16 <t> x y)
  5438  	// cond:
  5439  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
  5440  	for {
  5441  		t := v.Type
  5442  		x := v.Args[0]
  5443  		y := v.Args[1]
  5444  		v.reset(OpAMD64ANDL)
  5445  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5446  		v0.AddArg(x)
  5447  		v0.AddArg(y)
  5448  		v.AddArg(v0)
  5449  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5450  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
  5451  		v2.AddArg(y)
  5452  		v2.AuxInt = 32
  5453  		v1.AddArg(v2)
  5454  		v.AddArg(v1)
  5455  		return true
  5456  	}
  5457  }
  5458  func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
  5459  	b := v.Block
  5460  	_ = b
  5461  	// match: (Lsh32x32 <t> x y)
  5462  	// cond:
  5463  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
  5464  	for {
  5465  		t := v.Type
  5466  		x := v.Args[0]
  5467  		y := v.Args[1]
  5468  		v.reset(OpAMD64ANDL)
  5469  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5470  		v0.AddArg(x)
  5471  		v0.AddArg(y)
  5472  		v.AddArg(v0)
  5473  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5474  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
  5475  		v2.AddArg(y)
  5476  		v2.AuxInt = 32
  5477  		v1.AddArg(v2)
  5478  		v.AddArg(v1)
  5479  		return true
  5480  	}
  5481  }
  5482  func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
  5483  	b := v.Block
  5484  	_ = b
  5485  	// match: (Lsh32x64 <t> x y)
  5486  	// cond:
  5487  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
  5488  	for {
  5489  		t := v.Type
  5490  		x := v.Args[0]
  5491  		y := v.Args[1]
  5492  		v.reset(OpAMD64ANDL)
  5493  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5494  		v0.AddArg(x)
  5495  		v0.AddArg(y)
  5496  		v.AddArg(v0)
  5497  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5498  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
  5499  		v2.AddArg(y)
  5500  		v2.AuxInt = 32
  5501  		v1.AddArg(v2)
  5502  		v.AddArg(v1)
  5503  		return true
  5504  	}
  5505  }
  5506  func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
  5507  	b := v.Block
  5508  	_ = b
  5509  	// match: (Lsh32x8  <t> x y)
  5510  	// cond:
  5511  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
  5512  	for {
  5513  		t := v.Type
  5514  		x := v.Args[0]
  5515  		y := v.Args[1]
  5516  		v.reset(OpAMD64ANDL)
  5517  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5518  		v0.AddArg(x)
  5519  		v0.AddArg(y)
  5520  		v.AddArg(v0)
  5521  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5522  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
  5523  		v2.AddArg(y)
  5524  		v2.AuxInt = 32
  5525  		v1.AddArg(v2)
  5526  		v.AddArg(v1)
  5527  		return true
  5528  	}
  5529  }
  5530  func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
  5531  	b := v.Block
  5532  	_ = b
  5533  	// match: (Lsh64x16 <t> x y)
  5534  	// cond:
  5535  	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
  5536  	for {
  5537  		t := v.Type
  5538  		x := v.Args[0]
  5539  		y := v.Args[1]
  5540  		v.reset(OpAMD64ANDQ)
  5541  		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
  5542  		v0.AddArg(x)
  5543  		v0.AddArg(y)
  5544  		v.AddArg(v0)
  5545  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
  5546  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
  5547  		v2.AddArg(y)
  5548  		v2.AuxInt = 64
  5549  		v1.AddArg(v2)
  5550  		v.AddArg(v1)
  5551  		return true
  5552  	}
  5553  }
  5554  func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
  5555  	b := v.Block
  5556  	_ = b
  5557  	// match: (Lsh64x32 <t> x y)
  5558  	// cond:
  5559  	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
  5560  	for {
  5561  		t := v.Type
  5562  		x := v.Args[0]
  5563  		y := v.Args[1]
  5564  		v.reset(OpAMD64ANDQ)
  5565  		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
  5566  		v0.AddArg(x)
  5567  		v0.AddArg(y)
  5568  		v.AddArg(v0)
  5569  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
  5570  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
  5571  		v2.AddArg(y)
  5572  		v2.AuxInt = 64
  5573  		v1.AddArg(v2)
  5574  		v.AddArg(v1)
  5575  		return true
  5576  	}
  5577  }
  5578  func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
  5579  	b := v.Block
  5580  	_ = b
  5581  	// match: (Lsh64x64 <t> x y)
  5582  	// cond:
  5583  	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
  5584  	for {
  5585  		t := v.Type
  5586  		x := v.Args[0]
  5587  		y := v.Args[1]
  5588  		v.reset(OpAMD64ANDQ)
  5589  		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
  5590  		v0.AddArg(x)
  5591  		v0.AddArg(y)
  5592  		v.AddArg(v0)
  5593  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
  5594  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
  5595  		v2.AddArg(y)
  5596  		v2.AuxInt = 64
  5597  		v1.AddArg(v2)
  5598  		v.AddArg(v1)
  5599  		return true
  5600  	}
  5601  }
  5602  func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
  5603  	b := v.Block
  5604  	_ = b
  5605  	// match: (Lsh64x8  <t> x y)
  5606  	// cond:
  5607  	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
  5608  	for {
  5609  		t := v.Type
  5610  		x := v.Args[0]
  5611  		y := v.Args[1]
  5612  		v.reset(OpAMD64ANDQ)
  5613  		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
  5614  		v0.AddArg(x)
  5615  		v0.AddArg(y)
  5616  		v.AddArg(v0)
  5617  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
  5618  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
  5619  		v2.AddArg(y)
  5620  		v2.AuxInt = 64
  5621  		v1.AddArg(v2)
  5622  		v.AddArg(v1)
  5623  		return true
  5624  	}
  5625  }
  5626  func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
  5627  	b := v.Block
  5628  	_ = b
  5629  	// match: (Lsh8x16 <t> x y)
  5630  	// cond:
  5631  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
  5632  	for {
  5633  		t := v.Type
  5634  		x := v.Args[0]
  5635  		y := v.Args[1]
  5636  		v.reset(OpAMD64ANDL)
  5637  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5638  		v0.AddArg(x)
  5639  		v0.AddArg(y)
  5640  		v.AddArg(v0)
  5641  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5642  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
  5643  		v2.AddArg(y)
  5644  		v2.AuxInt = 32
  5645  		v1.AddArg(v2)
  5646  		v.AddArg(v1)
  5647  		return true
  5648  	}
  5649  }
  5650  func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
  5651  	b := v.Block
  5652  	_ = b
  5653  	// match: (Lsh8x32 <t> x y)
  5654  	// cond:
  5655  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
  5656  	for {
  5657  		t := v.Type
  5658  		x := v.Args[0]
  5659  		y := v.Args[1]
  5660  		v.reset(OpAMD64ANDL)
  5661  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5662  		v0.AddArg(x)
  5663  		v0.AddArg(y)
  5664  		v.AddArg(v0)
  5665  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5666  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
  5667  		v2.AddArg(y)
  5668  		v2.AuxInt = 32
  5669  		v1.AddArg(v2)
  5670  		v.AddArg(v1)
  5671  		return true
  5672  	}
  5673  }
  5674  func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
  5675  	b := v.Block
  5676  	_ = b
  5677  	// match: (Lsh8x64 <t> x y)
  5678  	// cond:
  5679  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
  5680  	for {
  5681  		t := v.Type
  5682  		x := v.Args[0]
  5683  		y := v.Args[1]
  5684  		v.reset(OpAMD64ANDL)
  5685  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5686  		v0.AddArg(x)
  5687  		v0.AddArg(y)
  5688  		v.AddArg(v0)
  5689  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5690  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
  5691  		v2.AddArg(y)
  5692  		v2.AuxInt = 32
  5693  		v1.AddArg(v2)
  5694  		v.AddArg(v1)
  5695  		return true
  5696  	}
  5697  }
  5698  func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
  5699  	b := v.Block
  5700  	_ = b
  5701  	// match: (Lsh8x8  <t> x y)
  5702  	// cond:
  5703  	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
  5704  	for {
  5705  		t := v.Type
  5706  		x := v.Args[0]
  5707  		y := v.Args[1]
  5708  		v.reset(OpAMD64ANDL)
  5709  		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
  5710  		v0.AddArg(x)
  5711  		v0.AddArg(y)
  5712  		v.AddArg(v0)
  5713  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
  5714  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
  5715  		v2.AddArg(y)
  5716  		v2.AuxInt = 32
  5717  		v1.AddArg(v2)
  5718  		v.AddArg(v1)
  5719  		return true
  5720  	}
  5721  }
  5722  func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
  5723  	b := v.Block
  5724  	_ = b
  5725  	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
  5726  	// cond: x.Uses == 1 && clobber(x)
  5727  	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  5728  	for {
  5729  		x := v.Args[0]
  5730  		if x.Op != OpAMD64MOVBload {
  5731  			break
  5732  		}
  5733  		off := x.AuxInt
  5734  		sym := x.Aux
  5735  		ptr := x.Args[0]
  5736  		mem := x.Args[1]
  5737  		if !(x.Uses == 1 && clobber(x)) {
  5738  			break
  5739  		}
  5740  		b = x.Block
  5741  		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
  5742  		v.reset(OpCopy)
  5743  		v.AddArg(v0)
  5744  		v0.AuxInt = off
  5745  		v0.Aux = sym
  5746  		v0.AddArg(ptr)
  5747  		v0.AddArg(mem)
  5748  		return true
  5749  	}
  5750  	// match: (MOVBQSX (ANDLconst [c] x))
  5751  	// cond: c & 0x80 == 0
  5752  	// result: (ANDLconst [c & 0x7f] x)
  5753  	for {
  5754  		v_0 := v.Args[0]
  5755  		if v_0.Op != OpAMD64ANDLconst {
  5756  			break
  5757  		}
  5758  		c := v_0.AuxInt
  5759  		x := v_0.Args[0]
  5760  		if !(c&0x80 == 0) {
  5761  			break
  5762  		}
  5763  		v.reset(OpAMD64ANDLconst)
  5764  		v.AuxInt = c & 0x7f
  5765  		v.AddArg(x)
  5766  		return true
  5767  	}
  5768  	return false
  5769  }
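// Editorial note (a sketch, not part of the generated rules):
// sign-extension disappears in two ways above. If the extended value is a
// single-use byte load, the extension folds into the load itself
// (MOVBQSXload), re-created in the load's own block via @x.Block; clobber(x)
// is a rewrite helper that marks the old load dead and always reports true.
// If the operand is an ANDLconst whose mask leaves bit 7 clear, the byte is
// already non-negative, so the extension is dropped and the mask is narrowed
// to seven bits.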
  5770  func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
  5771  	b := v.Block
  5772  	_ = b
  5773  	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  5774  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5775  	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  5776  	for {
  5777  		off1 := v.AuxInt
  5778  		sym1 := v.Aux
  5779  		v_0 := v.Args[0]
  5780  		if v_0.Op != OpAMD64LEAQ {
  5781  			break
  5782  		}
  5783  		off2 := v_0.AuxInt
  5784  		sym2 := v_0.Aux
  5785  		base := v_0.Args[0]
  5786  		mem := v.Args[1]
  5787  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5788  			break
  5789  		}
  5790  		v.reset(OpAMD64MOVBQSXload)
  5791  		v.AuxInt = off1 + off2
  5792  		v.Aux = mergeSym(sym1, sym2)
  5793  		v.AddArg(base)
  5794  		v.AddArg(mem)
  5795  		return true
  5796  	}
  5797  	return false
  5798  }
  5799  func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
  5800  	b := v.Block
  5801  	_ = b
  5802  	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
  5803  	// cond: x.Uses == 1 && clobber(x)
  5804  	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  5805  	for {
  5806  		x := v.Args[0]
  5807  		if x.Op != OpAMD64MOVBload {
  5808  			break
  5809  		}
  5810  		off := x.AuxInt
  5811  		sym := x.Aux
  5812  		ptr := x.Args[0]
  5813  		mem := x.Args[1]
  5814  		if !(x.Uses == 1 && clobber(x)) {
  5815  			break
  5816  		}
  5817  		b = x.Block
  5818  		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
  5819  		v.reset(OpCopy)
  5820  		v.AddArg(v0)
  5821  		v0.AuxInt = off
  5822  		v0.Aux = sym
  5823  		v0.AddArg(ptr)
  5824  		v0.AddArg(mem)
  5825  		return true
  5826  	}
  5827  	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
  5828  	// cond: x.Uses == 1 && clobber(x)
  5829  	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
  5830  	for {
  5831  		x := v.Args[0]
  5832  		if x.Op != OpAMD64MOVBloadidx1 {
  5833  			break
  5834  		}
  5835  		off := x.AuxInt
  5836  		sym := x.Aux
  5837  		ptr := x.Args[0]
  5838  		idx := x.Args[1]
  5839  		mem := x.Args[2]
  5840  		if !(x.Uses == 1 && clobber(x)) {
  5841  			break
  5842  		}
  5843  		b = x.Block
  5844  		v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type)
  5845  		v.reset(OpCopy)
  5846  		v.AddArg(v0)
  5847  		v0.AuxInt = off
  5848  		v0.Aux = sym
  5849  		v0.AddArg(ptr)
  5850  		v0.AddArg(idx)
  5851  		v0.AddArg(mem)
  5852  		return true
  5853  	}
  5854  	// match: (MOVBQZX (ANDLconst [c] x))
  5855  	// cond:
  5856  	// result: (ANDLconst [c & 0xff] x)
  5857  	for {
  5858  		v_0 := v.Args[0]
  5859  		if v_0.Op != OpAMD64ANDLconst {
  5860  			break
  5861  		}
  5862  		c := v_0.AuxInt
  5863  		x := v_0.Args[0]
  5864  		v.reset(OpAMD64ANDLconst)
  5865  		v.AuxInt = c & 0xff
  5866  		v.AddArg(x)
  5867  		return true
  5868  	}
  5869  	return false
  5870  }
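// Editorial note (a sketch, not part of the generated rules): MOVBQZX
// mirrors the sign-extension rules. Zero-extension folds into plain and
// indexed byte loads, and an ANDLconst operand absorbs it outright, since
// masking with c & 0xff already clears every bit above the low byte.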
  5871  func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
  5872  	b := v.Block
  5873  	_ = b
  5874  	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
  5875  	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
  5876  	// result: x
  5877  	for {
  5878  		off := v.AuxInt
  5879  		sym := v.Aux
  5880  		ptr := v.Args[0]
  5881  		v_1 := v.Args[1]
  5882  		if v_1.Op != OpAMD64MOVBstore {
  5883  			break
  5884  		}
  5885  		off2 := v_1.AuxInt
  5886  		sym2 := v_1.Aux
  5887  		ptr2 := v_1.Args[0]
  5888  		x := v_1.Args[1]
  5889  		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
  5890  			break
  5891  		}
  5892  		v.reset(OpCopy)
  5893  		v.Type = x.Type
  5894  		v.AddArg(x)
  5895  		return true
  5896  	}
  5897  	// match: (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem)
  5898  	// cond: is32Bit(off1+off2)
  5899  	// result: (MOVBload  [off1+off2] {sym} ptr mem)
  5900  	for {
  5901  		off1 := v.AuxInt
  5902  		sym := v.Aux
  5903  		v_0 := v.Args[0]
  5904  		if v_0.Op != OpAMD64ADDQconst {
  5905  			break
  5906  		}
  5907  		off2 := v_0.AuxInt
  5908  		ptr := v_0.Args[0]
  5909  		mem := v.Args[1]
  5910  		if !(is32Bit(off1 + off2)) {
  5911  			break
  5912  		}
  5913  		v.reset(OpAMD64MOVBload)
  5914  		v.AuxInt = off1 + off2
  5915  		v.Aux = sym
  5916  		v.AddArg(ptr)
  5917  		v.AddArg(mem)
  5918  		return true
  5919  	}
  5920  	// match: (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  5921  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5922  	// result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  5923  	for {
  5924  		off1 := v.AuxInt
  5925  		sym1 := v.Aux
  5926  		v_0 := v.Args[0]
  5927  		if v_0.Op != OpAMD64LEAQ {
  5928  			break
  5929  		}
  5930  		off2 := v_0.AuxInt
  5931  		sym2 := v_0.Aux
  5932  		base := v_0.Args[0]
  5933  		mem := v.Args[1]
  5934  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5935  			break
  5936  		}
  5937  		v.reset(OpAMD64MOVBload)
  5938  		v.AuxInt = off1 + off2
  5939  		v.Aux = mergeSym(sym1, sym2)
  5940  		v.AddArg(base)
  5941  		v.AddArg(mem)
  5942  		return true
  5943  	}
  5944  	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
  5945  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5946  	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  5947  	for {
  5948  		off1 := v.AuxInt
  5949  		sym1 := v.Aux
  5950  		v_0 := v.Args[0]
  5951  		if v_0.Op != OpAMD64LEAQ1 {
  5952  			break
  5953  		}
  5954  		off2 := v_0.AuxInt
  5955  		sym2 := v_0.Aux
  5956  		ptr := v_0.Args[0]
  5957  		idx := v_0.Args[1]
  5958  		mem := v.Args[1]
  5959  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5960  			break
  5961  		}
  5962  		v.reset(OpAMD64MOVBloadidx1)
  5963  		v.AuxInt = off1 + off2
  5964  		v.Aux = mergeSym(sym1, sym2)
  5965  		v.AddArg(ptr)
  5966  		v.AddArg(idx)
  5967  		v.AddArg(mem)
  5968  		return true
  5969  	}
  5970  	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
  5971  	// cond: ptr.Op != OpSB
  5972  	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
  5973  	for {
  5974  		off := v.AuxInt
  5975  		sym := v.Aux
  5976  		v_0 := v.Args[0]
  5977  		if v_0.Op != OpAMD64ADDQ {
  5978  			break
  5979  		}
  5980  		ptr := v_0.Args[0]
  5981  		idx := v_0.Args[1]
  5982  		mem := v.Args[1]
  5983  		if !(ptr.Op != OpSB) {
  5984  			break
  5985  		}
  5986  		v.reset(OpAMD64MOVBloadidx1)
  5987  		v.AuxInt = off
  5988  		v.Aux = sym
  5989  		v.AddArg(ptr)
  5990  		v.AddArg(idx)
  5991  		v.AddArg(mem)
  5992  		return true
  5993  	}
  5994  	return false
  5995  }
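// Note (editorial, not generator output): the first MOVBload rule is
// store-to-load forwarding: a load whose memory argument is a store of the
// same byte at the same address reuses the stored value outright. A sketch of
// code where this can fire (illustrative only):
//
//	func roundTrip(p *byte, v byte) byte {
//		*p = v
//		return *p // forwarded; no second memory access required
//	}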
  5996  func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
  5997  	b := v.Block
  5998  	_ = b
  5999  	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
  6000  	// cond:
  6001  	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
  6002  	for {
  6003  		c := v.AuxInt
  6004  		sym := v.Aux
  6005  		v_0 := v.Args[0]
  6006  		if v_0.Op != OpAMD64ADDQconst {
  6007  			break
  6008  		}
  6009  		d := v_0.AuxInt
  6010  		ptr := v_0.Args[0]
  6011  		idx := v.Args[1]
  6012  		mem := v.Args[2]
  6013  		v.reset(OpAMD64MOVBloadidx1)
  6014  		v.AuxInt = c + d
  6015  		v.Aux = sym
  6016  		v.AddArg(ptr)
  6017  		v.AddArg(idx)
  6018  		v.AddArg(mem)
  6019  		return true
  6020  	}
  6021  	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
  6022  	// cond:
  6023  	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
  6024  	for {
  6025  		c := v.AuxInt
  6026  		sym := v.Aux
  6027  		ptr := v.Args[0]
  6028  		v_1 := v.Args[1]
  6029  		if v_1.Op != OpAMD64ADDQconst {
  6030  			break
  6031  		}
  6032  		d := v_1.AuxInt
  6033  		idx := v_1.Args[0]
  6034  		mem := v.Args[2]
  6035  		v.reset(OpAMD64MOVBloadidx1)
  6036  		v.AuxInt = c + d
  6037  		v.Aux = sym
  6038  		v.AddArg(ptr)
  6039  		v.AddArg(idx)
  6040  		v.AddArg(mem)
  6041  		return true
  6042  	}
  6043  	return false
  6044  }
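// Note (editorial, not generator output): for the scale-1 indexed forms the
// effective address is
//
//	addr = ptr + idx + AuxInt
//
// so a constant d added to either ptr or idx migrates into AuxInt unchanged:
// (ptr+d) + idx + c == ptr + idx + (c+d). Contrast the scale-4 forms later in
// this file, where a constant on idx must first be multiplied by the scale.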
  6045  func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
  6046  	b := v.Block
  6047  	_ = b
  6048  	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
  6049  	// cond:
  6050  	// result: (MOVBstore [off] {sym} ptr x mem)
  6051  	for {
  6052  		off := v.AuxInt
  6053  		sym := v.Aux
  6054  		ptr := v.Args[0]
  6055  		v_1 := v.Args[1]
  6056  		if v_1.Op != OpAMD64MOVBQSX {
  6057  			break
  6058  		}
  6059  		x := v_1.Args[0]
  6060  		mem := v.Args[2]
  6061  		v.reset(OpAMD64MOVBstore)
  6062  		v.AuxInt = off
  6063  		v.Aux = sym
  6064  		v.AddArg(ptr)
  6065  		v.AddArg(x)
  6066  		v.AddArg(mem)
  6067  		return true
  6068  	}
  6069  	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
  6070  	// cond:
  6071  	// result: (MOVBstore [off] {sym} ptr x mem)
  6072  	for {
  6073  		off := v.AuxInt
  6074  		sym := v.Aux
  6075  		ptr := v.Args[0]
  6076  		v_1 := v.Args[1]
  6077  		if v_1.Op != OpAMD64MOVBQZX {
  6078  			break
  6079  		}
  6080  		x := v_1.Args[0]
  6081  		mem := v.Args[2]
  6082  		v.reset(OpAMD64MOVBstore)
  6083  		v.AuxInt = off
  6084  		v.Aux = sym
  6085  		v.AddArg(ptr)
  6086  		v.AddArg(x)
  6087  		v.AddArg(mem)
  6088  		return true
  6089  	}
  6090  	// match: (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
  6091  	// cond: is32Bit(off1+off2)
  6092  	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
  6093  	for {
  6094  		off1 := v.AuxInt
  6095  		sym := v.Aux
  6096  		v_0 := v.Args[0]
  6097  		if v_0.Op != OpAMD64ADDQconst {
  6098  			break
  6099  		}
  6100  		off2 := v_0.AuxInt
  6101  		ptr := v_0.Args[0]
  6102  		val := v.Args[1]
  6103  		mem := v.Args[2]
  6104  		if !(is32Bit(off1 + off2)) {
  6105  			break
  6106  		}
  6107  		v.reset(OpAMD64MOVBstore)
  6108  		v.AuxInt = off1 + off2
  6109  		v.Aux = sym
  6110  		v.AddArg(ptr)
  6111  		v.AddArg(val)
  6112  		v.AddArg(mem)
  6113  		return true
  6114  	}
  6115  	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
  6116  	// cond: validOff(off)
  6117  	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
  6118  	for {
  6119  		off := v.AuxInt
  6120  		sym := v.Aux
  6121  		ptr := v.Args[0]
  6122  		v_1 := v.Args[1]
  6123  		if v_1.Op != OpAMD64MOVLconst {
  6124  			break
  6125  		}
  6126  		c := v_1.AuxInt
  6127  		mem := v.Args[2]
  6128  		if !(validOff(off)) {
  6129  			break
  6130  		}
  6131  		v.reset(OpAMD64MOVBstoreconst)
  6132  		v.AuxInt = makeValAndOff(int64(int8(c)), off)
  6133  		v.Aux = sym
  6134  		v.AddArg(ptr)
  6135  		v.AddArg(mem)
  6136  		return true
  6137  	}
  6138  	// match: (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  6139  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  6140  	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  6141  	for {
  6142  		off1 := v.AuxInt
  6143  		sym1 := v.Aux
  6144  		v_0 := v.Args[0]
  6145  		if v_0.Op != OpAMD64LEAQ {
  6146  			break
  6147  		}
  6148  		off2 := v_0.AuxInt
  6149  		sym2 := v_0.Aux
  6150  		base := v_0.Args[0]
  6151  		val := v.Args[1]
  6152  		mem := v.Args[2]
  6153  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  6154  			break
  6155  		}
  6156  		v.reset(OpAMD64MOVBstore)
  6157  		v.AuxInt = off1 + off2
  6158  		v.Aux = mergeSym(sym1, sym2)
  6159  		v.AddArg(base)
  6160  		v.AddArg(val)
  6161  		v.AddArg(mem)
  6162  		return true
  6163  	}
  6164  	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
  6165  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  6166  	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  6167  	for {
  6168  		off1 := v.AuxInt
  6169  		sym1 := v.Aux
  6170  		v_0 := v.Args[0]
  6171  		if v_0.Op != OpAMD64LEAQ1 {
  6172  			break
  6173  		}
  6174  		off2 := v_0.AuxInt
  6175  		sym2 := v_0.Aux
  6176  		ptr := v_0.Args[0]
  6177  		idx := v_0.Args[1]
  6178  		val := v.Args[1]
  6179  		mem := v.Args[2]
  6180  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  6181  			break
  6182  		}
  6183  		v.reset(OpAMD64MOVBstoreidx1)
  6184  		v.AuxInt = off1 + off2
  6185  		v.Aux = mergeSym(sym1, sym2)
  6186  		v.AddArg(ptr)
  6187  		v.AddArg(idx)
  6188  		v.AddArg(val)
  6189  		v.AddArg(mem)
  6190  		return true
  6191  	}
  6192  	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
  6193  	// cond: ptr.Op != OpSB
  6194  	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
  6195  	for {
  6196  		off := v.AuxInt
  6197  		sym := v.Aux
  6198  		v_0 := v.Args[0]
  6199  		if v_0.Op != OpAMD64ADDQ {
  6200  			break
  6201  		}
  6202  		ptr := v_0.Args[0]
  6203  		idx := v_0.Args[1]
  6204  		val := v.Args[1]
  6205  		mem := v.Args[2]
  6206  		if !(ptr.Op != OpSB) {
  6207  			break
  6208  		}
  6209  		v.reset(OpAMD64MOVBstoreidx1)
  6210  		v.AuxInt = off
  6211  		v.Aux = sym
  6212  		v.AddArg(ptr)
  6213  		v.AddArg(idx)
  6214  		v.AddArg(val)
  6215  		v.AddArg(mem)
  6216  		return true
  6217  	}
  6218  	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  6219  	// cond: x.Uses == 1 && clobber(x)
  6220  	// result: (MOVWstore [i-1] {s} p w mem)
  6221  	for {
  6222  		i := v.AuxInt
  6223  		s := v.Aux
  6224  		p := v.Args[0]
  6225  		v_1 := v.Args[1]
  6226  		if v_1.Op != OpAMD64SHRQconst {
  6227  			break
  6228  		}
  6229  		if v_1.AuxInt != 8 {
  6230  			break
  6231  		}
  6232  		w := v_1.Args[0]
  6233  		x := v.Args[2]
  6234  		if x.Op != OpAMD64MOVBstore {
  6235  			break
  6236  		}
  6237  		if x.AuxInt != i-1 {
  6238  			break
  6239  		}
  6240  		if x.Aux != s {
  6241  			break
  6242  		}
  6243  		if p != x.Args[0] {
  6244  			break
  6245  		}
  6246  		if w != x.Args[1] {
  6247  			break
  6248  		}
  6249  		mem := x.Args[2]
  6250  		if !(x.Uses == 1 && clobber(x)) {
  6251  			break
  6252  		}
  6253  		v.reset(OpAMD64MOVWstore)
  6254  		v.AuxInt = i - 1
  6255  		v.Aux = s
  6256  		v.AddArg(p)
  6257  		v.AddArg(w)
  6258  		v.AddArg(mem)
  6259  		return true
  6260  	}
  6261  	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
  6262  	// cond: x.Uses == 1 && clobber(x)
  6263  	// result: (MOVWstore [i-1] {s} p w0 mem)
  6264  	for {
  6265  		i := v.AuxInt
  6266  		s := v.Aux
  6267  		p := v.Args[0]
  6268  		v_1 := v.Args[1]
  6269  		if v_1.Op != OpAMD64SHRQconst {
  6270  			break
  6271  		}
  6272  		j := v_1.AuxInt
  6273  		w := v_1.Args[0]
  6274  		x := v.Args[2]
  6275  		if x.Op != OpAMD64MOVBstore {
  6276  			break
  6277  		}
  6278  		if x.AuxInt != i-1 {
  6279  			break
  6280  		}
  6281  		if x.Aux != s {
  6282  			break
  6283  		}
  6284  		if p != x.Args[0] {
  6285  			break
  6286  		}
  6287  		w0 := x.Args[1]
  6288  		if w0.Op != OpAMD64SHRQconst {
  6289  			break
  6290  		}
  6291  		if w0.AuxInt != j-8 {
  6292  			break
  6293  		}
  6294  		if w != w0.Args[0] {
  6295  			break
  6296  		}
  6297  		mem := x.Args[2]
  6298  		if !(x.Uses == 1 && clobber(x)) {
  6299  			break
  6300  		}
  6301  		v.reset(OpAMD64MOVWstore)
  6302  		v.AuxInt = i - 1
  6303  		v.Aux = s
  6304  		v.AddArg(p)
  6305  		v.AddArg(w0)
  6306  		v.AddArg(mem)
  6307  		return true
  6308  	}
  6309  	return false
  6310  }
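// Note (editorial, not generator output): the last two MOVBstore rules merge a
// pair of adjacent byte stores of w and w>>8 into one 16-bit store, which is
// correct on little-endian amd64. A hypothetical source pattern that matches
// (the standard PutUint16 shape):
//
//	func putUint16(b []byte, w uint16) {
//		b[0] = byte(w)
//		b[1] = byte(w >> 8) // the pair may become a single MOVW store
//	}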
  6311  func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
  6312  	b := v.Block
  6313  	_ = b
  6314  	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
  6315  	// cond: ValAndOff(sc).canAdd(off)
  6316  	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  6317  	for {
  6318  		sc := v.AuxInt
  6319  		s := v.Aux
  6320  		v_0 := v.Args[0]
  6321  		if v_0.Op != OpAMD64ADDQconst {
  6322  			break
  6323  		}
  6324  		off := v_0.AuxInt
  6325  		ptr := v_0.Args[0]
  6326  		mem := v.Args[1]
  6327  		if !(ValAndOff(sc).canAdd(off)) {
  6328  			break
  6329  		}
  6330  		v.reset(OpAMD64MOVBstoreconst)
  6331  		v.AuxInt = ValAndOff(sc).add(off)
  6332  		v.Aux = s
  6333  		v.AddArg(ptr)
  6334  		v.AddArg(mem)
  6335  		return true
  6336  	}
  6337  	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
  6338  	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
  6339  	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  6340  	for {
  6341  		sc := v.AuxInt
  6342  		sym1 := v.Aux
  6343  		v_0 := v.Args[0]
  6344  		if v_0.Op != OpAMD64LEAQ {
  6345  			break
  6346  		}
  6347  		off := v_0.AuxInt
  6348  		sym2 := v_0.Aux
  6349  		ptr := v_0.Args[0]
  6350  		mem := v.Args[1]
  6351  		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
  6352  			break
  6353  		}
  6354  		v.reset(OpAMD64MOVBstoreconst)
  6355  		v.AuxInt = ValAndOff(sc).add(off)
  6356  		v.Aux = mergeSym(sym1, sym2)
  6357  		v.AddArg(ptr)
  6358  		v.AddArg(mem)
  6359  		return true
  6360  	}
  6361  	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
  6362  	// cond: canMergeSym(sym1, sym2)
  6363  	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  6364  	for {
  6365  		x := v.AuxInt
  6366  		sym1 := v.Aux
  6367  		v_0 := v.Args[0]
  6368  		if v_0.Op != OpAMD64LEAQ1 {
  6369  			break
  6370  		}
  6371  		off := v_0.AuxInt
  6372  		sym2 := v_0.Aux
  6373  		ptr := v_0.Args[0]
  6374  		idx := v_0.Args[1]
  6375  		mem := v.Args[1]
  6376  		if !(canMergeSym(sym1, sym2)) {
  6377  			break
  6378  		}
  6379  		v.reset(OpAMD64MOVBstoreconstidx1)
  6380  		v.AuxInt = ValAndOff(x).add(off)
  6381  		v.Aux = mergeSym(sym1, sym2)
  6382  		v.AddArg(ptr)
  6383  		v.AddArg(idx)
  6384  		v.AddArg(mem)
  6385  		return true
  6386  	}
  6387  	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
  6388  	// cond:
  6389  	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
  6390  	for {
  6391  		x := v.AuxInt
  6392  		sym := v.Aux
  6393  		v_0 := v.Args[0]
  6394  		if v_0.Op != OpAMD64ADDQ {
  6395  			break
  6396  		}
  6397  		ptr := v_0.Args[0]
  6398  		idx := v_0.Args[1]
  6399  		mem := v.Args[1]
  6400  		v.reset(OpAMD64MOVBstoreconstidx1)
  6401  		v.AuxInt = x
  6402  		v.Aux = sym
  6403  		v.AddArg(ptr)
  6404  		v.AddArg(idx)
  6405  		v.AddArg(mem)
  6406  		return true
  6407  	}
  6408  	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  6409  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
  6410  	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
  6411  	for {
  6412  		c := v.AuxInt
  6413  		s := v.Aux
  6414  		p := v.Args[0]
  6415  		x := v.Args[1]
  6416  		if x.Op != OpAMD64MOVBstoreconst {
  6417  			break
  6418  		}
  6419  		a := x.AuxInt
  6420  		if x.Aux != s {
  6421  			break
  6422  		}
  6423  		if p != x.Args[0] {
  6424  			break
  6425  		}
  6426  		mem := x.Args[1]
  6427  		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
  6428  			break
  6429  		}
  6430  		v.reset(OpAMD64MOVWstoreconst)
  6431  		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
  6432  		v.Aux = s
  6433  		v.AddArg(p)
  6434  		v.AddArg(mem)
  6435  		return true
  6436  	}
  6437  	return false
  6438  }
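// Note (editorial, not generator output): MOVBstoreconst packs both the
// immediate and the offset into AuxInt via ValAndOff. A minimal sketch of the
// packing, assuming the usual 32/32 split (the real type lives elsewhere in
// this package):
//
//	type ValAndOff int64 // value in the high 32 bits, offset in the low 32
//
//	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
//	func (x ValAndOff) Off() int64 { return int64(int32(x)) }
//
// The final rule above uses this to fuse two adjacent constant byte stores
// into one MOVWstoreconst, assembling the 16-bit immediate little-endian as
// ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8.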
  6439  func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
  6440  	b := v.Block
  6441  	_ = b
  6442  	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
  6443  	// cond:
  6444  	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  6445  	for {
  6446  		x := v.AuxInt
  6447  		sym := v.Aux
  6448  		v_0 := v.Args[0]
  6449  		if v_0.Op != OpAMD64ADDQconst {
  6450  			break
  6451  		}
  6452  		c := v_0.AuxInt
  6453  		ptr := v_0.Args[0]
  6454  		idx := v.Args[1]
  6455  		mem := v.Args[2]
  6456  		v.reset(OpAMD64MOVBstoreconstidx1)
  6457  		v.AuxInt = ValAndOff(x).add(c)
  6458  		v.Aux = sym
  6459  		v.AddArg(ptr)
  6460  		v.AddArg(idx)
  6461  		v.AddArg(mem)
  6462  		return true
  6463  	}
  6464  	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
  6465  	// cond:
  6466  	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  6467  	for {
  6468  		x := v.AuxInt
  6469  		sym := v.Aux
  6470  		ptr := v.Args[0]
  6471  		v_1 := v.Args[1]
  6472  		if v_1.Op != OpAMD64ADDQconst {
  6473  			break
  6474  		}
  6475  		c := v_1.AuxInt
  6476  		idx := v_1.Args[0]
  6477  		mem := v.Args[2]
  6478  		v.reset(OpAMD64MOVBstoreconstidx1)
  6479  		v.AuxInt = ValAndOff(x).add(c)
  6480  		v.Aux = sym
  6481  		v.AddArg(ptr)
  6482  		v.AddArg(idx)
  6483  		v.AddArg(mem)
  6484  		return true
  6485  	}
  6486  	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
  6487  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
  6488  	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
  6489  	for {
  6490  		c := v.AuxInt
  6491  		s := v.Aux
  6492  		p := v.Args[0]
  6493  		i := v.Args[1]
  6494  		x := v.Args[2]
  6495  		if x.Op != OpAMD64MOVBstoreconstidx1 {
  6496  			break
  6497  		}
  6498  		a := x.AuxInt
  6499  		if x.Aux != s {
  6500  			break
  6501  		}
  6502  		if p != x.Args[0] {
  6503  			break
  6504  		}
  6505  		if i != x.Args[1] {
  6506  			break
  6507  		}
  6508  		mem := x.Args[2]
  6509  		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
  6510  			break
  6511  		}
  6512  		v.reset(OpAMD64MOVWstoreconstidx1)
  6513  		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
  6514  		v.Aux = s
  6515  		v.AddArg(p)
  6516  		v.AddArg(i)
  6517  		v.AddArg(mem)
  6518  		return true
  6519  	}
  6520  	return false
  6521  }
  6522  func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
  6523  	b := v.Block
  6524  	_ = b
  6525  	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  6526  	// cond:
  6527  	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
  6528  	for {
  6529  		c := v.AuxInt
  6530  		sym := v.Aux
  6531  		v_0 := v.Args[0]
  6532  		if v_0.Op != OpAMD64ADDQconst {
  6533  			break
  6534  		}
  6535  		d := v_0.AuxInt
  6536  		ptr := v_0.Args[0]
  6537  		idx := v.Args[1]
  6538  		val := v.Args[2]
  6539  		mem := v.Args[3]
  6540  		v.reset(OpAMD64MOVBstoreidx1)
  6541  		v.AuxInt = c + d
  6542  		v.Aux = sym
  6543  		v.AddArg(ptr)
  6544  		v.AddArg(idx)
  6545  		v.AddArg(val)
  6546  		v.AddArg(mem)
  6547  		return true
  6548  	}
  6549  	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  6550  	// cond:
  6551  	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
  6552  	for {
  6553  		c := v.AuxInt
  6554  		sym := v.Aux
  6555  		ptr := v.Args[0]
  6556  		v_1 := v.Args[1]
  6557  		if v_1.Op != OpAMD64ADDQconst {
  6558  			break
  6559  		}
  6560  		d := v_1.AuxInt
  6561  		idx := v_1.Args[0]
  6562  		val := v.Args[2]
  6563  		mem := v.Args[3]
  6564  		v.reset(OpAMD64MOVBstoreidx1)
  6565  		v.AuxInt = c + d
  6566  		v.Aux = sym
  6567  		v.AddArg(ptr)
  6568  		v.AddArg(idx)
  6569  		v.AddArg(val)
  6570  		v.AddArg(mem)
  6571  		return true
  6572  	}
  6573  	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
  6574  	// cond: x.Uses == 1 && clobber(x)
  6575  	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
  6576  	for {
  6577  		i := v.AuxInt
  6578  		s := v.Aux
  6579  		p := v.Args[0]
  6580  		idx := v.Args[1]
  6581  		v_2 := v.Args[2]
  6582  		if v_2.Op != OpAMD64SHRQconst {
  6583  			break
  6584  		}
  6585  		if v_2.AuxInt != 8 {
  6586  			break
  6587  		}
  6588  		w := v_2.Args[0]
  6589  		x := v.Args[3]
  6590  		if x.Op != OpAMD64MOVBstoreidx1 {
  6591  			break
  6592  		}
  6593  		if x.AuxInt != i-1 {
  6594  			break
  6595  		}
  6596  		if x.Aux != s {
  6597  			break
  6598  		}
  6599  		if p != x.Args[0] {
  6600  			break
  6601  		}
  6602  		if idx != x.Args[1] {
  6603  			break
  6604  		}
  6605  		if w != x.Args[2] {
  6606  			break
  6607  		}
  6608  		mem := x.Args[3]
  6609  		if !(x.Uses == 1 && clobber(x)) {
  6610  			break
  6611  		}
  6612  		v.reset(OpAMD64MOVWstoreidx1)
  6613  		v.AuxInt = i - 1
  6614  		v.Aux = s
  6615  		v.AddArg(p)
  6616  		v.AddArg(idx)
  6617  		v.AddArg(w)
  6618  		v.AddArg(mem)
  6619  		return true
  6620  	}
  6621  	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
  6622  	// cond: x.Uses == 1 && clobber(x)
  6623  	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
  6624  	for {
  6625  		i := v.AuxInt
  6626  		s := v.Aux
  6627  		p := v.Args[0]
  6628  		idx := v.Args[1]
  6629  		v_2 := v.Args[2]
  6630  		if v_2.Op != OpAMD64SHRQconst {
  6631  			break
  6632  		}
  6633  		j := v_2.AuxInt
  6634  		w := v_2.Args[0]
  6635  		x := v.Args[3]
  6636  		if x.Op != OpAMD64MOVBstoreidx1 {
  6637  			break
  6638  		}
  6639  		if x.AuxInt != i-1 {
  6640  			break
  6641  		}
  6642  		if x.Aux != s {
  6643  			break
  6644  		}
  6645  		if p != x.Args[0] {
  6646  			break
  6647  		}
  6648  		if idx != x.Args[1] {
  6649  			break
  6650  		}
  6651  		w0 := x.Args[2]
  6652  		if w0.Op != OpAMD64SHRQconst {
  6653  			break
  6654  		}
  6655  		if w0.AuxInt != j-8 {
  6656  			break
  6657  		}
  6658  		if w != w0.Args[0] {
  6659  			break
  6660  		}
  6661  		mem := x.Args[3]
  6662  		if !(x.Uses == 1 && clobber(x)) {
  6663  			break
  6664  		}
  6665  		v.reset(OpAMD64MOVWstoreidx1)
  6666  		v.AuxInt = i - 1
  6667  		v.Aux = s
  6668  		v.AddArg(p)
  6669  		v.AddArg(idx)
  6670  		v.AddArg(w0)
  6671  		v.AddArg(mem)
  6672  		return true
  6673  	}
  6674  	return false
  6675  }
  6676  func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
  6677  	b := v.Block
  6678  	_ = b
  6679  	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
  6680  	// cond: x.Uses == 1 && clobber(x)
  6681  	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  6682  	for {
  6683  		x := v.Args[0]
  6684  		if x.Op != OpAMD64MOVLload {
  6685  			break
  6686  		}
  6687  		off := x.AuxInt
  6688  		sym := x.Aux
  6689  		ptr := x.Args[0]
  6690  		mem := x.Args[1]
  6691  		if !(x.Uses == 1 && clobber(x)) {
  6692  			break
  6693  		}
  6694  		b = x.Block
  6695  		v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
  6696  		v.reset(OpCopy)
  6697  		v.AddArg(v0)
  6698  		v0.AuxInt = off
  6699  		v0.Aux = sym
  6700  		v0.AddArg(ptr)
  6701  		v0.AddArg(mem)
  6702  		return true
  6703  	}
  6704  	// match: (MOVLQSX (ANDLconst [c] x))
  6705  	// cond: c & 0x80000000 == 0
  6706  	// result: (ANDLconst [c & 0x7fffffff] x)
  6707  	for {
  6708  		v_0 := v.Args[0]
  6709  		if v_0.Op != OpAMD64ANDLconst {
  6710  			break
  6711  		}
  6712  		c := v_0.AuxInt
  6713  		x := v_0.Args[0]
  6714  		if !(c&0x80000000 == 0) {
  6715  			break
  6716  		}
  6717  		v.reset(OpAMD64ANDLconst)
  6718  		v.AuxInt = c & 0x7fffffff
  6719  		v.AddArg(x)
  6720  		return true
  6721  	}
  6722  	return false
  6723  }
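// Note (editorial, not generator output): the ANDLconst rule above drops a
// sign-extension whenever the mask proves bit 31 is zero, because sign- and
// zero-extension then produce the same result. A hypothetical example,
// assuming typical codegen:
//
//	func extendMasked(x int32) int64 {
//		return int64(x & 0x7fffffff) // sign bit cleared; MOVLQSX folds away
//	}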
  6724  func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
  6725  	b := v.Block
  6726  	_ = b
  6727  	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  6728  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  6729  	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  6730  	for {
  6731  		off1 := v.AuxInt
  6732  		sym1 := v.Aux
  6733  		v_0 := v.Args[0]
  6734  		if v_0.Op != OpAMD64LEAQ {
  6735  			break
  6736  		}
  6737  		off2 := v_0.AuxInt
  6738  		sym2 := v_0.Aux
  6739  		base := v_0.Args[0]
  6740  		mem := v.Args[1]
  6741  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  6742  			break
  6743  		}
  6744  		v.reset(OpAMD64MOVLQSXload)
  6745  		v.AuxInt = off1 + off2
  6746  		v.Aux = mergeSym(sym1, sym2)
  6747  		v.AddArg(base)
  6748  		v.AddArg(mem)
  6749  		return true
  6750  	}
  6751  	return false
  6752  }
  6753  func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
  6754  	b := v.Block
  6755  	_ = b
  6756  	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
  6757  	// cond: x.Uses == 1 && clobber(x)
  6758  	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
  6759  	for {
  6760  		x := v.Args[0]
  6761  		if x.Op != OpAMD64MOVLload {
  6762  			break
  6763  		}
  6764  		off := x.AuxInt
  6765  		sym := x.Aux
  6766  		ptr := x.Args[0]
  6767  		mem := x.Args[1]
  6768  		if !(x.Uses == 1 && clobber(x)) {
  6769  			break
  6770  		}
  6771  		b = x.Block
  6772  		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
  6773  		v.reset(OpCopy)
  6774  		v.AddArg(v0)
  6775  		v0.AuxInt = off
  6776  		v0.Aux = sym
  6777  		v0.AddArg(ptr)
  6778  		v0.AddArg(mem)
  6779  		return true
  6780  	}
  6781  	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
  6782  	// cond: x.Uses == 1 && clobber(x)
  6783  	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
  6784  	for {
  6785  		x := v.Args[0]
  6786  		if x.Op != OpAMD64MOVLloadidx1 {
  6787  			break
  6788  		}
  6789  		off := x.AuxInt
  6790  		sym := x.Aux
  6791  		ptr := x.Args[0]
  6792  		idx := x.Args[1]
  6793  		mem := x.Args[2]
  6794  		if !(x.Uses == 1 && clobber(x)) {
  6795  			break
  6796  		}
  6797  		b = x.Block
  6798  		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
  6799  		v.reset(OpCopy)
  6800  		v.AddArg(v0)
  6801  		v0.AuxInt = off
  6802  		v0.Aux = sym
  6803  		v0.AddArg(ptr)
  6804  		v0.AddArg(idx)
  6805  		v0.AddArg(mem)
  6806  		return true
  6807  	}
  6808  	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
  6809  	// cond: x.Uses == 1 && clobber(x)
  6810  	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
  6811  	for {
  6812  		x := v.Args[0]
  6813  		if x.Op != OpAMD64MOVLloadidx4 {
  6814  			break
  6815  		}
  6816  		off := x.AuxInt
  6817  		sym := x.Aux
  6818  		ptr := x.Args[0]
  6819  		idx := x.Args[1]
  6820  		mem := x.Args[2]
  6821  		if !(x.Uses == 1 && clobber(x)) {
  6822  			break
  6823  		}
  6824  		b = x.Block
  6825  		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type)
  6826  		v.reset(OpCopy)
  6827  		v.AddArg(v0)
  6828  		v0.AuxInt = off
  6829  		v0.Aux = sym
  6830  		v0.AddArg(ptr)
  6831  		v0.AddArg(idx)
  6832  		v0.AddArg(mem)
  6833  		return true
  6834  	}
  6835  	// match: (MOVLQZX (ANDLconst [c] x))
  6836  	// cond:
  6837  	// result: (ANDLconst [c] x)
  6838  	for {
  6839  		v_0 := v.Args[0]
  6840  		if v_0.Op != OpAMD64ANDLconst {
  6841  			break
  6842  		}
  6843  		c := v_0.AuxInt
  6844  		x := v_0.Args[0]
  6845  		v.reset(OpAMD64ANDLconst)
  6846  		v.AuxInt = c
  6847  		v.AddArg(x)
  6848  		return true
  6849  	}
  6850  	return false
  6851  }
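// Note (editorial, not generator output): unlike the byte case, the final
// MOVLQZX rule keeps the mask c unchanged, because on amd64 every 32-bit
// instruction already zeroes bits 32-63 of its destination register, making
// the explicit zero-extension redundant after any ANDL. For example
// (illustrative only):
//
//	func zext(x uint32) uint64 {
//		return uint64(x & 0xffff) // the ANDL result is already zero-extended
//	}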
  6852  func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
  6853  	b := v.Block
  6854  	_ = b
  6855  	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
  6856  	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
  6857  	// result: x
  6858  	for {
  6859  		off := v.AuxInt
  6860  		sym := v.Aux
  6861  		ptr := v.Args[0]
  6862  		v_1 := v.Args[1]
  6863  		if v_1.Op != OpAMD64MOVLstore {
  6864  			break
  6865  		}
  6866  		off2 := v_1.AuxInt
  6867  		sym2 := v_1.Aux
  6868  		ptr2 := v_1.Args[0]
  6869  		x := v_1.Args[1]
  6870  		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
  6871  			break
  6872  		}
  6873  		v.reset(OpCopy)
  6874  		v.Type = x.Type
  6875  		v.AddArg(x)
  6876  		return true
  6877  	}
  6878  	// match: (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem)
  6879  	// cond: is32Bit(off1+off2)
  6880  	// result: (MOVLload  [off1+off2] {sym} ptr mem)
  6881  	for {
  6882  		off1 := v.AuxInt
  6883  		sym := v.Aux
  6884  		v_0 := v.Args[0]
  6885  		if v_0.Op != OpAMD64ADDQconst {
  6886  			break
  6887  		}
  6888  		off2 := v_0.AuxInt
  6889  		ptr := v_0.Args[0]
  6890  		mem := v.Args[1]
  6891  		if !(is32Bit(off1 + off2)) {
  6892  			break
  6893  		}
  6894  		v.reset(OpAMD64MOVLload)
  6895  		v.AuxInt = off1 + off2
  6896  		v.Aux = sym
  6897  		v.AddArg(ptr)
  6898  		v.AddArg(mem)
  6899  		return true
  6900  	}
  6901  	// match: (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  6902  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  6903  	// result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  6904  	for {
  6905  		off1 := v.AuxInt
  6906  		sym1 := v.Aux
  6907  		v_0 := v.Args[0]
  6908  		if v_0.Op != OpAMD64LEAQ {
  6909  			break
  6910  		}
  6911  		off2 := v_0.AuxInt
  6912  		sym2 := v_0.Aux
  6913  		base := v_0.Args[0]
  6914  		mem := v.Args[1]
  6915  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  6916  			break
  6917  		}
  6918  		v.reset(OpAMD64MOVLload)
  6919  		v.AuxInt = off1 + off2
  6920  		v.Aux = mergeSym(sym1, sym2)
  6921  		v.AddArg(base)
  6922  		v.AddArg(mem)
  6923  		return true
  6924  	}
  6925  	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
  6926  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  6927  	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  6928  	for {
  6929  		off1 := v.AuxInt
  6930  		sym1 := v.Aux
  6931  		v_0 := v.Args[0]
  6932  		if v_0.Op != OpAMD64LEAQ1 {
  6933  			break
  6934  		}
  6935  		off2 := v_0.AuxInt
  6936  		sym2 := v_0.Aux
  6937  		ptr := v_0.Args[0]
  6938  		idx := v_0.Args[1]
  6939  		mem := v.Args[1]
  6940  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  6941  			break
  6942  		}
  6943  		v.reset(OpAMD64MOVLloadidx1)
  6944  		v.AuxInt = off1 + off2
  6945  		v.Aux = mergeSym(sym1, sym2)
  6946  		v.AddArg(ptr)
  6947  		v.AddArg(idx)
  6948  		v.AddArg(mem)
  6949  		return true
  6950  	}
  6951  	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
  6952  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  6953  	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  6954  	for {
  6955  		off1 := v.AuxInt
  6956  		sym1 := v.Aux
  6957  		v_0 := v.Args[0]
  6958  		if v_0.Op != OpAMD64LEAQ4 {
  6959  			break
  6960  		}
  6961  		off2 := v_0.AuxInt
  6962  		sym2 := v_0.Aux
  6963  		ptr := v_0.Args[0]
  6964  		idx := v_0.Args[1]
  6965  		mem := v.Args[1]
  6966  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  6967  			break
  6968  		}
  6969  		v.reset(OpAMD64MOVLloadidx4)
  6970  		v.AuxInt = off1 + off2
  6971  		v.Aux = mergeSym(sym1, sym2)
  6972  		v.AddArg(ptr)
  6973  		v.AddArg(idx)
  6974  		v.AddArg(mem)
  6975  		return true
  6976  	}
  6977  	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
  6978  	// cond: ptr.Op != OpSB
  6979  	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
  6980  	for {
  6981  		off := v.AuxInt
  6982  		sym := v.Aux
  6983  		v_0 := v.Args[0]
  6984  		if v_0.Op != OpAMD64ADDQ {
  6985  			break
  6986  		}
  6987  		ptr := v_0.Args[0]
  6988  		idx := v_0.Args[1]
  6989  		mem := v.Args[1]
  6990  		if !(ptr.Op != OpSB) {
  6991  			break
  6992  		}
  6993  		v.reset(OpAMD64MOVLloadidx1)
  6994  		v.AuxInt = off
  6995  		v.Aux = sym
  6996  		v.AddArg(ptr)
  6997  		v.AddArg(idx)
  6998  		v.AddArg(mem)
  6999  		return true
  7000  	}
  7001  	return false
  7002  }
  7003  func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
  7004  	b := v.Block
  7005  	_ = b
  7006  	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
  7007  	// cond:
  7008  	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
  7009  	for {
  7010  		c := v.AuxInt
  7011  		sym := v.Aux
  7012  		ptr := v.Args[0]
  7013  		v_1 := v.Args[1]
  7014  		if v_1.Op != OpAMD64SHLQconst {
  7015  			break
  7016  		}
  7017  		if v_1.AuxInt != 2 {
  7018  			break
  7019  		}
  7020  		idx := v_1.Args[0]
  7021  		mem := v.Args[2]
  7022  		v.reset(OpAMD64MOVLloadidx4)
  7023  		v.AuxInt = c
  7024  		v.Aux = sym
  7025  		v.AddArg(ptr)
  7026  		v.AddArg(idx)
  7027  		v.AddArg(mem)
  7028  		return true
  7029  	}
  7030  	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
  7031  	// cond:
  7032  	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  7033  	for {
  7034  		c := v.AuxInt
  7035  		sym := v.Aux
  7036  		v_0 := v.Args[0]
  7037  		if v_0.Op != OpAMD64ADDQconst {
  7038  			break
  7039  		}
  7040  		d := v_0.AuxInt
  7041  		ptr := v_0.Args[0]
  7042  		idx := v.Args[1]
  7043  		mem := v.Args[2]
  7044  		v.reset(OpAMD64MOVLloadidx1)
  7045  		v.AuxInt = c + d
  7046  		v.Aux = sym
  7047  		v.AddArg(ptr)
  7048  		v.AddArg(idx)
  7049  		v.AddArg(mem)
  7050  		return true
  7051  	}
  7052  	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
  7053  	// cond:
  7054  	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
  7055  	for {
  7056  		c := v.AuxInt
  7057  		sym := v.Aux
  7058  		ptr := v.Args[0]
  7059  		v_1 := v.Args[1]
  7060  		if v_1.Op != OpAMD64ADDQconst {
  7061  			break
  7062  		}
  7063  		d := v_1.AuxInt
  7064  		idx := v_1.Args[0]
  7065  		mem := v.Args[2]
  7066  		v.reset(OpAMD64MOVLloadidx1)
  7067  		v.AuxInt = c + d
  7068  		v.Aux = sym
  7069  		v.AddArg(ptr)
  7070  		v.AddArg(idx)
  7071  		v.AddArg(mem)
  7072  		return true
  7073  	}
  7074  	return false
  7075  }
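// Note (editorial, not generator output): the first MOVLloadidx1 rule
// recognizes idx<<2 and switches to the hardware's scale-4 addressing mode,
// eliminating the explicit shift. This is the shape ordinary slice indexing
// of 4-byte elements tends to produce (illustrative only):
//
//	func loadElem(s []uint32, i int) uint32 {
//		return s[i] // address p + 4*i can map onto MOVLloadidx4
//	}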
  7076  func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
  7077  	b := v.Block
  7078  	_ = b
  7079  	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
  7080  	// cond:
  7081  	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
  7082  	for {
  7083  		c := v.AuxInt
  7084  		sym := v.Aux
  7085  		v_0 := v.Args[0]
  7086  		if v_0.Op != OpAMD64ADDQconst {
  7087  			break
  7088  		}
  7089  		d := v_0.AuxInt
  7090  		ptr := v_0.Args[0]
  7091  		idx := v.Args[1]
  7092  		mem := v.Args[2]
  7093  		v.reset(OpAMD64MOVLloadidx4)
  7094  		v.AuxInt = c + d
  7095  		v.Aux = sym
  7096  		v.AddArg(ptr)
  7097  		v.AddArg(idx)
  7098  		v.AddArg(mem)
  7099  		return true
  7100  	}
  7101  	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
  7102  	// cond:
  7103  	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
  7104  	for {
  7105  		c := v.AuxInt
  7106  		sym := v.Aux
  7107  		ptr := v.Args[0]
  7108  		v_1 := v.Args[1]
  7109  		if v_1.Op != OpAMD64ADDQconst {
  7110  			break
  7111  		}
  7112  		d := v_1.AuxInt
  7113  		idx := v_1.Args[0]
  7114  		mem := v.Args[2]
  7115  		v.reset(OpAMD64MOVLloadidx4)
  7116  		v.AuxInt = c + 4*d
  7117  		v.Aux = sym
  7118  		v.AddArg(ptr)
  7119  		v.AddArg(idx)
  7120  		v.AddArg(mem)
  7121  		return true
  7122  	}
  7123  	return false
  7124  }
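// Note (editorial, not generator output): for the scale-4 form the effective
// address is
//
//	addr = ptr + 4*idx + AuxInt
//
// so a constant d on ptr folds in directly (c+d, first rule), while a
// constant d on idx must be scaled: ptr + 4*(idx+d) + c == ptr + 4*idx +
// (c + 4*d), hence the c+4*d in the second rule.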
  7125  func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
  7126  	b := v.Block
  7127  	_ = b
  7128  	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
  7129  	// cond:
  7130  	// result: (MOVLstore [off] {sym} ptr x mem)
  7131  	for {
  7132  		off := v.AuxInt
  7133  		sym := v.Aux
  7134  		ptr := v.Args[0]
  7135  		v_1 := v.Args[1]
  7136  		if v_1.Op != OpAMD64MOVLQSX {
  7137  			break
  7138  		}
  7139  		x := v_1.Args[0]
  7140  		mem := v.Args[2]
  7141  		v.reset(OpAMD64MOVLstore)
  7142  		v.AuxInt = off
  7143  		v.Aux = sym
  7144  		v.AddArg(ptr)
  7145  		v.AddArg(x)
  7146  		v.AddArg(mem)
  7147  		return true
  7148  	}
  7149  	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
  7150  	// cond:
  7151  	// result: (MOVLstore [off] {sym} ptr x mem)
  7152  	for {
  7153  		off := v.AuxInt
  7154  		sym := v.Aux
  7155  		ptr := v.Args[0]
  7156  		v_1 := v.Args[1]
  7157  		if v_1.Op != OpAMD64MOVLQZX {
  7158  			break
  7159  		}
  7160  		x := v_1.Args[0]
  7161  		mem := v.Args[2]
  7162  		v.reset(OpAMD64MOVLstore)
  7163  		v.AuxInt = off
  7164  		v.Aux = sym
  7165  		v.AddArg(ptr)
  7166  		v.AddArg(x)
  7167  		v.AddArg(mem)
  7168  		return true
  7169  	}
  7170  	// match: (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
  7171  	// cond: is32Bit(off1+off2)
  7172  	// result: (MOVLstore  [off1+off2] {sym} ptr val mem)
  7173  	for {
  7174  		off1 := v.AuxInt
  7175  		sym := v.Aux
  7176  		v_0 := v.Args[0]
  7177  		if v_0.Op != OpAMD64ADDQconst {
  7178  			break
  7179  		}
  7180  		off2 := v_0.AuxInt
  7181  		ptr := v_0.Args[0]
  7182  		val := v.Args[1]
  7183  		mem := v.Args[2]
  7184  		if !(is32Bit(off1 + off2)) {
  7185  			break
  7186  		}
  7187  		v.reset(OpAMD64MOVLstore)
  7188  		v.AuxInt = off1 + off2
  7189  		v.Aux = sym
  7190  		v.AddArg(ptr)
  7191  		v.AddArg(val)
  7192  		v.AddArg(mem)
  7193  		return true
  7194  	}
  7195  	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
  7196  	// cond: validOff(off)
  7197  	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
  7198  	for {
  7199  		off := v.AuxInt
  7200  		sym := v.Aux
  7201  		ptr := v.Args[0]
  7202  		v_1 := v.Args[1]
  7203  		if v_1.Op != OpAMD64MOVLconst {
  7204  			break
  7205  		}
  7206  		c := v_1.AuxInt
  7207  		mem := v.Args[2]
  7208  		if !(validOff(off)) {
  7209  			break
  7210  		}
  7211  		v.reset(OpAMD64MOVLstoreconst)
  7212  		v.AuxInt = makeValAndOff(int64(int32(c)), off)
  7213  		v.Aux = sym
  7214  		v.AddArg(ptr)
  7215  		v.AddArg(mem)
  7216  		return true
  7217  	}
  7218  	// match: (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  7219  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  7220  	// result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  7221  	for {
  7222  		off1 := v.AuxInt
  7223  		sym1 := v.Aux
  7224  		v_0 := v.Args[0]
  7225  		if v_0.Op != OpAMD64LEAQ {
  7226  			break
  7227  		}
  7228  		off2 := v_0.AuxInt
  7229  		sym2 := v_0.Aux
  7230  		base := v_0.Args[0]
  7231  		val := v.Args[1]
  7232  		mem := v.Args[2]
  7233  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  7234  			break
  7235  		}
  7236  		v.reset(OpAMD64MOVLstore)
  7237  		v.AuxInt = off1 + off2
  7238  		v.Aux = mergeSym(sym1, sym2)
  7239  		v.AddArg(base)
  7240  		v.AddArg(val)
  7241  		v.AddArg(mem)
  7242  		return true
  7243  	}
  7244  	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
  7245  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  7246  	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  7247  	for {
  7248  		off1 := v.AuxInt
  7249  		sym1 := v.Aux
  7250  		v_0 := v.Args[0]
  7251  		if v_0.Op != OpAMD64LEAQ1 {
  7252  			break
  7253  		}
  7254  		off2 := v_0.AuxInt
  7255  		sym2 := v_0.Aux
  7256  		ptr := v_0.Args[0]
  7257  		idx := v_0.Args[1]
  7258  		val := v.Args[1]
  7259  		mem := v.Args[2]
  7260  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  7261  			break
  7262  		}
  7263  		v.reset(OpAMD64MOVLstoreidx1)
  7264  		v.AuxInt = off1 + off2
  7265  		v.Aux = mergeSym(sym1, sym2)
  7266  		v.AddArg(ptr)
  7267  		v.AddArg(idx)
  7268  		v.AddArg(val)
  7269  		v.AddArg(mem)
  7270  		return true
  7271  	}
  7272  	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
  7273  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  7274  	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  7275  	for {
  7276  		off1 := v.AuxInt
  7277  		sym1 := v.Aux
  7278  		v_0 := v.Args[0]
  7279  		if v_0.Op != OpAMD64LEAQ4 {
  7280  			break
  7281  		}
  7282  		off2 := v_0.AuxInt
  7283  		sym2 := v_0.Aux
  7284  		ptr := v_0.Args[0]
  7285  		idx := v_0.Args[1]
  7286  		val := v.Args[1]
  7287  		mem := v.Args[2]
  7288  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  7289  			break
  7290  		}
  7291  		v.reset(OpAMD64MOVLstoreidx4)
  7292  		v.AuxInt = off1 + off2
  7293  		v.Aux = mergeSym(sym1, sym2)
  7294  		v.AddArg(ptr)
  7295  		v.AddArg(idx)
  7296  		v.AddArg(val)
  7297  		v.AddArg(mem)
  7298  		return true
  7299  	}
  7300  	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
  7301  	// cond: ptr.Op != OpSB
  7302  	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
  7303  	for {
  7304  		off := v.AuxInt
  7305  		sym := v.Aux
  7306  		v_0 := v.Args[0]
  7307  		if v_0.Op != OpAMD64ADDQ {
  7308  			break
  7309  		}
  7310  		ptr := v_0.Args[0]
  7311  		idx := v_0.Args[1]
  7312  		val := v.Args[1]
  7313  		mem := v.Args[2]
  7314  		if !(ptr.Op != OpSB) {
  7315  			break
  7316  		}
  7317  		v.reset(OpAMD64MOVLstoreidx1)
  7318  		v.AuxInt = off
  7319  		v.Aux = sym
  7320  		v.AddArg(ptr)
  7321  		v.AddArg(idx)
  7322  		v.AddArg(val)
  7323  		v.AddArg(mem)
  7324  		return true
  7325  	}
  7326  	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  7327  	// cond: x.Uses == 1 && clobber(x)
  7328  	// result: (MOVQstore [i-4] {s} p w mem)
  7329  	for {
  7330  		i := v.AuxInt
  7331  		s := v.Aux
  7332  		p := v.Args[0]
  7333  		v_1 := v.Args[1]
  7334  		if v_1.Op != OpAMD64SHRQconst {
  7335  			break
  7336  		}
  7337  		if v_1.AuxInt != 32 {
  7338  			break
  7339  		}
  7340  		w := v_1.Args[0]
  7341  		x := v.Args[2]
  7342  		if x.Op != OpAMD64MOVLstore {
  7343  			break
  7344  		}
  7345  		if x.AuxInt != i-4 {
  7346  			break
  7347  		}
  7348  		if x.Aux != s {
  7349  			break
  7350  		}
  7351  		if p != x.Args[0] {
  7352  			break
  7353  		}
  7354  		if w != x.Args[1] {
  7355  			break
  7356  		}
  7357  		mem := x.Args[2]
  7358  		if !(x.Uses == 1 && clobber(x)) {
  7359  			break
  7360  		}
  7361  		v.reset(OpAMD64MOVQstore)
  7362  		v.AuxInt = i - 4
  7363  		v.Aux = s
  7364  		v.AddArg(p)
  7365  		v.AddArg(w)
  7366  		v.AddArg(mem)
  7367  		return true
  7368  	}
  7369  	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  7370  	// cond: x.Uses == 1 && clobber(x)
  7371  	// result: (MOVQstore [i-4] {s} p w0 mem)
  7372  	for {
  7373  		i := v.AuxInt
  7374  		s := v.Aux
  7375  		p := v.Args[0]
  7376  		v_1 := v.Args[1]
  7377  		if v_1.Op != OpAMD64SHRQconst {
  7378  			break
  7379  		}
  7380  		j := v_1.AuxInt
  7381  		w := v_1.Args[0]
  7382  		x := v.Args[2]
  7383  		if x.Op != OpAMD64MOVLstore {
  7384  			break
  7385  		}
  7386  		if x.AuxInt != i-4 {
  7387  			break
  7388  		}
  7389  		if x.Aux != s {
  7390  			break
  7391  		}
  7392  		if p != x.Args[0] {
  7393  			break
  7394  		}
  7395  		w0 := x.Args[1]
  7396  		if w0.Op != OpAMD64SHRQconst {
  7397  			break
  7398  		}
  7399  		if w0.AuxInt != j-32 {
  7400  			break
  7401  		}
  7402  		if w != w0.Args[0] {
  7403  			break
  7404  		}
  7405  		mem := x.Args[2]
  7406  		if !(x.Uses == 1 && clobber(x)) {
  7407  			break
  7408  		}
  7409  		v.reset(OpAMD64MOVQstore)
  7410  		v.AuxInt = i - 4
  7411  		v.Aux = s
  7412  		v.AddArg(p)
  7413  		v.AddArg(w0)
  7414  		v.AddArg(mem)
  7415  		return true
  7416  	}
  7417  	return false
  7418  }
  7419  func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
  7420  	b := v.Block
  7421  	_ = b
  7422  	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
  7423  	// cond: ValAndOff(sc).canAdd(off)
  7424  	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
  7425  	for {
  7426  		sc := v.AuxInt
  7427  		s := v.Aux
  7428  		v_0 := v.Args[0]
  7429  		if v_0.Op != OpAMD64ADDQconst {
  7430  			break
  7431  		}
  7432  		off := v_0.AuxInt
  7433  		ptr := v_0.Args[0]
  7434  		mem := v.Args[1]
  7435  		if !(ValAndOff(sc).canAdd(off)) {
  7436  			break
  7437  		}
  7438  		v.reset(OpAMD64MOVLstoreconst)
  7439  		v.AuxInt = ValAndOff(sc).add(off)
  7440  		v.Aux = s
  7441  		v.AddArg(ptr)
  7442  		v.AddArg(mem)
  7443  		return true
  7444  	}
  7445  	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
  7446  	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
  7447  	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  7448  	for {
  7449  		sc := v.AuxInt
  7450  		sym1 := v.Aux
  7451  		v_0 := v.Args[0]
  7452  		if v_0.Op != OpAMD64LEAQ {
  7453  			break
  7454  		}
  7455  		off := v_0.AuxInt
  7456  		sym2 := v_0.Aux
  7457  		ptr := v_0.Args[0]
  7458  		mem := v.Args[1]
  7459  		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
  7460  			break
  7461  		}
  7462  		v.reset(OpAMD64MOVLstoreconst)
  7463  		v.AuxInt = ValAndOff(sc).add(off)
  7464  		v.Aux = mergeSym(sym1, sym2)
  7465  		v.AddArg(ptr)
  7466  		v.AddArg(mem)
  7467  		return true
  7468  	}
  7469  	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
  7470  	// cond: canMergeSym(sym1, sym2)
  7471  	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  7472  	for {
  7473  		x := v.AuxInt
  7474  		sym1 := v.Aux
  7475  		v_0 := v.Args[0]
  7476  		if v_0.Op != OpAMD64LEAQ1 {
  7477  			break
  7478  		}
  7479  		off := v_0.AuxInt
  7480  		sym2 := v_0.Aux
  7481  		ptr := v_0.Args[0]
  7482  		idx := v_0.Args[1]
  7483  		mem := v.Args[1]
  7484  		if !(canMergeSym(sym1, sym2)) {
  7485  			break
  7486  		}
  7487  		v.reset(OpAMD64MOVLstoreconstidx1)
  7488  		v.AuxInt = ValAndOff(x).add(off)
  7489  		v.Aux = mergeSym(sym1, sym2)
  7490  		v.AddArg(ptr)
  7491  		v.AddArg(idx)
  7492  		v.AddArg(mem)
  7493  		return true
  7494  	}
  7495  	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
  7496  	// cond: canMergeSym(sym1, sym2)
  7497  	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  7498  	for {
  7499  		x := v.AuxInt
  7500  		sym1 := v.Aux
  7501  		v_0 := v.Args[0]
  7502  		if v_0.Op != OpAMD64LEAQ4 {
  7503  			break
  7504  		}
  7505  		off := v_0.AuxInt
  7506  		sym2 := v_0.Aux
  7507  		ptr := v_0.Args[0]
  7508  		idx := v_0.Args[1]
  7509  		mem := v.Args[1]
  7510  		if !(canMergeSym(sym1, sym2)) {
  7511  			break
  7512  		}
  7513  		v.reset(OpAMD64MOVLstoreconstidx4)
  7514  		v.AuxInt = ValAndOff(x).add(off)
  7515  		v.Aux = mergeSym(sym1, sym2)
  7516  		v.AddArg(ptr)
  7517  		v.AddArg(idx)
  7518  		v.AddArg(mem)
  7519  		return true
  7520  	}
  7521  	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
  7522  	// cond:
  7523  	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
  7524  	for {
  7525  		x := v.AuxInt
  7526  		sym := v.Aux
  7527  		v_0 := v.Args[0]
  7528  		if v_0.Op != OpAMD64ADDQ {
  7529  			break
  7530  		}
  7531  		ptr := v_0.Args[0]
  7532  		idx := v_0.Args[1]
  7533  		mem := v.Args[1]
  7534  		v.reset(OpAMD64MOVLstoreconstidx1)
  7535  		v.AuxInt = x
  7536  		v.Aux = sym
  7537  		v.AddArg(ptr)
  7538  		v.AddArg(idx)
  7539  		v.AddArg(mem)
  7540  		return true
  7541  	}
  7542  	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  7543  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
  7544  	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  7545  	for {
  7546  		c := v.AuxInt
  7547  		s := v.Aux
  7548  		p := v.Args[0]
  7549  		x := v.Args[1]
  7550  		if x.Op != OpAMD64MOVLstoreconst {
  7551  			break
  7552  		}
  7553  		a := x.AuxInt
  7554  		if x.Aux != s {
  7555  			break
  7556  		}
  7557  		if p != x.Args[0] {
  7558  			break
  7559  		}
  7560  		mem := x.Args[1]
  7561  		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
  7562  			break
  7563  		}
  7564  		v.reset(OpAMD64MOVQstore)
  7565  		v.AuxInt = ValAndOff(a).Off()
  7566  		v.Aux = s
  7567  		v.AddArg(p)
  7568  		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
  7569  		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
  7570  		v.AddArg(v0)
  7571  		v.AddArg(mem)
  7572  		return true
  7573  	}
  7574  	return false
  7575  }
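// Note (editorial, not generator output): the final MOVLstoreconst rule fuses
// two adjacent 32-bit constant stores into one 64-bit store. Since x86-64
// stores accept at most a sign-extended 32-bit immediate, the combined
// constant is first materialized in a register with MOVQconst, assembled
// little-endian as
//
//	ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
//
// with the lower-addressed store supplying the low half.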
  7576  func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
  7577  	b := v.Block
  7578  	_ = b
  7579  	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
  7580  	// cond:
  7581  	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
  7582  	for {
  7583  		c := v.AuxInt
  7584  		sym := v.Aux
  7585  		ptr := v.Args[0]
  7586  		v_1 := v.Args[1]
  7587  		if v_1.Op != OpAMD64SHLQconst {
  7588  			break
  7589  		}
  7590  		if v_1.AuxInt != 2 {
  7591  			break
  7592  		}
  7593  		idx := v_1.Args[0]
  7594  		mem := v.Args[2]
  7595  		v.reset(OpAMD64MOVLstoreconstidx4)
  7596  		v.AuxInt = c
  7597  		v.Aux = sym
  7598  		v.AddArg(ptr)
  7599  		v.AddArg(idx)
  7600  		v.AddArg(mem)
  7601  		return true
  7602  	}
  7603  	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
  7604  	// cond:
  7605  	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  7606  	for {
  7607  		x := v.AuxInt
  7608  		sym := v.Aux
  7609  		v_0 := v.Args[0]
  7610  		if v_0.Op != OpAMD64ADDQconst {
  7611  			break
  7612  		}
  7613  		c := v_0.AuxInt
  7614  		ptr := v_0.Args[0]
  7615  		idx := v.Args[1]
  7616  		mem := v.Args[2]
  7617  		v.reset(OpAMD64MOVLstoreconstidx1)
  7618  		v.AuxInt = ValAndOff(x).add(c)
  7619  		v.Aux = sym
  7620  		v.AddArg(ptr)
  7621  		v.AddArg(idx)
  7622  		v.AddArg(mem)
  7623  		return true
  7624  	}
  7625  	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
  7626  	// cond:
  7627  	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  7628  	for {
  7629  		x := v.AuxInt
  7630  		sym := v.Aux
  7631  		ptr := v.Args[0]
  7632  		v_1 := v.Args[1]
  7633  		if v_1.Op != OpAMD64ADDQconst {
  7634  			break
  7635  		}
  7636  		c := v_1.AuxInt
  7637  		idx := v_1.Args[0]
  7638  		mem := v.Args[2]
  7639  		v.reset(OpAMD64MOVLstoreconstidx1)
  7640  		v.AuxInt = ValAndOff(x).add(c)
  7641  		v.Aux = sym
  7642  		v.AddArg(ptr)
  7643  		v.AddArg(idx)
  7644  		v.AddArg(mem)
  7645  		return true
  7646  	}
  7647  	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
  7648  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
  7649  	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  7650  	for {
  7651  		c := v.AuxInt
  7652  		s := v.Aux
  7653  		p := v.Args[0]
  7654  		i := v.Args[1]
  7655  		x := v.Args[2]
  7656  		if x.Op != OpAMD64MOVLstoreconstidx1 {
  7657  			break
  7658  		}
  7659  		a := x.AuxInt
  7660  		if x.Aux != s {
  7661  			break
  7662  		}
  7663  		if p != x.Args[0] {
  7664  			break
  7665  		}
  7666  		if i != x.Args[1] {
  7667  			break
  7668  		}
  7669  		mem := x.Args[2]
  7670  		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
  7671  			break
  7672  		}
  7673  		v.reset(OpAMD64MOVQstoreidx1)
  7674  		v.AuxInt = ValAndOff(a).Off()
  7675  		v.Aux = s
  7676  		v.AddArg(p)
  7677  		v.AddArg(i)
  7678  		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
  7679  		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
  7680  		v.AddArg(v0)
  7681  		v.AddArg(mem)
  7682  		return true
  7683  	}
  7684  	return false
  7685  }
  7686  func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
  7687  	b := v.Block
  7688  	_ = b
  7689  	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
  7690  	// cond:
  7691  	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  7692  	for {
  7693  		x := v.AuxInt
  7694  		sym := v.Aux
  7695  		v_0 := v.Args[0]
  7696  		if v_0.Op != OpAMD64ADDQconst {
  7697  			break
  7698  		}
  7699  		c := v_0.AuxInt
  7700  		ptr := v_0.Args[0]
  7701  		idx := v.Args[1]
  7702  		mem := v.Args[2]
  7703  		v.reset(OpAMD64MOVLstoreconstidx4)
  7704  		v.AuxInt = ValAndOff(x).add(c)
  7705  		v.Aux = sym
  7706  		v.AddArg(ptr)
  7707  		v.AddArg(idx)
  7708  		v.AddArg(mem)
  7709  		return true
  7710  	}
  7711  	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
  7712  	// cond:
  7713  	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
  7714  	for {
  7715  		x := v.AuxInt
  7716  		sym := v.Aux
  7717  		ptr := v.Args[0]
  7718  		v_1 := v.Args[1]
  7719  		if v_1.Op != OpAMD64ADDQconst {
  7720  			break
  7721  		}
  7722  		c := v_1.AuxInt
  7723  		idx := v_1.Args[0]
  7724  		mem := v.Args[2]
  7725  		v.reset(OpAMD64MOVLstoreconstidx4)
  7726  		v.AuxInt = ValAndOff(x).add(4 * c)
  7727  		v.Aux = sym
  7728  		v.AddArg(ptr)
  7729  		v.AddArg(idx)
  7730  		v.AddArg(mem)
  7731  		return true
  7732  	}
  7733  	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
  7734  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
  7735  	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
  7736  	for {
  7737  		c := v.AuxInt
  7738  		s := v.Aux
  7739  		p := v.Args[0]
  7740  		i := v.Args[1]
  7741  		x := v.Args[2]
  7742  		if x.Op != OpAMD64MOVLstoreconstidx4 {
  7743  			break
  7744  		}
  7745  		a := x.AuxInt
  7746  		if x.Aux != s {
  7747  			break
  7748  		}
  7749  		if p != x.Args[0] {
  7750  			break
  7751  		}
  7752  		if i != x.Args[1] {
  7753  			break
  7754  		}
  7755  		mem := x.Args[2]
  7756  		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
  7757  			break
  7758  		}
  7759  		v.reset(OpAMD64MOVQstoreidx1)
  7760  		v.AuxInt = ValAndOff(a).Off()
  7761  		v.Aux = s
  7762  		v.AddArg(p)
  7763  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
  7764  		v0.AuxInt = 2
  7765  		v0.AddArg(i)
  7766  		v.AddArg(v0)
  7767  		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
  7768  		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
  7769  		v.AddArg(v1)
  7770  		v.AddArg(mem)
  7771  		return true
  7772  	}
  7773  	return false
  7774  }
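// Note (editorial, not generator output): the fused result above targets
// MOVQstoreidx1 rather than a scale-4 form, presumably because this op set
// has no scale-4 64-bit store; the 4-byte scaling is instead made explicit by
// shifting the index (SHLQconst <i.Type> [2] i). The address is unchanged:
// p + (i<<2) + ValAndOff(a).Off() == p + 4*i + ValAndOff(a).Off().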
  7775  func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
  7776  	b := v.Block
  7777  	_ = b
  7778  	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
  7779  	// cond:
  7780  	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
  7781  	for {
  7782  		c := v.AuxInt
  7783  		sym := v.Aux
  7784  		ptr := v.Args[0]
  7785  		v_1 := v.Args[1]
  7786  		if v_1.Op != OpAMD64SHLQconst {
  7787  			break
  7788  		}
  7789  		if v_1.AuxInt != 2 {
  7790  			break
  7791  		}
  7792  		idx := v_1.Args[0]
  7793  		val := v.Args[2]
  7794  		mem := v.Args[3]
  7795  		v.reset(OpAMD64MOVLstoreidx4)
  7796  		v.AuxInt = c
  7797  		v.Aux = sym
  7798  		v.AddArg(ptr)
  7799  		v.AddArg(idx)
  7800  		v.AddArg(val)
  7801  		v.AddArg(mem)
  7802  		return true
  7803  	}
  7804  	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  7805  	// cond:
  7806  	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
  7807  	for {
  7808  		c := v.AuxInt
  7809  		sym := v.Aux
  7810  		v_0 := v.Args[0]
  7811  		if v_0.Op != OpAMD64ADDQconst {
  7812  			break
  7813  		}
  7814  		d := v_0.AuxInt
  7815  		ptr := v_0.Args[0]
  7816  		idx := v.Args[1]
  7817  		val := v.Args[2]
  7818  		mem := v.Args[3]
  7819  		v.reset(OpAMD64MOVLstoreidx1)
  7820  		v.AuxInt = c + d
  7821  		v.Aux = sym
  7822  		v.AddArg(ptr)
  7823  		v.AddArg(idx)
  7824  		v.AddArg(val)
  7825  		v.AddArg(mem)
  7826  		return true
  7827  	}
  7828  	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  7829  	// cond:
  7830  	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
  7831  	for {
  7832  		c := v.AuxInt
  7833  		sym := v.Aux
  7834  		ptr := v.Args[0]
  7835  		v_1 := v.Args[1]
  7836  		if v_1.Op != OpAMD64ADDQconst {
  7837  			break
  7838  		}
  7839  		d := v_1.AuxInt
  7840  		idx := v_1.Args[0]
  7841  		val := v.Args[2]
  7842  		mem := v.Args[3]
  7843  		v.reset(OpAMD64MOVLstoreidx1)
  7844  		v.AuxInt = c + d
  7845  		v.Aux = sym
  7846  		v.AddArg(ptr)
  7847  		v.AddArg(idx)
  7848  		v.AddArg(val)
  7849  		v.AddArg(mem)
  7850  		return true
  7851  	}
  7852  	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
  7853  	// cond: x.Uses == 1 && clobber(x)
  7854  	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
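        	// Note (illustrative annotation, not from gen/AMD64.rules): a 32-bit
        	// store of w at i-4 followed by a 32-bit store of w>>32 at i writes
        	// the low and high halves of w to adjacent addresses; on little-endian
        	// x86-64 that is exactly one 64-bit store of w at i-4.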
  7855  	for {
  7856  		i := v.AuxInt
  7857  		s := v.Aux
  7858  		p := v.Args[0]
  7859  		idx := v.Args[1]
  7860  		v_2 := v.Args[2]
  7861  		if v_2.Op != OpAMD64SHRQconst {
  7862  			break
  7863  		}
  7864  		if v_2.AuxInt != 32 {
  7865  			break
  7866  		}
  7867  		w := v_2.Args[0]
  7868  		x := v.Args[3]
  7869  		if x.Op != OpAMD64MOVLstoreidx1 {
  7870  			break
  7871  		}
  7872  		if x.AuxInt != i-4 {
  7873  			break
  7874  		}
  7875  		if x.Aux != s {
  7876  			break
  7877  		}
  7878  		if p != x.Args[0] {
  7879  			break
  7880  		}
  7881  		if idx != x.Args[1] {
  7882  			break
  7883  		}
  7884  		if w != x.Args[2] {
  7885  			break
  7886  		}
  7887  		mem := x.Args[3]
  7888  		if !(x.Uses == 1 && clobber(x)) {
  7889  			break
  7890  		}
  7891  		v.reset(OpAMD64MOVQstoreidx1)
  7892  		v.AuxInt = i - 4
  7893  		v.Aux = s
  7894  		v.AddArg(p)
  7895  		v.AddArg(idx)
  7896  		v.AddArg(w)
  7897  		v.AddArg(mem)
  7898  		return true
  7899  	}
  7900  	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  7901  	// cond: x.Uses == 1 && clobber(x)
  7902  	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
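        	// Note (illustrative annotation, not from gen/AMD64.rules): the
        	// generalization of the rule above. With w0 = w>>(j-32), the store at
        	// i-4 writes the low 32 bits of w0 and the store at i writes
        	// w>>j == w0>>32, its high 32 bits, so both collapse into a single
        	// 64-bit store of w0 at i-4.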
  7903  	for {
  7904  		i := v.AuxInt
  7905  		s := v.Aux
  7906  		p := v.Args[0]
  7907  		idx := v.Args[1]
  7908  		v_2 := v.Args[2]
  7909  		if v_2.Op != OpAMD64SHRQconst {
  7910  			break
  7911  		}
  7912  		j := v_2.AuxInt
  7913  		w := v_2.Args[0]
  7914  		x := v.Args[3]
  7915  		if x.Op != OpAMD64MOVLstoreidx1 {
  7916  			break
  7917  		}
  7918  		if x.AuxInt != i-4 {
  7919  			break
  7920  		}
  7921  		if x.Aux != s {
  7922  			break
  7923  		}
  7924  		if p != x.Args[0] {
  7925  			break
  7926  		}
  7927  		if idx != x.Args[1] {
  7928  			break
  7929  		}
  7930  		w0 := x.Args[2]
  7931  		if w0.Op != OpAMD64SHRQconst {
  7932  			break
  7933  		}
  7934  		if w0.AuxInt != j-32 {
  7935  			break
  7936  		}
  7937  		if w != w0.Args[0] {
  7938  			break
  7939  		}
  7940  		mem := x.Args[3]
  7941  		if !(x.Uses == 1 && clobber(x)) {
  7942  			break
  7943  		}
  7944  		v.reset(OpAMD64MOVQstoreidx1)
  7945  		v.AuxInt = i - 4
  7946  		v.Aux = s
  7947  		v.AddArg(p)
  7948  		v.AddArg(idx)
  7949  		v.AddArg(w0)
  7950  		v.AddArg(mem)
  7951  		return true
  7952  	}
  7953  	return false
  7954  }
  7955  func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
  7956  	b := v.Block
  7957  	_ = b
  7958  	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  7959  	// cond:
  7960  	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
  7961  	for {
  7962  		c := v.AuxInt
  7963  		sym := v.Aux
  7964  		v_0 := v.Args[0]
  7965  		if v_0.Op != OpAMD64ADDQconst {
  7966  			break
  7967  		}
  7968  		d := v_0.AuxInt
  7969  		ptr := v_0.Args[0]
  7970  		idx := v.Args[1]
  7971  		val := v.Args[2]
  7972  		mem := v.Args[3]
  7973  		v.reset(OpAMD64MOVLstoreidx4)
  7974  		v.AuxInt = c + d
  7975  		v.Aux = sym
  7976  		v.AddArg(ptr)
  7977  		v.AddArg(idx)
  7978  		v.AddArg(val)
  7979  		v.AddArg(mem)
  7980  		return true
  7981  	}
  7982  	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  7983  	// cond:
  7984  	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
  7985  	for {
  7986  		c := v.AuxInt
  7987  		sym := v.Aux
  7988  		ptr := v.Args[0]
  7989  		v_1 := v.Args[1]
  7990  		if v_1.Op != OpAMD64ADDQconst {
  7991  			break
  7992  		}
  7993  		d := v_1.AuxInt
  7994  		idx := v_1.Args[0]
  7995  		val := v.Args[2]
  7996  		mem := v.Args[3]
  7997  		v.reset(OpAMD64MOVLstoreidx4)
  7998  		v.AuxInt = c + 4*d
  7999  		v.Aux = sym
  8000  		v.AddArg(ptr)
  8001  		v.AddArg(idx)
  8002  		v.AddArg(val)
  8003  		v.AddArg(mem)
  8004  		return true
  8005  	}
  8006  	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
  8007  	// cond: x.Uses == 1 && clobber(x)
  8008  	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
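        	// Note (illustrative annotation, not from gen/AMD64.rules): the same
        	// merge as for MOVLstoreidx1, except the index here is scaled by 4
        	// while MOVQstoreidx1 scales by 1, so idx is pre-multiplied via
        	// (SHLQconst [2] idx) to keep the effective address p+4*idx+(i-4)
        	// unchanged.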
  8009  	for {
  8010  		i := v.AuxInt
  8011  		s := v.Aux
  8012  		p := v.Args[0]
  8013  		idx := v.Args[1]
  8014  		v_2 := v.Args[2]
  8015  		if v_2.Op != OpAMD64SHRQconst {
  8016  			break
  8017  		}
  8018  		if v_2.AuxInt != 32 {
  8019  			break
  8020  		}
  8021  		w := v_2.Args[0]
  8022  		x := v.Args[3]
  8023  		if x.Op != OpAMD64MOVLstoreidx4 {
  8024  			break
  8025  		}
  8026  		if x.AuxInt != i-4 {
  8027  			break
  8028  		}
  8029  		if x.Aux != s {
  8030  			break
  8031  		}
  8032  		if p != x.Args[0] {
  8033  			break
  8034  		}
  8035  		if idx != x.Args[1] {
  8036  			break
  8037  		}
  8038  		if w != x.Args[2] {
  8039  			break
  8040  		}
  8041  		mem := x.Args[3]
  8042  		if !(x.Uses == 1 && clobber(x)) {
  8043  			break
  8044  		}
  8045  		v.reset(OpAMD64MOVQstoreidx1)
  8046  		v.AuxInt = i - 4
  8047  		v.Aux = s
  8048  		v.AddArg(p)
  8049  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
  8050  		v0.AuxInt = 2
  8051  		v0.AddArg(idx)
  8052  		v.AddArg(v0)
  8053  		v.AddArg(w)
  8054  		v.AddArg(mem)
  8055  		return true
  8056  	}
  8057  	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
  8058  	// cond: x.Uses == 1 && clobber(x)
  8059  	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
  8060  	for {
  8061  		i := v.AuxInt
  8062  		s := v.Aux
  8063  		p := v.Args[0]
  8064  		idx := v.Args[1]
  8065  		v_2 := v.Args[2]
  8066  		if v_2.Op != OpAMD64SHRQconst {
  8067  			break
  8068  		}
  8069  		j := v_2.AuxInt
  8070  		w := v_2.Args[0]
  8071  		x := v.Args[3]
  8072  		if x.Op != OpAMD64MOVLstoreidx4 {
  8073  			break
  8074  		}
  8075  		if x.AuxInt != i-4 {
  8076  			break
  8077  		}
  8078  		if x.Aux != s {
  8079  			break
  8080  		}
  8081  		if p != x.Args[0] {
  8082  			break
  8083  		}
  8084  		if idx != x.Args[1] {
  8085  			break
  8086  		}
  8087  		w0 := x.Args[2]
  8088  		if w0.Op != OpAMD64SHRQconst {
  8089  			break
  8090  		}
  8091  		if w0.AuxInt != j-32 {
  8092  			break
  8093  		}
  8094  		if w != w0.Args[0] {
  8095  			break
  8096  		}
  8097  		mem := x.Args[3]
  8098  		if !(x.Uses == 1 && clobber(x)) {
  8099  			break
  8100  		}
  8101  		v.reset(OpAMD64MOVQstoreidx1)
  8102  		v.AuxInt = i - 4
  8103  		v.Aux = s
  8104  		v.AddArg(p)
  8105  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
  8106  		v0.AuxInt = 2
  8107  		v0.AddArg(idx)
  8108  		v.AddArg(v0)
  8109  		v.AddArg(w0)
  8110  		v.AddArg(mem)
  8111  		return true
  8112  	}
  8113  	return false
  8114  }
  8115  func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
  8116  	b := v.Block
  8117  	_ = b
  8118  	// match: (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem)
  8119  	// cond: is32Bit(off1+off2)
  8120  	// result: (MOVOload  [off1+off2] {sym} ptr mem)
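        	// Note (illustrative annotation, not from gen/AMD64.rules): the
        	// is32Bit guard exists because x86-64 addressing modes encode
        	// displacements as signed 32-bit immediates; if off1+off2 left that
        	// range the folded offset could not be encoded in one instruction.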
  8121  	for {
  8122  		off1 := v.AuxInt
  8123  		sym := v.Aux
  8124  		v_0 := v.Args[0]
  8125  		if v_0.Op != OpAMD64ADDQconst {
  8126  			break
  8127  		}
  8128  		off2 := v_0.AuxInt
  8129  		ptr := v_0.Args[0]
  8130  		mem := v.Args[1]
  8131  		if !(is32Bit(off1 + off2)) {
  8132  			break
  8133  		}
  8134  		v.reset(OpAMD64MOVOload)
  8135  		v.AuxInt = off1 + off2
  8136  		v.Aux = sym
  8137  		v.AddArg(ptr)
  8138  		v.AddArg(mem)
  8139  		return true
  8140  	}
  8141  	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  8142  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8143  	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
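        	// Note (illustrative annotation, not from gen/AMD64.rules): a LEAQ
        	// only computes an address (base+off2, plus sym2 if present), so it
        	// can be folded into the load. canMergeSym holds when at most one of
        	// sym1 and sym2 is non-nil, and mergeSym then keeps the non-nil one.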
  8144  	for {
  8145  		off1 := v.AuxInt
  8146  		sym1 := v.Aux
  8147  		v_0 := v.Args[0]
  8148  		if v_0.Op != OpAMD64LEAQ {
  8149  			break
  8150  		}
  8151  		off2 := v_0.AuxInt
  8152  		sym2 := v_0.Aux
  8153  		base := v_0.Args[0]
  8154  		mem := v.Args[1]
  8155  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8156  			break
  8157  		}
  8158  		v.reset(OpAMD64MOVOload)
  8159  		v.AuxInt = off1 + off2
  8160  		v.Aux = mergeSym(sym1, sym2)
  8161  		v.AddArg(base)
  8162  		v.AddArg(mem)
  8163  		return true
  8164  	}
  8165  	return false
  8166  }
  8167  func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
  8168  	b := v.Block
  8169  	_ = b
  8170  	// match: (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
  8171  	// cond: is32Bit(off1+off2)
  8172  	// result: (MOVOstore  [off1+off2] {sym} ptr val mem)
  8173  	for {
  8174  		off1 := v.AuxInt
  8175  		sym := v.Aux
  8176  		v_0 := v.Args[0]
  8177  		if v_0.Op != OpAMD64ADDQconst {
  8178  			break
  8179  		}
  8180  		off2 := v_0.AuxInt
  8181  		ptr := v_0.Args[0]
  8182  		val := v.Args[1]
  8183  		mem := v.Args[2]
  8184  		if !(is32Bit(off1 + off2)) {
  8185  			break
  8186  		}
  8187  		v.reset(OpAMD64MOVOstore)
  8188  		v.AuxInt = off1 + off2
  8189  		v.Aux = sym
  8190  		v.AddArg(ptr)
  8191  		v.AddArg(val)
  8192  		v.AddArg(mem)
  8193  		return true
  8194  	}
  8195  	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  8196  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8197  	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  8198  	for {
  8199  		off1 := v.AuxInt
  8200  		sym1 := v.Aux
  8201  		v_0 := v.Args[0]
  8202  		if v_0.Op != OpAMD64LEAQ {
  8203  			break
  8204  		}
  8205  		off2 := v_0.AuxInt
  8206  		sym2 := v_0.Aux
  8207  		base := v_0.Args[0]
  8208  		val := v.Args[1]
  8209  		mem := v.Args[2]
  8210  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8211  			break
  8212  		}
  8213  		v.reset(OpAMD64MOVOstore)
  8214  		v.AuxInt = off1 + off2
  8215  		v.Aux = mergeSym(sym1, sym2)
  8216  		v.AddArg(base)
  8217  		v.AddArg(val)
  8218  		v.AddArg(mem)
  8219  		return true
  8220  	}
  8221  	return false
  8222  }
  8223  func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
  8224  	b := v.Block
  8225  	_ = b
  8226  	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
  8227  	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
  8228  	// result: x
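        	// Note (illustrative annotation, not from gen/AMD64.rules): this is
        	// store-to-load forwarding. A same-width load from the same address,
        	// reading the memory state produced by that very store, must observe
        	// the stored value, so the load is rewritten to a Copy of x.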
  8229  	for {
  8230  		off := v.AuxInt
  8231  		sym := v.Aux
  8232  		ptr := v.Args[0]
  8233  		v_1 := v.Args[1]
  8234  		if v_1.Op != OpAMD64MOVQstore {
  8235  			break
  8236  		}
  8237  		off2 := v_1.AuxInt
  8238  		sym2 := v_1.Aux
  8239  		ptr2 := v_1.Args[0]
  8240  		x := v_1.Args[1]
  8241  		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
  8242  			break
  8243  		}
  8244  		v.reset(OpCopy)
  8245  		v.Type = x.Type
  8246  		v.AddArg(x)
  8247  		return true
  8248  	}
  8249  	// match: (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem)
  8250  	// cond: is32Bit(off1+off2)
  8251  	// result: (MOVQload  [off1+off2] {sym} ptr mem)
  8252  	for {
  8253  		off1 := v.AuxInt
  8254  		sym := v.Aux
  8255  		v_0 := v.Args[0]
  8256  		if v_0.Op != OpAMD64ADDQconst {
  8257  			break
  8258  		}
  8259  		off2 := v_0.AuxInt
  8260  		ptr := v_0.Args[0]
  8261  		mem := v.Args[1]
  8262  		if !(is32Bit(off1 + off2)) {
  8263  			break
  8264  		}
  8265  		v.reset(OpAMD64MOVQload)
  8266  		v.AuxInt = off1 + off2
  8267  		v.Aux = sym
  8268  		v.AddArg(ptr)
  8269  		v.AddArg(mem)
  8270  		return true
  8271  	}
  8272  	// match: (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  8273  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8274  	// result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
  8275  	for {
  8276  		off1 := v.AuxInt
  8277  		sym1 := v.Aux
  8278  		v_0 := v.Args[0]
  8279  		if v_0.Op != OpAMD64LEAQ {
  8280  			break
  8281  		}
  8282  		off2 := v_0.AuxInt
  8283  		sym2 := v_0.Aux
  8284  		base := v_0.Args[0]
  8285  		mem := v.Args[1]
  8286  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8287  			break
  8288  		}
  8289  		v.reset(OpAMD64MOVQload)
  8290  		v.AuxInt = off1 + off2
  8291  		v.Aux = mergeSym(sym1, sym2)
  8292  		v.AddArg(base)
  8293  		v.AddArg(mem)
  8294  		return true
  8295  	}
  8296  	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
  8297  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8298  	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  8299  	for {
  8300  		off1 := v.AuxInt
  8301  		sym1 := v.Aux
  8302  		v_0 := v.Args[0]
  8303  		if v_0.Op != OpAMD64LEAQ1 {
  8304  			break
  8305  		}
  8306  		off2 := v_0.AuxInt
  8307  		sym2 := v_0.Aux
  8308  		ptr := v_0.Args[0]
  8309  		idx := v_0.Args[1]
  8310  		mem := v.Args[1]
  8311  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8312  			break
  8313  		}
  8314  		v.reset(OpAMD64MOVQloadidx1)
  8315  		v.AuxInt = off1 + off2
  8316  		v.Aux = mergeSym(sym1, sym2)
  8317  		v.AddArg(ptr)
  8318  		v.AddArg(idx)
  8319  		v.AddArg(mem)
  8320  		return true
  8321  	}
  8322  	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
  8323  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8324  	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  8325  	for {
  8326  		off1 := v.AuxInt
  8327  		sym1 := v.Aux
  8328  		v_0 := v.Args[0]
  8329  		if v_0.Op != OpAMD64LEAQ8 {
  8330  			break
  8331  		}
  8332  		off2 := v_0.AuxInt
  8333  		sym2 := v_0.Aux
  8334  		ptr := v_0.Args[0]
  8335  		idx := v_0.Args[1]
  8336  		mem := v.Args[1]
  8337  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8338  			break
  8339  		}
  8340  		v.reset(OpAMD64MOVQloadidx8)
  8341  		v.AuxInt = off1 + off2
  8342  		v.Aux = mergeSym(sym1, sym2)
  8343  		v.AddArg(ptr)
  8344  		v.AddArg(idx)
  8345  		v.AddArg(mem)
  8346  		return true
  8347  	}
  8348  	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
  8349  	// cond: ptr.Op != OpSB
  8350  	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
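        	// Note (illustrative annotation, not from gen/AMD64.rules): OpSB is
        	// the static-base pseudo-register used to address globals; it is not
        	// an ordinary register that could be placed in a register-indexed
        	// addressing mode, hence the ptr.Op != OpSB guard.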
  8351  	for {
  8352  		off := v.AuxInt
  8353  		sym := v.Aux
  8354  		v_0 := v.Args[0]
  8355  		if v_0.Op != OpAMD64ADDQ {
  8356  			break
  8357  		}
  8358  		ptr := v_0.Args[0]
  8359  		idx := v_0.Args[1]
  8360  		mem := v.Args[1]
  8361  		if !(ptr.Op != OpSB) {
  8362  			break
  8363  		}
  8364  		v.reset(OpAMD64MOVQloadidx1)
  8365  		v.AuxInt = off
  8366  		v.Aux = sym
  8367  		v.AddArg(ptr)
  8368  		v.AddArg(idx)
  8369  		v.AddArg(mem)
  8370  		return true
  8371  	}
  8372  	return false
  8373  }
  8374  func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
  8375  	b := v.Block
  8376  	_ = b
  8377  	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
  8378  	// cond:
  8379  	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
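        	// Note (illustrative annotation, not from gen/AMD64.rules): idx<<3 is
        	// idx*8, so a x1-scaled load whose index is a left-shift by 3 matches
        	// the x8 addressing mode exactly.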
  8380  	for {
  8381  		c := v.AuxInt
  8382  		sym := v.Aux
  8383  		ptr := v.Args[0]
  8384  		v_1 := v.Args[1]
  8385  		if v_1.Op != OpAMD64SHLQconst {
  8386  			break
  8387  		}
  8388  		if v_1.AuxInt != 3 {
  8389  			break
  8390  		}
  8391  		idx := v_1.Args[0]
  8392  		mem := v.Args[2]
  8393  		v.reset(OpAMD64MOVQloadidx8)
  8394  		v.AuxInt = c
  8395  		v.Aux = sym
  8396  		v.AddArg(ptr)
  8397  		v.AddArg(idx)
  8398  		v.AddArg(mem)
  8399  		return true
  8400  	}
  8401  	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
  8402  	// cond:
  8403  	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
  8404  	for {
  8405  		c := v.AuxInt
  8406  		sym := v.Aux
  8407  		v_0 := v.Args[0]
  8408  		if v_0.Op != OpAMD64ADDQconst {
  8409  			break
  8410  		}
  8411  		d := v_0.AuxInt
  8412  		ptr := v_0.Args[0]
  8413  		idx := v.Args[1]
  8414  		mem := v.Args[2]
  8415  		v.reset(OpAMD64MOVQloadidx1)
  8416  		v.AuxInt = c + d
  8417  		v.Aux = sym
  8418  		v.AddArg(ptr)
  8419  		v.AddArg(idx)
  8420  		v.AddArg(mem)
  8421  		return true
  8422  	}
  8423  	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
  8424  	// cond:
  8425  	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
  8426  	for {
  8427  		c := v.AuxInt
  8428  		sym := v.Aux
  8429  		ptr := v.Args[0]
  8430  		v_1 := v.Args[1]
  8431  		if v_1.Op != OpAMD64ADDQconst {
  8432  			break
  8433  		}
  8434  		d := v_1.AuxInt
  8435  		idx := v_1.Args[0]
  8436  		mem := v.Args[2]
  8437  		v.reset(OpAMD64MOVQloadidx1)
  8438  		v.AuxInt = c + d
  8439  		v.Aux = sym
  8440  		v.AddArg(ptr)
  8441  		v.AddArg(idx)
  8442  		v.AddArg(mem)
  8443  		return true
  8444  	}
  8445  	return false
  8446  }
  8447  func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
  8448  	b := v.Block
  8449  	_ = b
  8450  	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
  8451  	// cond:
  8452  	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
  8453  	for {
  8454  		c := v.AuxInt
  8455  		sym := v.Aux
  8456  		v_0 := v.Args[0]
  8457  		if v_0.Op != OpAMD64ADDQconst {
  8458  			break
  8459  		}
  8460  		d := v_0.AuxInt
  8461  		ptr := v_0.Args[0]
  8462  		idx := v.Args[1]
  8463  		mem := v.Args[2]
  8464  		v.reset(OpAMD64MOVQloadidx8)
  8465  		v.AuxInt = c + d
  8466  		v.Aux = sym
  8467  		v.AddArg(ptr)
  8468  		v.AddArg(idx)
  8469  		v.AddArg(mem)
  8470  		return true
  8471  	}
  8472  	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
  8473  	// cond:
  8474  	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
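        	// Note (illustrative annotation, not from gen/AMD64.rules): in this
        	// addressing mode the index is scaled by 8, so adding d to idx moves
        	// the effective address by 8*d bytes; the displacement must grow by
        	// 8*d rather than d.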
  8475  	for {
  8476  		c := v.AuxInt
  8477  		sym := v.Aux
  8478  		ptr := v.Args[0]
  8479  		v_1 := v.Args[1]
  8480  		if v_1.Op != OpAMD64ADDQconst {
  8481  			break
  8482  		}
  8483  		d := v_1.AuxInt
  8484  		idx := v_1.Args[0]
  8485  		mem := v.Args[2]
  8486  		v.reset(OpAMD64MOVQloadidx8)
  8487  		v.AuxInt = c + 8*d
  8488  		v.Aux = sym
  8489  		v.AddArg(ptr)
  8490  		v.AddArg(idx)
  8491  		v.AddArg(mem)
  8492  		return true
  8493  	}
  8494  	return false
  8495  }
  8496  func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
  8497  	b := v.Block
  8498  	_ = b
  8499  	// match: (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
  8500  	// cond: is32Bit(off1+off2)
  8501  	// result: (MOVQstore  [off1+off2] {sym} ptr val mem)
  8502  	for {
  8503  		off1 := v.AuxInt
  8504  		sym := v.Aux
  8505  		v_0 := v.Args[0]
  8506  		if v_0.Op != OpAMD64ADDQconst {
  8507  			break
  8508  		}
  8509  		off2 := v_0.AuxInt
  8510  		ptr := v_0.Args[0]
  8511  		val := v.Args[1]
  8512  		mem := v.Args[2]
  8513  		if !(is32Bit(off1 + off2)) {
  8514  			break
  8515  		}
  8516  		v.reset(OpAMD64MOVQstore)
  8517  		v.AuxInt = off1 + off2
  8518  		v.Aux = sym
  8519  		v.AddArg(ptr)
  8520  		v.AddArg(val)
  8521  		v.AddArg(mem)
  8522  		return true
  8523  	}
  8524  	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
  8525  	// cond: validValAndOff(c,off)
  8526  	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
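        	// Note (illustrative annotation, not from gen/AMD64.rules): a store
        	// of a constant becomes a store-immediate. makeValAndOff packs the
        	// value and offset into the single AuxInt field; validValAndOff
        	// checks that both fit in 32 bits, since MOVQ can only store a
        	// sign-extended 32-bit immediate.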
  8527  	for {
  8528  		off := v.AuxInt
  8529  		sym := v.Aux
  8530  		ptr := v.Args[0]
  8531  		v_1 := v.Args[1]
  8532  		if v_1.Op != OpAMD64MOVQconst {
  8533  			break
  8534  		}
  8535  		c := v_1.AuxInt
  8536  		mem := v.Args[2]
  8537  		if !(validValAndOff(c, off)) {
  8538  			break
  8539  		}
  8540  		v.reset(OpAMD64MOVQstoreconst)
  8541  		v.AuxInt = makeValAndOff(c, off)
  8542  		v.Aux = sym
  8543  		v.AddArg(ptr)
  8544  		v.AddArg(mem)
  8545  		return true
  8546  	}
  8547  	// match: (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  8548  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8549  	// result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  8550  	for {
  8551  		off1 := v.AuxInt
  8552  		sym1 := v.Aux
  8553  		v_0 := v.Args[0]
  8554  		if v_0.Op != OpAMD64LEAQ {
  8555  			break
  8556  		}
  8557  		off2 := v_0.AuxInt
  8558  		sym2 := v_0.Aux
  8559  		base := v_0.Args[0]
  8560  		val := v.Args[1]
  8561  		mem := v.Args[2]
  8562  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8563  			break
  8564  		}
  8565  		v.reset(OpAMD64MOVQstore)
  8566  		v.AuxInt = off1 + off2
  8567  		v.Aux = mergeSym(sym1, sym2)
  8568  		v.AddArg(base)
  8569  		v.AddArg(val)
  8570  		v.AddArg(mem)
  8571  		return true
  8572  	}
  8573  	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
  8574  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8575  	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  8576  	for {
  8577  		off1 := v.AuxInt
  8578  		sym1 := v.Aux
  8579  		v_0 := v.Args[0]
  8580  		if v_0.Op != OpAMD64LEAQ1 {
  8581  			break
  8582  		}
  8583  		off2 := v_0.AuxInt
  8584  		sym2 := v_0.Aux
  8585  		ptr := v_0.Args[0]
  8586  		idx := v_0.Args[1]
  8587  		val := v.Args[1]
  8588  		mem := v.Args[2]
  8589  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8590  			break
  8591  		}
  8592  		v.reset(OpAMD64MOVQstoreidx1)
  8593  		v.AuxInt = off1 + off2
  8594  		v.Aux = mergeSym(sym1, sym2)
  8595  		v.AddArg(ptr)
  8596  		v.AddArg(idx)
  8597  		v.AddArg(val)
  8598  		v.AddArg(mem)
  8599  		return true
  8600  	}
  8601  	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
  8602  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  8603  	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  8604  	for {
  8605  		off1 := v.AuxInt
  8606  		sym1 := v.Aux
  8607  		v_0 := v.Args[0]
  8608  		if v_0.Op != OpAMD64LEAQ8 {
  8609  			break
  8610  		}
  8611  		off2 := v_0.AuxInt
  8612  		sym2 := v_0.Aux
  8613  		ptr := v_0.Args[0]
  8614  		idx := v_0.Args[1]
  8615  		val := v.Args[1]
  8616  		mem := v.Args[2]
  8617  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  8618  			break
  8619  		}
  8620  		v.reset(OpAMD64MOVQstoreidx8)
  8621  		v.AuxInt = off1 + off2
  8622  		v.Aux = mergeSym(sym1, sym2)
  8623  		v.AddArg(ptr)
  8624  		v.AddArg(idx)
  8625  		v.AddArg(val)
  8626  		v.AddArg(mem)
  8627  		return true
  8628  	}
  8629  	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
  8630  	// cond: ptr.Op != OpSB
  8631  	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
  8632  	for {
  8633  		off := v.AuxInt
  8634  		sym := v.Aux
  8635  		v_0 := v.Args[0]
  8636  		if v_0.Op != OpAMD64ADDQ {
  8637  			break
  8638  		}
  8639  		ptr := v_0.Args[0]
  8640  		idx := v_0.Args[1]
  8641  		val := v.Args[1]
  8642  		mem := v.Args[2]
  8643  		if !(ptr.Op != OpSB) {
  8644  			break
  8645  		}
  8646  		v.reset(OpAMD64MOVQstoreidx1)
  8647  		v.AuxInt = off
  8648  		v.Aux = sym
  8649  		v.AddArg(ptr)
  8650  		v.AddArg(idx)
  8651  		v.AddArg(val)
  8652  		v.AddArg(mem)
  8653  		return true
  8654  	}
  8655  	return false
  8656  }
  8657  func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
  8658  	b := v.Block
  8659  	_ = b
  8660  	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
  8661  	// cond: ValAndOff(sc).canAdd(off)
  8662  	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
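        	// Note (illustrative annotation, not from gen/AMD64.rules): AuxInt
        	// already holds a packed ValAndOff here, so instead of a plain
        	// is32Bit test the rule uses ValAndOff(sc).canAdd(off), which checks
        	// that the offset half can absorb off without overflowing its 32-bit
        	// field.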
  8663  	for {
  8664  		sc := v.AuxInt
  8665  		s := v.Aux
  8666  		v_0 := v.Args[0]
  8667  		if v_0.Op != OpAMD64ADDQconst {
  8668  			break
  8669  		}
  8670  		off := v_0.AuxInt
  8671  		ptr := v_0.Args[0]
  8672  		mem := v.Args[1]
  8673  		if !(ValAndOff(sc).canAdd(off)) {
  8674  			break
  8675  		}
  8676  		v.reset(OpAMD64MOVQstoreconst)
  8677  		v.AuxInt = ValAndOff(sc).add(off)
  8678  		v.Aux = s
  8679  		v.AddArg(ptr)
  8680  		v.AddArg(mem)
  8681  		return true
  8682  	}
  8683  	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
  8684  	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
  8685  	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
  8686  	for {
  8687  		sc := v.AuxInt
  8688  		sym1 := v.Aux
  8689  		v_0 := v.Args[0]
  8690  		if v_0.Op != OpAMD64LEAQ {
  8691  			break
  8692  		}
  8693  		off := v_0.AuxInt
  8694  		sym2 := v_0.Aux
  8695  		ptr := v_0.Args[0]
  8696  		mem := v.Args[1]
  8697  		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
  8698  			break
  8699  		}
  8700  		v.reset(OpAMD64MOVQstoreconst)
  8701  		v.AuxInt = ValAndOff(sc).add(off)
  8702  		v.Aux = mergeSym(sym1, sym2)
  8703  		v.AddArg(ptr)
  8704  		v.AddArg(mem)
  8705  		return true
  8706  	}
  8707  	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
  8708  	// cond: canMergeSym(sym1, sym2)
  8709  	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  8710  	for {
  8711  		x := v.AuxInt
  8712  		sym1 := v.Aux
  8713  		v_0 := v.Args[0]
  8714  		if v_0.Op != OpAMD64LEAQ1 {
  8715  			break
  8716  		}
  8717  		off := v_0.AuxInt
  8718  		sym2 := v_0.Aux
  8719  		ptr := v_0.Args[0]
  8720  		idx := v_0.Args[1]
  8721  		mem := v.Args[1]
  8722  		if !(canMergeSym(sym1, sym2)) {
  8723  			break
  8724  		}
  8725  		v.reset(OpAMD64MOVQstoreconstidx1)
  8726  		v.AuxInt = ValAndOff(x).add(off)
  8727  		v.Aux = mergeSym(sym1, sym2)
  8728  		v.AddArg(ptr)
  8729  		v.AddArg(idx)
  8730  		v.AddArg(mem)
  8731  		return true
  8732  	}
  8733  	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
  8734  	// cond: canMergeSym(sym1, sym2)
  8735  	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
  8736  	for {
  8737  		x := v.AuxInt
  8738  		sym1 := v.Aux
  8739  		v_0 := v.Args[0]
  8740  		if v_0.Op != OpAMD64LEAQ8 {
  8741  			break
  8742  		}
  8743  		off := v_0.AuxInt
  8744  		sym2 := v_0.Aux
  8745  		ptr := v_0.Args[0]
  8746  		idx := v_0.Args[1]
  8747  		mem := v.Args[1]
  8748  		if !(canMergeSym(sym1, sym2)) {
  8749  			break
  8750  		}
  8751  		v.reset(OpAMD64MOVQstoreconstidx8)
  8752  		v.AuxInt = ValAndOff(x).add(off)
  8753  		v.Aux = mergeSym(sym1, sym2)
  8754  		v.AddArg(ptr)
  8755  		v.AddArg(idx)
  8756  		v.AddArg(mem)
  8757  		return true
  8758  	}
  8759  	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
  8760  	// cond:
  8761  	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
  8762  	for {
  8763  		x := v.AuxInt
  8764  		sym := v.Aux
  8765  		v_0 := v.Args[0]
  8766  		if v_0.Op != OpAMD64ADDQ {
  8767  			break
  8768  		}
  8769  		ptr := v_0.Args[0]
  8770  		idx := v_0.Args[1]
  8771  		mem := v.Args[1]
  8772  		v.reset(OpAMD64MOVQstoreconstidx1)
  8773  		v.AuxInt = x
  8774  		v.Aux = sym
  8775  		v.AddArg(ptr)
  8776  		v.AddArg(idx)
  8777  		v.AddArg(mem)
  8778  		return true
  8779  	}
  8780  	return false
  8781  }
  8782  func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
  8783  	b := v.Block
  8784  	_ = b
  8785  	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
  8786  	// cond:
  8787  	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
  8788  	for {
  8789  		c := v.AuxInt
  8790  		sym := v.Aux
  8791  		ptr := v.Args[0]
  8792  		v_1 := v.Args[1]
  8793  		if v_1.Op != OpAMD64SHLQconst {
  8794  			break
  8795  		}
  8796  		if v_1.AuxInt != 3 {
  8797  			break
  8798  		}
  8799  		idx := v_1.Args[0]
  8800  		mem := v.Args[2]
  8801  		v.reset(OpAMD64MOVQstoreconstidx8)
  8802  		v.AuxInt = c
  8803  		v.Aux = sym
  8804  		v.AddArg(ptr)
  8805  		v.AddArg(idx)
  8806  		v.AddArg(mem)
  8807  		return true
  8808  	}
  8809  	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
  8810  	// cond:
  8811  	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  8812  	for {
  8813  		x := v.AuxInt
  8814  		sym := v.Aux
  8815  		v_0 := v.Args[0]
  8816  		if v_0.Op != OpAMD64ADDQconst {
  8817  			break
  8818  		}
  8819  		c := v_0.AuxInt
  8820  		ptr := v_0.Args[0]
  8821  		idx := v.Args[1]
  8822  		mem := v.Args[2]
  8823  		v.reset(OpAMD64MOVQstoreconstidx1)
  8824  		v.AuxInt = ValAndOff(x).add(c)
  8825  		v.Aux = sym
  8826  		v.AddArg(ptr)
  8827  		v.AddArg(idx)
  8828  		v.AddArg(mem)
  8829  		return true
  8830  	}
  8831  	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
  8832  	// cond:
  8833  	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  8834  	for {
  8835  		x := v.AuxInt
  8836  		sym := v.Aux
  8837  		ptr := v.Args[0]
  8838  		v_1 := v.Args[1]
  8839  		if v_1.Op != OpAMD64ADDQconst {
  8840  			break
  8841  		}
  8842  		c := v_1.AuxInt
  8843  		idx := v_1.Args[0]
  8844  		mem := v.Args[2]
  8845  		v.reset(OpAMD64MOVQstoreconstidx1)
  8846  		v.AuxInt = ValAndOff(x).add(c)
  8847  		v.Aux = sym
  8848  		v.AddArg(ptr)
  8849  		v.AddArg(idx)
  8850  		v.AddArg(mem)
  8851  		return true
  8852  	}
  8853  	return false
  8854  }
  8855  func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
  8856  	b := v.Block
  8857  	_ = b
  8858  	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
  8859  	// cond:
  8860  	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
  8861  	for {
  8862  		x := v.AuxInt
  8863  		sym := v.Aux
  8864  		v_0 := v.Args[0]
  8865  		if v_0.Op != OpAMD64ADDQconst {
  8866  			break
  8867  		}
  8868  		c := v_0.AuxInt
  8869  		ptr := v_0.Args[0]
  8870  		idx := v.Args[1]
  8871  		mem := v.Args[2]
  8872  		v.reset(OpAMD64MOVQstoreconstidx8)
  8873  		v.AuxInt = ValAndOff(x).add(c)
  8874  		v.Aux = sym
  8875  		v.AddArg(ptr)
  8876  		v.AddArg(idx)
  8877  		v.AddArg(mem)
  8878  		return true
  8879  	}
  8880  	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
  8881  	// cond:
  8882  	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
  8883  	for {
  8884  		x := v.AuxInt
  8885  		sym := v.Aux
  8886  		ptr := v.Args[0]
  8887  		v_1 := v.Args[1]
  8888  		if v_1.Op != OpAMD64ADDQconst {
  8889  			break
  8890  		}
  8891  		c := v_1.AuxInt
  8892  		idx := v_1.Args[0]
  8893  		mem := v.Args[2]
  8894  		v.reset(OpAMD64MOVQstoreconstidx8)
  8895  		v.AuxInt = ValAndOff(x).add(8 * c)
  8896  		v.Aux = sym
  8897  		v.AddArg(ptr)
  8898  		v.AddArg(idx)
  8899  		v.AddArg(mem)
  8900  		return true
  8901  	}
  8902  	return false
  8903  }
  8904  func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
  8905  	b := v.Block
  8906  	_ = b
  8907  	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
  8908  	// cond:
  8909  	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
  8910  	for {
  8911  		c := v.AuxInt
  8912  		sym := v.Aux
  8913  		ptr := v.Args[0]
  8914  		v_1 := v.Args[1]
  8915  		if v_1.Op != OpAMD64SHLQconst {
  8916  			break
  8917  		}
  8918  		if v_1.AuxInt != 3 {
  8919  			break
  8920  		}
  8921  		idx := v_1.Args[0]
  8922  		val := v.Args[2]
  8923  		mem := v.Args[3]
  8924  		v.reset(OpAMD64MOVQstoreidx8)
  8925  		v.AuxInt = c
  8926  		v.Aux = sym
  8927  		v.AddArg(ptr)
  8928  		v.AddArg(idx)
  8929  		v.AddArg(val)
  8930  		v.AddArg(mem)
  8931  		return true
  8932  	}
  8933  	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  8934  	// cond:
  8935  	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
  8936  	for {
  8937  		c := v.AuxInt
  8938  		sym := v.Aux
  8939  		v_0 := v.Args[0]
  8940  		if v_0.Op != OpAMD64ADDQconst {
  8941  			break
  8942  		}
  8943  		d := v_0.AuxInt
  8944  		ptr := v_0.Args[0]
  8945  		idx := v.Args[1]
  8946  		val := v.Args[2]
  8947  		mem := v.Args[3]
  8948  		v.reset(OpAMD64MOVQstoreidx1)
  8949  		v.AuxInt = c + d
  8950  		v.Aux = sym
  8951  		v.AddArg(ptr)
  8952  		v.AddArg(idx)
  8953  		v.AddArg(val)
  8954  		v.AddArg(mem)
  8955  		return true
  8956  	}
  8957  	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  8958  	// cond:
  8959  	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
  8960  	for {
  8961  		c := v.AuxInt
  8962  		sym := v.Aux
  8963  		ptr := v.Args[0]
  8964  		v_1 := v.Args[1]
  8965  		if v_1.Op != OpAMD64ADDQconst {
  8966  			break
  8967  		}
  8968  		d := v_1.AuxInt
  8969  		idx := v_1.Args[0]
  8970  		val := v.Args[2]
  8971  		mem := v.Args[3]
  8972  		v.reset(OpAMD64MOVQstoreidx1)
  8973  		v.AuxInt = c + d
  8974  		v.Aux = sym
  8975  		v.AddArg(ptr)
  8976  		v.AddArg(idx)
  8977  		v.AddArg(val)
  8978  		v.AddArg(mem)
  8979  		return true
  8980  	}
  8981  	return false
  8982  }
  8983  func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
  8984  	b := v.Block
  8985  	_ = b
  8986  	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  8987  	// cond:
  8988  	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
  8989  	for {
  8990  		c := v.AuxInt
  8991  		sym := v.Aux
  8992  		v_0 := v.Args[0]
  8993  		if v_0.Op != OpAMD64ADDQconst {
  8994  			break
  8995  		}
  8996  		d := v_0.AuxInt
  8997  		ptr := v_0.Args[0]
  8998  		idx := v.Args[1]
  8999  		val := v.Args[2]
  9000  		mem := v.Args[3]
  9001  		v.reset(OpAMD64MOVQstoreidx8)
  9002  		v.AuxInt = c + d
  9003  		v.Aux = sym
  9004  		v.AddArg(ptr)
  9005  		v.AddArg(idx)
  9006  		v.AddArg(val)
  9007  		v.AddArg(mem)
  9008  		return true
  9009  	}
  9010  	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  9011  	// cond:
  9012  	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
  9013  	for {
  9014  		c := v.AuxInt
  9015  		sym := v.Aux
  9016  		ptr := v.Args[0]
  9017  		v_1 := v.Args[1]
  9018  		if v_1.Op != OpAMD64ADDQconst {
  9019  			break
  9020  		}
  9021  		d := v_1.AuxInt
  9022  		idx := v_1.Args[0]
  9023  		val := v.Args[2]
  9024  		mem := v.Args[3]
  9025  		v.reset(OpAMD64MOVQstoreidx8)
  9026  		v.AuxInt = c + 8*d
  9027  		v.Aux = sym
  9028  		v.AddArg(ptr)
  9029  		v.AddArg(idx)
  9030  		v.AddArg(val)
  9031  		v.AddArg(mem)
  9032  		return true
  9033  	}
  9034  	return false
  9035  }
  9036  func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
  9037  	b := v.Block
  9038  	_ = b
  9039  	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
  9040  	// cond: is32Bit(off1+off2)
  9041  	// result: (MOVSDload [off1+off2] {sym} ptr mem)
  9042  	for {
  9043  		off1 := v.AuxInt
  9044  		sym := v.Aux
  9045  		v_0 := v.Args[0]
  9046  		if v_0.Op != OpAMD64ADDQconst {
  9047  			break
  9048  		}
  9049  		off2 := v_0.AuxInt
  9050  		ptr := v_0.Args[0]
  9051  		mem := v.Args[1]
  9052  		if !(is32Bit(off1 + off2)) {
  9053  			break
  9054  		}
  9055  		v.reset(OpAMD64MOVSDload)
  9056  		v.AuxInt = off1 + off2
  9057  		v.Aux = sym
  9058  		v.AddArg(ptr)
  9059  		v.AddArg(mem)
  9060  		return true
  9061  	}
  9062  	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  9063  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9064  	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  9065  	for {
  9066  		off1 := v.AuxInt
  9067  		sym1 := v.Aux
  9068  		v_0 := v.Args[0]
  9069  		if v_0.Op != OpAMD64LEAQ {
  9070  			break
  9071  		}
  9072  		off2 := v_0.AuxInt
  9073  		sym2 := v_0.Aux
  9074  		base := v_0.Args[0]
  9075  		mem := v.Args[1]
  9076  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9077  			break
  9078  		}
  9079  		v.reset(OpAMD64MOVSDload)
  9080  		v.AuxInt = off1 + off2
  9081  		v.Aux = mergeSym(sym1, sym2)
  9082  		v.AddArg(base)
  9083  		v.AddArg(mem)
  9084  		return true
  9085  	}
  9086  	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
  9087  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9088  	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  9089  	for {
  9090  		off1 := v.AuxInt
  9091  		sym1 := v.Aux
  9092  		v_0 := v.Args[0]
  9093  		if v_0.Op != OpAMD64LEAQ1 {
  9094  			break
  9095  		}
  9096  		off2 := v_0.AuxInt
  9097  		sym2 := v_0.Aux
  9098  		ptr := v_0.Args[0]
  9099  		idx := v_0.Args[1]
  9100  		mem := v.Args[1]
  9101  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9102  			break
  9103  		}
  9104  		v.reset(OpAMD64MOVSDloadidx1)
  9105  		v.AuxInt = off1 + off2
  9106  		v.Aux = mergeSym(sym1, sym2)
  9107  		v.AddArg(ptr)
  9108  		v.AddArg(idx)
  9109  		v.AddArg(mem)
  9110  		return true
  9111  	}
  9112  	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
  9113  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9114  	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  9115  	for {
  9116  		off1 := v.AuxInt
  9117  		sym1 := v.Aux
  9118  		v_0 := v.Args[0]
  9119  		if v_0.Op != OpAMD64LEAQ8 {
  9120  			break
  9121  		}
  9122  		off2 := v_0.AuxInt
  9123  		sym2 := v_0.Aux
  9124  		ptr := v_0.Args[0]
  9125  		idx := v_0.Args[1]
  9126  		mem := v.Args[1]
  9127  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9128  			break
  9129  		}
  9130  		v.reset(OpAMD64MOVSDloadidx8)
  9131  		v.AuxInt = off1 + off2
  9132  		v.Aux = mergeSym(sym1, sym2)
  9133  		v.AddArg(ptr)
  9134  		v.AddArg(idx)
  9135  		v.AddArg(mem)
  9136  		return true
  9137  	}
  9138  	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
  9139  	// cond: ptr.Op != OpSB
  9140  	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
  9141  	for {
  9142  		off := v.AuxInt
  9143  		sym := v.Aux
  9144  		v_0 := v.Args[0]
  9145  		if v_0.Op != OpAMD64ADDQ {
  9146  			break
  9147  		}
  9148  		ptr := v_0.Args[0]
  9149  		idx := v_0.Args[1]
  9150  		mem := v.Args[1]
  9151  		if !(ptr.Op != OpSB) {
  9152  			break
  9153  		}
  9154  		v.reset(OpAMD64MOVSDloadidx1)
  9155  		v.AuxInt = off
  9156  		v.Aux = sym
  9157  		v.AddArg(ptr)
  9158  		v.AddArg(idx)
  9159  		v.AddArg(mem)
  9160  		return true
  9161  	}
  9162  	return false
  9163  }
  9164  func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
  9165  	b := v.Block
  9166  	_ = b
  9167  	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
  9168  	// cond:
  9169  	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
  9170  	for {
  9171  		c := v.AuxInt
  9172  		sym := v.Aux
  9173  		v_0 := v.Args[0]
  9174  		if v_0.Op != OpAMD64ADDQconst {
  9175  			break
  9176  		}
  9177  		d := v_0.AuxInt
  9178  		ptr := v_0.Args[0]
  9179  		idx := v.Args[1]
  9180  		mem := v.Args[2]
  9181  		v.reset(OpAMD64MOVSDloadidx1)
  9182  		v.AuxInt = c + d
  9183  		v.Aux = sym
  9184  		v.AddArg(ptr)
  9185  		v.AddArg(idx)
  9186  		v.AddArg(mem)
  9187  		return true
  9188  	}
  9189  	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
  9190  	// cond:
  9191  	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
  9192  	for {
  9193  		c := v.AuxInt
  9194  		sym := v.Aux
  9195  		ptr := v.Args[0]
  9196  		v_1 := v.Args[1]
  9197  		if v_1.Op != OpAMD64ADDQconst {
  9198  			break
  9199  		}
  9200  		d := v_1.AuxInt
  9201  		idx := v_1.Args[0]
  9202  		mem := v.Args[2]
  9203  		v.reset(OpAMD64MOVSDloadidx1)
  9204  		v.AuxInt = c + d
  9205  		v.Aux = sym
  9206  		v.AddArg(ptr)
  9207  		v.AddArg(idx)
  9208  		v.AddArg(mem)
  9209  		return true
  9210  	}
  9211  	return false
  9212  }
  9213  func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
  9214  	b := v.Block
  9215  	_ = b
  9216  	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
  9217  	// cond:
  9218  	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
  9219  	for {
  9220  		c := v.AuxInt
  9221  		sym := v.Aux
  9222  		v_0 := v.Args[0]
  9223  		if v_0.Op != OpAMD64ADDQconst {
  9224  			break
  9225  		}
  9226  		d := v_0.AuxInt
  9227  		ptr := v_0.Args[0]
  9228  		idx := v.Args[1]
  9229  		mem := v.Args[2]
  9230  		v.reset(OpAMD64MOVSDloadidx8)
  9231  		v.AuxInt = c + d
  9232  		v.Aux = sym
  9233  		v.AddArg(ptr)
  9234  		v.AddArg(idx)
  9235  		v.AddArg(mem)
  9236  		return true
  9237  	}
  9238  	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
  9239  	// cond:
  9240  	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
  9241  	for {
  9242  		c := v.AuxInt
  9243  		sym := v.Aux
  9244  		ptr := v.Args[0]
  9245  		v_1 := v.Args[1]
  9246  		if v_1.Op != OpAMD64ADDQconst {
  9247  			break
  9248  		}
  9249  		d := v_1.AuxInt
  9250  		idx := v_1.Args[0]
  9251  		mem := v.Args[2]
  9252  		v.reset(OpAMD64MOVSDloadidx8)
  9253  		v.AuxInt = c + 8*d
  9254  		v.Aux = sym
  9255  		v.AddArg(ptr)
  9256  		v.AddArg(idx)
  9257  		v.AddArg(mem)
  9258  		return true
  9259  	}
  9260  	return false
  9261  }
  9262  func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
  9263  	b := v.Block
  9264  	_ = b
  9265  	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
  9266  	// cond: is32Bit(off1+off2)
  9267  	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
  9268  	for {
  9269  		off1 := v.AuxInt
  9270  		sym := v.Aux
  9271  		v_0 := v.Args[0]
  9272  		if v_0.Op != OpAMD64ADDQconst {
  9273  			break
  9274  		}
  9275  		off2 := v_0.AuxInt
  9276  		ptr := v_0.Args[0]
  9277  		val := v.Args[1]
  9278  		mem := v.Args[2]
  9279  		if !(is32Bit(off1 + off2)) {
  9280  			break
  9281  		}
  9282  		v.reset(OpAMD64MOVSDstore)
  9283  		v.AuxInt = off1 + off2
  9284  		v.Aux = sym
  9285  		v.AddArg(ptr)
  9286  		v.AddArg(val)
  9287  		v.AddArg(mem)
  9288  		return true
  9289  	}
  9290  	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  9291  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9292  	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  9293  	for {
  9294  		off1 := v.AuxInt
  9295  		sym1 := v.Aux
  9296  		v_0 := v.Args[0]
  9297  		if v_0.Op != OpAMD64LEAQ {
  9298  			break
  9299  		}
  9300  		off2 := v_0.AuxInt
  9301  		sym2 := v_0.Aux
  9302  		base := v_0.Args[0]
  9303  		val := v.Args[1]
  9304  		mem := v.Args[2]
  9305  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9306  			break
  9307  		}
  9308  		v.reset(OpAMD64MOVSDstore)
  9309  		v.AuxInt = off1 + off2
  9310  		v.Aux = mergeSym(sym1, sym2)
  9311  		v.AddArg(base)
  9312  		v.AddArg(val)
  9313  		v.AddArg(mem)
  9314  		return true
  9315  	}
  9316  	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
  9317  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9318  	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  9319  	for {
  9320  		off1 := v.AuxInt
  9321  		sym1 := v.Aux
  9322  		v_0 := v.Args[0]
  9323  		if v_0.Op != OpAMD64LEAQ1 {
  9324  			break
  9325  		}
  9326  		off2 := v_0.AuxInt
  9327  		sym2 := v_0.Aux
  9328  		ptr := v_0.Args[0]
  9329  		idx := v_0.Args[1]
  9330  		val := v.Args[1]
  9331  		mem := v.Args[2]
  9332  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9333  			break
  9334  		}
  9335  		v.reset(OpAMD64MOVSDstoreidx1)
  9336  		v.AuxInt = off1 + off2
  9337  		v.Aux = mergeSym(sym1, sym2)
  9338  		v.AddArg(ptr)
  9339  		v.AddArg(idx)
  9340  		v.AddArg(val)
  9341  		v.AddArg(mem)
  9342  		return true
  9343  	}
  9344  	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
  9345  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9346  	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  9347  	for {
  9348  		off1 := v.AuxInt
  9349  		sym1 := v.Aux
  9350  		v_0 := v.Args[0]
  9351  		if v_0.Op != OpAMD64LEAQ8 {
  9352  			break
  9353  		}
  9354  		off2 := v_0.AuxInt
  9355  		sym2 := v_0.Aux
  9356  		ptr := v_0.Args[0]
  9357  		idx := v_0.Args[1]
  9358  		val := v.Args[1]
  9359  		mem := v.Args[2]
  9360  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9361  			break
  9362  		}
  9363  		v.reset(OpAMD64MOVSDstoreidx8)
  9364  		v.AuxInt = off1 + off2
  9365  		v.Aux = mergeSym(sym1, sym2)
  9366  		v.AddArg(ptr)
  9367  		v.AddArg(idx)
  9368  		v.AddArg(val)
  9369  		v.AddArg(mem)
  9370  		return true
  9371  	}
  9372  	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
  9373  	// cond: ptr.Op != OpSB
  9374  	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
  9375  	for {
  9376  		off := v.AuxInt
  9377  		sym := v.Aux
  9378  		v_0 := v.Args[0]
  9379  		if v_0.Op != OpAMD64ADDQ {
  9380  			break
  9381  		}
  9382  		ptr := v_0.Args[0]
  9383  		idx := v_0.Args[1]
  9384  		val := v.Args[1]
  9385  		mem := v.Args[2]
  9386  		if !(ptr.Op != OpSB) {
  9387  			break
  9388  		}
  9389  		v.reset(OpAMD64MOVSDstoreidx1)
  9390  		v.AuxInt = off
  9391  		v.Aux = sym
  9392  		v.AddArg(ptr)
  9393  		v.AddArg(idx)
  9394  		v.AddArg(val)
  9395  		v.AddArg(mem)
  9396  		return true
  9397  	}
  9398  	return false
  9399  }
  9400  func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
  9401  	b := v.Block
  9402  	_ = b
  9403  	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  9404  	// cond:
  9405  	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
  9406  	for {
  9407  		c := v.AuxInt
  9408  		sym := v.Aux
  9409  		v_0 := v.Args[0]
  9410  		if v_0.Op != OpAMD64ADDQconst {
  9411  			break
  9412  		}
  9413  		d := v_0.AuxInt
  9414  		ptr := v_0.Args[0]
  9415  		idx := v.Args[1]
  9416  		val := v.Args[2]
  9417  		mem := v.Args[3]
  9418  		v.reset(OpAMD64MOVSDstoreidx1)
  9419  		v.AuxInt = c + d
  9420  		v.Aux = sym
  9421  		v.AddArg(ptr)
  9422  		v.AddArg(idx)
  9423  		v.AddArg(val)
  9424  		v.AddArg(mem)
  9425  		return true
  9426  	}
  9427  	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  9428  	// cond:
  9429  	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
  9430  	for {
  9431  		c := v.AuxInt
  9432  		sym := v.Aux
  9433  		ptr := v.Args[0]
  9434  		v_1 := v.Args[1]
  9435  		if v_1.Op != OpAMD64ADDQconst {
  9436  			break
  9437  		}
  9438  		d := v_1.AuxInt
  9439  		idx := v_1.Args[0]
  9440  		val := v.Args[2]
  9441  		mem := v.Args[3]
  9442  		v.reset(OpAMD64MOVSDstoreidx1)
  9443  		v.AuxInt = c + d
  9444  		v.Aux = sym
  9445  		v.AddArg(ptr)
  9446  		v.AddArg(idx)
  9447  		v.AddArg(val)
  9448  		v.AddArg(mem)
  9449  		return true
  9450  	}
  9451  	return false
  9452  }
  9453  func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
  9454  	b := v.Block
  9455  	_ = b
  9456  	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  9457  	// cond:
  9458  	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
  9459  	for {
  9460  		c := v.AuxInt
  9461  		sym := v.Aux
  9462  		v_0 := v.Args[0]
  9463  		if v_0.Op != OpAMD64ADDQconst {
  9464  			break
  9465  		}
  9466  		d := v_0.AuxInt
  9467  		ptr := v_0.Args[0]
  9468  		idx := v.Args[1]
  9469  		val := v.Args[2]
  9470  		mem := v.Args[3]
  9471  		v.reset(OpAMD64MOVSDstoreidx8)
  9472  		v.AuxInt = c + d
  9473  		v.Aux = sym
  9474  		v.AddArg(ptr)
  9475  		v.AddArg(idx)
  9476  		v.AddArg(val)
  9477  		v.AddArg(mem)
  9478  		return true
  9479  	}
  9480  	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  9481  	// cond:
  9482  	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
  9483  	for {
  9484  		c := v.AuxInt
  9485  		sym := v.Aux
  9486  		ptr := v.Args[0]
  9487  		v_1 := v.Args[1]
  9488  		if v_1.Op != OpAMD64ADDQconst {
  9489  			break
  9490  		}
  9491  		d := v_1.AuxInt
  9492  		idx := v_1.Args[0]
  9493  		val := v.Args[2]
  9494  		mem := v.Args[3]
  9495  		v.reset(OpAMD64MOVSDstoreidx8)
  9496  		v.AuxInt = c + 8*d
  9497  		v.Aux = sym
  9498  		v.AddArg(ptr)
  9499  		v.AddArg(idx)
  9500  		v.AddArg(val)
  9501  		v.AddArg(mem)
  9502  		return true
  9503  	}
  9504  	return false
  9505  }
  9506  func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
  9507  	b := v.Block
  9508  	_ = b
  9509  	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
  9510  	// cond: is32Bit(off1+off2)
  9511  	// result: (MOVSSload [off1+off2] {sym} ptr mem)
  9512  	for {
  9513  		off1 := v.AuxInt
  9514  		sym := v.Aux
  9515  		v_0 := v.Args[0]
  9516  		if v_0.Op != OpAMD64ADDQconst {
  9517  			break
  9518  		}
  9519  		off2 := v_0.AuxInt
  9520  		ptr := v_0.Args[0]
  9521  		mem := v.Args[1]
  9522  		if !(is32Bit(off1 + off2)) {
  9523  			break
  9524  		}
  9525  		v.reset(OpAMD64MOVSSload)
  9526  		v.AuxInt = off1 + off2
  9527  		v.Aux = sym
  9528  		v.AddArg(ptr)
  9529  		v.AddArg(mem)
  9530  		return true
  9531  	}
  9532  	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  9533  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9534  	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
  9535  	for {
  9536  		off1 := v.AuxInt
  9537  		sym1 := v.Aux
  9538  		v_0 := v.Args[0]
  9539  		if v_0.Op != OpAMD64LEAQ {
  9540  			break
  9541  		}
  9542  		off2 := v_0.AuxInt
  9543  		sym2 := v_0.Aux
  9544  		base := v_0.Args[0]
  9545  		mem := v.Args[1]
  9546  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9547  			break
  9548  		}
  9549  		v.reset(OpAMD64MOVSSload)
  9550  		v.AuxInt = off1 + off2
  9551  		v.Aux = mergeSym(sym1, sym2)
  9552  		v.AddArg(base)
  9553  		v.AddArg(mem)
  9554  		return true
  9555  	}
  9556  	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
  9557  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9558  	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  9559  	for {
  9560  		off1 := v.AuxInt
  9561  		sym1 := v.Aux
  9562  		v_0 := v.Args[0]
  9563  		if v_0.Op != OpAMD64LEAQ1 {
  9564  			break
  9565  		}
  9566  		off2 := v_0.AuxInt
  9567  		sym2 := v_0.Aux
  9568  		ptr := v_0.Args[0]
  9569  		idx := v_0.Args[1]
  9570  		mem := v.Args[1]
  9571  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9572  			break
  9573  		}
  9574  		v.reset(OpAMD64MOVSSloadidx1)
  9575  		v.AuxInt = off1 + off2
  9576  		v.Aux = mergeSym(sym1, sym2)
  9577  		v.AddArg(ptr)
  9578  		v.AddArg(idx)
  9579  		v.AddArg(mem)
  9580  		return true
  9581  	}
  9582  	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
  9583  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9584  	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
  9585  	for {
  9586  		off1 := v.AuxInt
  9587  		sym1 := v.Aux
  9588  		v_0 := v.Args[0]
  9589  		if v_0.Op != OpAMD64LEAQ4 {
  9590  			break
  9591  		}
  9592  		off2 := v_0.AuxInt
  9593  		sym2 := v_0.Aux
  9594  		ptr := v_0.Args[0]
  9595  		idx := v_0.Args[1]
  9596  		mem := v.Args[1]
  9597  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9598  			break
  9599  		}
  9600  		v.reset(OpAMD64MOVSSloadidx4)
  9601  		v.AuxInt = off1 + off2
  9602  		v.Aux = mergeSym(sym1, sym2)
  9603  		v.AddArg(ptr)
  9604  		v.AddArg(idx)
  9605  		v.AddArg(mem)
  9606  		return true
  9607  	}
  9608  	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
  9609  	// cond: ptr.Op != OpSB
  9610  	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
  9611  	for {
  9612  		off := v.AuxInt
  9613  		sym := v.Aux
  9614  		v_0 := v.Args[0]
  9615  		if v_0.Op != OpAMD64ADDQ {
  9616  			break
  9617  		}
  9618  		ptr := v_0.Args[0]
  9619  		idx := v_0.Args[1]
  9620  		mem := v.Args[1]
  9621  		if !(ptr.Op != OpSB) {
  9622  			break
  9623  		}
  9624  		v.reset(OpAMD64MOVSSloadidx1)
  9625  		v.AuxInt = off
  9626  		v.Aux = sym
  9627  		v.AddArg(ptr)
  9628  		v.AddArg(idx)
  9629  		v.AddArg(mem)
  9630  		return true
  9631  	}
  9632  	return false
  9633  }
  9634  func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
  9635  	b := v.Block
  9636  	_ = b
  9637  	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
  9638  	// cond:
  9639  	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
  9640  	for {
  9641  		c := v.AuxInt
  9642  		sym := v.Aux
  9643  		v_0 := v.Args[0]
  9644  		if v_0.Op != OpAMD64ADDQconst {
  9645  			break
  9646  		}
  9647  		d := v_0.AuxInt
  9648  		ptr := v_0.Args[0]
  9649  		idx := v.Args[1]
  9650  		mem := v.Args[2]
  9651  		v.reset(OpAMD64MOVSSloadidx1)
  9652  		v.AuxInt = c + d
  9653  		v.Aux = sym
  9654  		v.AddArg(ptr)
  9655  		v.AddArg(idx)
  9656  		v.AddArg(mem)
  9657  		return true
  9658  	}
  9659  	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
  9660  	// cond:
  9661  	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
  9662  	for {
  9663  		c := v.AuxInt
  9664  		sym := v.Aux
  9665  		ptr := v.Args[0]
  9666  		v_1 := v.Args[1]
  9667  		if v_1.Op != OpAMD64ADDQconst {
  9668  			break
  9669  		}
  9670  		d := v_1.AuxInt
  9671  		idx := v_1.Args[0]
  9672  		mem := v.Args[2]
  9673  		v.reset(OpAMD64MOVSSloadidx1)
  9674  		v.AuxInt = c + d
  9675  		v.Aux = sym
  9676  		v.AddArg(ptr)
  9677  		v.AddArg(idx)
  9678  		v.AddArg(mem)
  9679  		return true
  9680  	}
  9681  	return false
  9682  }
  9683  func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
  9684  	b := v.Block
  9685  	_ = b
  9686  	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
  9687  	// cond:
  9688  	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
  9689  	for {
  9690  		c := v.AuxInt
  9691  		sym := v.Aux
  9692  		v_0 := v.Args[0]
  9693  		if v_0.Op != OpAMD64ADDQconst {
  9694  			break
  9695  		}
  9696  		d := v_0.AuxInt
  9697  		ptr := v_0.Args[0]
  9698  		idx := v.Args[1]
  9699  		mem := v.Args[2]
  9700  		v.reset(OpAMD64MOVSSloadidx4)
  9701  		v.AuxInt = c + d
  9702  		v.Aux = sym
  9703  		v.AddArg(ptr)
  9704  		v.AddArg(idx)
  9705  		v.AddArg(mem)
  9706  		return true
  9707  	}
  9708  	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
  9709  	// cond:
  9710  	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
  9711  	for {
  9712  		c := v.AuxInt
  9713  		sym := v.Aux
  9714  		ptr := v.Args[0]
  9715  		v_1 := v.Args[1]
  9716  		if v_1.Op != OpAMD64ADDQconst {
  9717  			break
  9718  		}
  9719  		d := v_1.AuxInt
  9720  		idx := v_1.Args[0]
  9721  		mem := v.Args[2]
  9722  		v.reset(OpAMD64MOVSSloadidx4)
  9723  		v.AuxInt = c + 4*d
  9724  		v.Aux = sym
  9725  		v.AddArg(ptr)
  9726  		v.AddArg(idx)
  9727  		v.AddArg(mem)
  9728  		return true
  9729  	}
  9730  	return false
  9731  }
  9732  func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
  9733  	b := v.Block
  9734  	_ = b
  9735  	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
  9736  	// cond: is32Bit(off1+off2)
  9737  	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
  9738  	for {
  9739  		off1 := v.AuxInt
  9740  		sym := v.Aux
  9741  		v_0 := v.Args[0]
  9742  		if v_0.Op != OpAMD64ADDQconst {
  9743  			break
  9744  		}
  9745  		off2 := v_0.AuxInt
  9746  		ptr := v_0.Args[0]
  9747  		val := v.Args[1]
  9748  		mem := v.Args[2]
  9749  		if !(is32Bit(off1 + off2)) {
  9750  			break
  9751  		}
  9752  		v.reset(OpAMD64MOVSSstore)
  9753  		v.AuxInt = off1 + off2
  9754  		v.Aux = sym
  9755  		v.AddArg(ptr)
  9756  		v.AddArg(val)
  9757  		v.AddArg(mem)
  9758  		return true
  9759  	}
  9760  	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  9761  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9762  	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  9763  	for {
  9764  		off1 := v.AuxInt
  9765  		sym1 := v.Aux
  9766  		v_0 := v.Args[0]
  9767  		if v_0.Op != OpAMD64LEAQ {
  9768  			break
  9769  		}
  9770  		off2 := v_0.AuxInt
  9771  		sym2 := v_0.Aux
  9772  		base := v_0.Args[0]
  9773  		val := v.Args[1]
  9774  		mem := v.Args[2]
  9775  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9776  			break
  9777  		}
  9778  		v.reset(OpAMD64MOVSSstore)
  9779  		v.AuxInt = off1 + off2
  9780  		v.Aux = mergeSym(sym1, sym2)
  9781  		v.AddArg(base)
  9782  		v.AddArg(val)
  9783  		v.AddArg(mem)
  9784  		return true
  9785  	}
  9786  	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
  9787  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9788  	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  9789  	for {
  9790  		off1 := v.AuxInt
  9791  		sym1 := v.Aux
  9792  		v_0 := v.Args[0]
  9793  		if v_0.Op != OpAMD64LEAQ1 {
  9794  			break
  9795  		}
  9796  		off2 := v_0.AuxInt
  9797  		sym2 := v_0.Aux
  9798  		ptr := v_0.Args[0]
  9799  		idx := v_0.Args[1]
  9800  		val := v.Args[1]
  9801  		mem := v.Args[2]
  9802  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9803  			break
  9804  		}
  9805  		v.reset(OpAMD64MOVSSstoreidx1)
  9806  		v.AuxInt = off1 + off2
  9807  		v.Aux = mergeSym(sym1, sym2)
  9808  		v.AddArg(ptr)
  9809  		v.AddArg(idx)
  9810  		v.AddArg(val)
  9811  		v.AddArg(mem)
  9812  		return true
  9813  	}
  9814  	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
  9815  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9816  	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
  9817  	for {
  9818  		off1 := v.AuxInt
  9819  		sym1 := v.Aux
  9820  		v_0 := v.Args[0]
  9821  		if v_0.Op != OpAMD64LEAQ4 {
  9822  			break
  9823  		}
  9824  		off2 := v_0.AuxInt
  9825  		sym2 := v_0.Aux
  9826  		ptr := v_0.Args[0]
  9827  		idx := v_0.Args[1]
  9828  		val := v.Args[1]
  9829  		mem := v.Args[2]
  9830  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9831  			break
  9832  		}
  9833  		v.reset(OpAMD64MOVSSstoreidx4)
  9834  		v.AuxInt = off1 + off2
  9835  		v.Aux = mergeSym(sym1, sym2)
  9836  		v.AddArg(ptr)
  9837  		v.AddArg(idx)
  9838  		v.AddArg(val)
  9839  		v.AddArg(mem)
  9840  		return true
  9841  	}
  9842  	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
  9843  	// cond: ptr.Op != OpSB
  9844  	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
  9845  	for {
  9846  		off := v.AuxInt
  9847  		sym := v.Aux
  9848  		v_0 := v.Args[0]
  9849  		if v_0.Op != OpAMD64ADDQ {
  9850  			break
  9851  		}
  9852  		ptr := v_0.Args[0]
  9853  		idx := v_0.Args[1]
  9854  		val := v.Args[1]
  9855  		mem := v.Args[2]
  9856  		if !(ptr.Op != OpSB) {
  9857  			break
  9858  		}
  9859  		v.reset(OpAMD64MOVSSstoreidx1)
  9860  		v.AuxInt = off
  9861  		v.Aux = sym
  9862  		v.AddArg(ptr)
  9863  		v.AddArg(idx)
  9864  		v.AddArg(val)
  9865  		v.AddArg(mem)
  9866  		return true
  9867  	}
  9868  	return false
  9869  }
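        // In the unscaled idx1 form the address operands are symmetric: a
        // constant d folds into the displacement as c+d whether it is peeled
        // off ptr or off idx. The idx4 form below scales the idx-side fold by
        // the element size, giving c+4*d.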
  9870  func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
  9871  	b := v.Block
  9872  	_ = b
  9873  	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  9874  	// cond:
  9875  	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
  9876  	for {
  9877  		c := v.AuxInt
  9878  		sym := v.Aux
  9879  		v_0 := v.Args[0]
  9880  		if v_0.Op != OpAMD64ADDQconst {
  9881  			break
  9882  		}
  9883  		d := v_0.AuxInt
  9884  		ptr := v_0.Args[0]
  9885  		idx := v.Args[1]
  9886  		val := v.Args[2]
  9887  		mem := v.Args[3]
  9888  		v.reset(OpAMD64MOVSSstoreidx1)
  9889  		v.AuxInt = c + d
  9890  		v.Aux = sym
  9891  		v.AddArg(ptr)
  9892  		v.AddArg(idx)
  9893  		v.AddArg(val)
  9894  		v.AddArg(mem)
  9895  		return true
  9896  	}
  9897  	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  9898  	// cond:
  9899  	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
  9900  	for {
  9901  		c := v.AuxInt
  9902  		sym := v.Aux
  9903  		ptr := v.Args[0]
  9904  		v_1 := v.Args[1]
  9905  		if v_1.Op != OpAMD64ADDQconst {
  9906  			break
  9907  		}
  9908  		d := v_1.AuxInt
  9909  		idx := v_1.Args[0]
  9910  		val := v.Args[2]
  9911  		mem := v.Args[3]
  9912  		v.reset(OpAMD64MOVSSstoreidx1)
  9913  		v.AuxInt = c + d
  9914  		v.Aux = sym
  9915  		v.AddArg(ptr)
  9916  		v.AddArg(idx)
  9917  		v.AddArg(val)
  9918  		v.AddArg(mem)
  9919  		return true
  9920  	}
  9921  	return false
  9922  }
  9923  func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
  9924  	b := v.Block
  9925  	_ = b
  9926  	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
  9927  	// cond:
  9928  	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
  9929  	for {
  9930  		c := v.AuxInt
  9931  		sym := v.Aux
  9932  		v_0 := v.Args[0]
  9933  		if v_0.Op != OpAMD64ADDQconst {
  9934  			break
  9935  		}
  9936  		d := v_0.AuxInt
  9937  		ptr := v_0.Args[0]
  9938  		idx := v.Args[1]
  9939  		val := v.Args[2]
  9940  		mem := v.Args[3]
  9941  		v.reset(OpAMD64MOVSSstoreidx4)
  9942  		v.AuxInt = c + d
  9943  		v.Aux = sym
  9944  		v.AddArg(ptr)
  9945  		v.AddArg(idx)
  9946  		v.AddArg(val)
  9947  		v.AddArg(mem)
  9948  		return true
  9949  	}
  9950  	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
  9951  	// cond:
  9952  	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
  9953  	for {
  9954  		c := v.AuxInt
  9955  		sym := v.Aux
  9956  		ptr := v.Args[0]
  9957  		v_1 := v.Args[1]
  9958  		if v_1.Op != OpAMD64ADDQconst {
  9959  			break
  9960  		}
  9961  		d := v_1.AuxInt
  9962  		idx := v_1.Args[0]
  9963  		val := v.Args[2]
  9964  		mem := v.Args[3]
  9965  		v.reset(OpAMD64MOVSSstoreidx4)
  9966  		v.AuxInt = c + 4*d
  9967  		v.Aux = sym
  9968  		v.AddArg(ptr)
  9969  		v.AddArg(idx)
  9970  		v.AddArg(val)
  9971  		v.AddArg(mem)
  9972  		return true
  9973  	}
  9974  	return false
  9975  }
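        // MOVWQSX (sign-extend word to quadword) is elided where possible: a
        // load feeding it (with no other uses) becomes a sign-extending load
        // in the load's own block, and for (ANDLconst [c] x) with
        // c&0x8000 == 0 the masked result can never have bit 15 set, so the
        // sign extension is a no-op and the mask is trimmed to c&0x7fff.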
  9976  func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
  9977  	b := v.Block
  9978  	_ = b
  9979  	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
  9980  	// cond: x.Uses == 1 && clobber(x)
  9981  	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  9982  	for {
  9983  		x := v.Args[0]
  9984  		if x.Op != OpAMD64MOVWload {
  9985  			break
  9986  		}
  9987  		off := x.AuxInt
  9988  		sym := x.Aux
  9989  		ptr := x.Args[0]
  9990  		mem := x.Args[1]
  9991  		if !(x.Uses == 1 && clobber(x)) {
  9992  			break
  9993  		}
  9994  		b = x.Block
  9995  		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
  9996  		v.reset(OpCopy)
  9997  		v.AddArg(v0)
  9998  		v0.AuxInt = off
  9999  		v0.Aux = sym
 10000  		v0.AddArg(ptr)
 10001  		v0.AddArg(mem)
 10002  		return true
 10003  	}
 10004  	// match: (MOVWQSX (ANDLconst [c] x))
 10005  	// cond: c & 0x8000 == 0
 10006  	// result: (ANDLconst [c & 0x7fff] x)
 10007  	for {
 10008  		v_0 := v.Args[0]
 10009  		if v_0.Op != OpAMD64ANDLconst {
 10010  			break
 10011  		}
 10012  		c := v_0.AuxInt
 10013  		x := v_0.Args[0]
 10014  		if !(c&0x8000 == 0) {
 10015  			break
 10016  		}
 10017  		v.reset(OpAMD64ANDLconst)
 10018  		v.AuxInt = c & 0x7fff
 10019  		v.AddArg(x)
 10020  		return true
 10021  	}
 10022  	return false
 10023  }
 10024  func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
 10025  	b := v.Block
 10026  	_ = b
 10027  	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
 10028  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10029  	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
 10030  	for {
 10031  		off1 := v.AuxInt
 10032  		sym1 := v.Aux
 10033  		v_0 := v.Args[0]
 10034  		if v_0.Op != OpAMD64LEAQ {
 10035  			break
 10036  		}
 10037  		off2 := v_0.AuxInt
 10038  		sym2 := v_0.Aux
 10039  		base := v_0.Args[0]
 10040  		mem := v.Args[1]
 10041  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10042  			break
 10043  		}
 10044  		v.reset(OpAMD64MOVWQSXload)
 10045  		v.AuxInt = off1 + off2
 10046  		v.Aux = mergeSym(sym1, sym2)
 10047  		v.AddArg(base)
 10048  		v.AddArg(mem)
 10049  		return true
 10050  	}
 10051  	return false
 10052  }
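        // MOVWQZX (zero-extend word to quadword) folds into the load itself,
        // since MOVWload and its indexed forms already zero-extend; the
        // extension collapses to a copy of the load re-typed to the wider
        // result. For (ANDLconst [c] x), trimming the mask to c&0xffff makes
        // the AND itself perform the zero extension.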
 10053  func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
 10054  	b := v.Block
 10055  	_ = b
 10056  	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
 10057  	// cond: x.Uses == 1 && clobber(x)
 10058  	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
 10059  	for {
 10060  		x := v.Args[0]
 10061  		if x.Op != OpAMD64MOVWload {
 10062  			break
 10063  		}
 10064  		off := x.AuxInt
 10065  		sym := x.Aux
 10066  		ptr := x.Args[0]
 10067  		mem := x.Args[1]
 10068  		if !(x.Uses == 1 && clobber(x)) {
 10069  			break
 10070  		}
 10071  		b = x.Block
 10072  		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
 10073  		v.reset(OpCopy)
 10074  		v.AddArg(v0)
 10075  		v0.AuxInt = off
 10076  		v0.Aux = sym
 10077  		v0.AddArg(ptr)
 10078  		v0.AddArg(mem)
 10079  		return true
 10080  	}
 10081  	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
 10082  	// cond: x.Uses == 1 && clobber(x)
 10083  	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
 10084  	for {
 10085  		x := v.Args[0]
 10086  		if x.Op != OpAMD64MOVWloadidx1 {
 10087  			break
 10088  		}
 10089  		off := x.AuxInt
 10090  		sym := x.Aux
 10091  		ptr := x.Args[0]
 10092  		idx := x.Args[1]
 10093  		mem := x.Args[2]
 10094  		if !(x.Uses == 1 && clobber(x)) {
 10095  			break
 10096  		}
 10097  		b = x.Block
 10098  		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
 10099  		v.reset(OpCopy)
 10100  		v.AddArg(v0)
 10101  		v0.AuxInt = off
 10102  		v0.Aux = sym
 10103  		v0.AddArg(ptr)
 10104  		v0.AddArg(idx)
 10105  		v0.AddArg(mem)
 10106  		return true
 10107  	}
 10108  	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
 10109  	// cond: x.Uses == 1 && clobber(x)
 10110  	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
 10111  	for {
 10112  		x := v.Args[0]
 10113  		if x.Op != OpAMD64MOVWloadidx2 {
 10114  			break
 10115  		}
 10116  		off := x.AuxInt
 10117  		sym := x.Aux
 10118  		ptr := x.Args[0]
 10119  		idx := x.Args[1]
 10120  		mem := x.Args[2]
 10121  		if !(x.Uses == 1 && clobber(x)) {
 10122  			break
 10123  		}
 10124  		b = x.Block
 10125  		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type)
 10126  		v.reset(OpCopy)
 10127  		v.AddArg(v0)
 10128  		v0.AuxInt = off
 10129  		v0.Aux = sym
 10130  		v0.AddArg(ptr)
 10131  		v0.AddArg(idx)
 10132  		v0.AddArg(mem)
 10133  		return true
 10134  	}
 10135  	// match: (MOVWQZX (ANDLconst [c] x))
 10136  	// cond:
 10137  	// result: (ANDLconst [c & 0xffff] x)
 10138  	for {
 10139  		v_0 := v.Args[0]
 10140  		if v_0.Op != OpAMD64ANDLconst {
 10141  			break
 10142  		}
 10143  		c := v_0.AuxInt
 10144  		x := v_0.Args[0]
 10145  		v.reset(OpAMD64ANDLconst)
 10146  		v.AuxInt = c & 0xffff
 10147  		v.AddArg(x)
 10148  		return true
 10149  	}
 10150  	return false
 10151  }
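        // The first MOVWload rule is store-to-load forwarding: a load whose
        // memory argument is a MOVWstore to the same address (same symbol,
        // offset and pointer) is replaced by the stored value x. The remaining
        // rules fold address arithmetic, as for the loads above.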
 10152  func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
 10153  	b := v.Block
 10154  	_ = b
 10155  	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
 10156  	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
 10157  	// result: x
 10158  	for {
 10159  		off := v.AuxInt
 10160  		sym := v.Aux
 10161  		ptr := v.Args[0]
 10162  		v_1 := v.Args[1]
 10163  		if v_1.Op != OpAMD64MOVWstore {
 10164  			break
 10165  		}
 10166  		off2 := v_1.AuxInt
 10167  		sym2 := v_1.Aux
 10168  		ptr2 := v_1.Args[0]
 10169  		x := v_1.Args[1]
 10170  		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 10171  			break
 10172  		}
 10173  		v.reset(OpCopy)
 10174  		v.Type = x.Type
 10175  		v.AddArg(x)
 10176  		return true
 10177  	}
 10178  	// match: (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem)
 10179  	// cond: is32Bit(off1+off2)
 10180  	// result: (MOVWload  [off1+off2] {sym} ptr mem)
 10181  	for {
 10182  		off1 := v.AuxInt
 10183  		sym := v.Aux
 10184  		v_0 := v.Args[0]
 10185  		if v_0.Op != OpAMD64ADDQconst {
 10186  			break
 10187  		}
 10188  		off2 := v_0.AuxInt
 10189  		ptr := v_0.Args[0]
 10190  		mem := v.Args[1]
 10191  		if !(is32Bit(off1 + off2)) {
 10192  			break
 10193  		}
 10194  		v.reset(OpAMD64MOVWload)
 10195  		v.AuxInt = off1 + off2
 10196  		v.Aux = sym
 10197  		v.AddArg(ptr)
 10198  		v.AddArg(mem)
 10199  		return true
 10200  	}
 10201  	// match: (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
 10202  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10203  	// result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
 10204  	for {
 10205  		off1 := v.AuxInt
 10206  		sym1 := v.Aux
 10207  		v_0 := v.Args[0]
 10208  		if v_0.Op != OpAMD64LEAQ {
 10209  			break
 10210  		}
 10211  		off2 := v_0.AuxInt
 10212  		sym2 := v_0.Aux
 10213  		base := v_0.Args[0]
 10214  		mem := v.Args[1]
 10215  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10216  			break
 10217  		}
 10218  		v.reset(OpAMD64MOVWload)
 10219  		v.AuxInt = off1 + off2
 10220  		v.Aux = mergeSym(sym1, sym2)
 10221  		v.AddArg(base)
 10222  		v.AddArg(mem)
 10223  		return true
 10224  	}
 10225  	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
 10226  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10227  	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
 10228  	for {
 10229  		off1 := v.AuxInt
 10230  		sym1 := v.Aux
 10231  		v_0 := v.Args[0]
 10232  		if v_0.Op != OpAMD64LEAQ1 {
 10233  			break
 10234  		}
 10235  		off2 := v_0.AuxInt
 10236  		sym2 := v_0.Aux
 10237  		ptr := v_0.Args[0]
 10238  		idx := v_0.Args[1]
 10239  		mem := v.Args[1]
 10240  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10241  			break
 10242  		}
 10243  		v.reset(OpAMD64MOVWloadidx1)
 10244  		v.AuxInt = off1 + off2
 10245  		v.Aux = mergeSym(sym1, sym2)
 10246  		v.AddArg(ptr)
 10247  		v.AddArg(idx)
 10248  		v.AddArg(mem)
 10249  		return true
 10250  	}
 10251  	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
 10252  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10253  	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
 10254  	for {
 10255  		off1 := v.AuxInt
 10256  		sym1 := v.Aux
 10257  		v_0 := v.Args[0]
 10258  		if v_0.Op != OpAMD64LEAQ2 {
 10259  			break
 10260  		}
 10261  		off2 := v_0.AuxInt
 10262  		sym2 := v_0.Aux
 10263  		ptr := v_0.Args[0]
 10264  		idx := v_0.Args[1]
 10265  		mem := v.Args[1]
 10266  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10267  			break
 10268  		}
 10269  		v.reset(OpAMD64MOVWloadidx2)
 10270  		v.AuxInt = off1 + off2
 10271  		v.Aux = mergeSym(sym1, sym2)
 10272  		v.AddArg(ptr)
 10273  		v.AddArg(idx)
 10274  		v.AddArg(mem)
 10275  		return true
 10276  	}
 10277  	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
 10278  	// cond: ptr.Op != OpSB
 10279  	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
 10280  	for {
 10281  		off := v.AuxInt
 10282  		sym := v.Aux
 10283  		v_0 := v.Args[0]
 10284  		if v_0.Op != OpAMD64ADDQ {
 10285  			break
 10286  		}
 10287  		ptr := v_0.Args[0]
 10288  		idx := v_0.Args[1]
 10289  		mem := v.Args[1]
 10290  		if !(ptr.Op != OpSB) {
 10291  			break
 10292  		}
 10293  		v.reset(OpAMD64MOVWloadidx1)
 10294  		v.AuxInt = off
 10295  		v.Aux = sym
 10296  		v.AddArg(ptr)
 10297  		v.AddArg(idx)
 10298  		v.AddArg(mem)
 10299  		return true
 10300  	}
 10301  	return false
 10302  }
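        // (SHLQconst [1] idx) as an index is idx*2, so an unscaled idx1 load
        // whose index is shifted left by one is re-expressed in the scale-2
        // idx2 form, absorbing the shift: ptr + (idx<<1) + c == ptr + 2*idx + c.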
 10303  func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
 10304  	b := v.Block
 10305  	_ = b
 10306  	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
 10307  	// cond:
 10308  	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
 10309  	for {
 10310  		c := v.AuxInt
 10311  		sym := v.Aux
 10312  		ptr := v.Args[0]
 10313  		v_1 := v.Args[1]
 10314  		if v_1.Op != OpAMD64SHLQconst {
 10315  			break
 10316  		}
 10317  		if v_1.AuxInt != 1 {
 10318  			break
 10319  		}
 10320  		idx := v_1.Args[0]
 10321  		mem := v.Args[2]
 10322  		v.reset(OpAMD64MOVWloadidx2)
 10323  		v.AuxInt = c
 10324  		v.Aux = sym
 10325  		v.AddArg(ptr)
 10326  		v.AddArg(idx)
 10327  		v.AddArg(mem)
 10328  		return true
 10329  	}
 10330  	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
 10331  	// cond:
 10332  	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
 10333  	for {
 10334  		c := v.AuxInt
 10335  		sym := v.Aux
 10336  		v_0 := v.Args[0]
 10337  		if v_0.Op != OpAMD64ADDQconst {
 10338  			break
 10339  		}
 10340  		d := v_0.AuxInt
 10341  		ptr := v_0.Args[0]
 10342  		idx := v.Args[1]
 10343  		mem := v.Args[2]
 10344  		v.reset(OpAMD64MOVWloadidx1)
 10345  		v.AuxInt = c + d
 10346  		v.Aux = sym
 10347  		v.AddArg(ptr)
 10348  		v.AddArg(idx)
 10349  		v.AddArg(mem)
 10350  		return true
 10351  	}
 10352  	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
 10353  	// cond:
 10354  	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
 10355  	for {
 10356  		c := v.AuxInt
 10357  		sym := v.Aux
 10358  		ptr := v.Args[0]
 10359  		v_1 := v.Args[1]
 10360  		if v_1.Op != OpAMD64ADDQconst {
 10361  			break
 10362  		}
 10363  		d := v_1.AuxInt
 10364  		idx := v_1.Args[0]
 10365  		mem := v.Args[2]
 10366  		v.reset(OpAMD64MOVWloadidx1)
 10367  		v.AuxInt = c + d
 10368  		v.Aux = sym
 10369  		v.AddArg(ptr)
 10370  		v.AddArg(idx)
 10371  		v.AddArg(mem)
 10372  		return true
 10373  	}
 10374  	return false
 10375  }
 10376  func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
 10377  	b := v.Block
 10378  	_ = b
 10379  	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
 10380  	// cond:
 10381  	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
 10382  	for {
 10383  		c := v.AuxInt
 10384  		sym := v.Aux
 10385  		v_0 := v.Args[0]
 10386  		if v_0.Op != OpAMD64ADDQconst {
 10387  			break
 10388  		}
 10389  		d := v_0.AuxInt
 10390  		ptr := v_0.Args[0]
 10391  		idx := v.Args[1]
 10392  		mem := v.Args[2]
 10393  		v.reset(OpAMD64MOVWloadidx2)
 10394  		v.AuxInt = c + d
 10395  		v.Aux = sym
 10396  		v.AddArg(ptr)
 10397  		v.AddArg(idx)
 10398  		v.AddArg(mem)
 10399  		return true
 10400  	}
 10401  	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
 10402  	// cond:
 10403  	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
 10404  	for {
 10405  		c := v.AuxInt
 10406  		sym := v.Aux
 10407  		ptr := v.Args[0]
 10408  		v_1 := v.Args[1]
 10409  		if v_1.Op != OpAMD64ADDQconst {
 10410  			break
 10411  		}
 10412  		d := v_1.AuxInt
 10413  		idx := v_1.Args[0]
 10414  		mem := v.Args[2]
 10415  		v.reset(OpAMD64MOVWloadidx2)
 10416  		v.AuxInt = c + 2*d
 10417  		v.Aux = sym
 10418  		v.AddArg(ptr)
 10419  		v.AddArg(idx)
 10420  		v.AddArg(mem)
 10421  		return true
 10422  	}
 10423  	return false
 10424  }
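        // Beyond extension-elision and address folding, the last two MOVWstore
        // rules combine adjacent stores: writing w at [i-2] and w>>16 at [i]
        // lays down the low and high halves of a 32-bit value at consecutive
        // little-endian addresses, so when the older store has no other uses
        // both merge into a single MOVLstore of w at [i-2]; the
        // (SHRQconst [j] w) variant merges two shifted halves the same way.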
 10425  func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
 10426  	b := v.Block
 10427  	_ = b
 10428  	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
 10429  	// cond:
 10430  	// result: (MOVWstore [off] {sym} ptr x mem)
 10431  	for {
 10432  		off := v.AuxInt
 10433  		sym := v.Aux
 10434  		ptr := v.Args[0]
 10435  		v_1 := v.Args[1]
 10436  		if v_1.Op != OpAMD64MOVWQSX {
 10437  			break
 10438  		}
 10439  		x := v_1.Args[0]
 10440  		mem := v.Args[2]
 10441  		v.reset(OpAMD64MOVWstore)
 10442  		v.AuxInt = off
 10443  		v.Aux = sym
 10444  		v.AddArg(ptr)
 10445  		v.AddArg(x)
 10446  		v.AddArg(mem)
 10447  		return true
 10448  	}
 10449  	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
 10450  	// cond:
 10451  	// result: (MOVWstore [off] {sym} ptr x mem)
 10452  	for {
 10453  		off := v.AuxInt
 10454  		sym := v.Aux
 10455  		ptr := v.Args[0]
 10456  		v_1 := v.Args[1]
 10457  		if v_1.Op != OpAMD64MOVWQZX {
 10458  			break
 10459  		}
 10460  		x := v_1.Args[0]
 10461  		mem := v.Args[2]
 10462  		v.reset(OpAMD64MOVWstore)
 10463  		v.AuxInt = off
 10464  		v.Aux = sym
 10465  		v.AddArg(ptr)
 10466  		v.AddArg(x)
 10467  		v.AddArg(mem)
 10468  		return true
 10469  	}
 10470  	// match: (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
 10471  	// cond: is32Bit(off1+off2)
 10472  	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
 10473  	for {
 10474  		off1 := v.AuxInt
 10475  		sym := v.Aux
 10476  		v_0 := v.Args[0]
 10477  		if v_0.Op != OpAMD64ADDQconst {
 10478  			break
 10479  		}
 10480  		off2 := v_0.AuxInt
 10481  		ptr := v_0.Args[0]
 10482  		val := v.Args[1]
 10483  		mem := v.Args[2]
 10484  		if !(is32Bit(off1 + off2)) {
 10485  			break
 10486  		}
 10487  		v.reset(OpAMD64MOVWstore)
 10488  		v.AuxInt = off1 + off2
 10489  		v.Aux = sym
 10490  		v.AddArg(ptr)
 10491  		v.AddArg(val)
 10492  		v.AddArg(mem)
 10493  		return true
 10494  	}
 10495  	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
 10496  	// cond: validOff(off)
 10497  	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
 10498  	for {
 10499  		off := v.AuxInt
 10500  		sym := v.Aux
 10501  		ptr := v.Args[0]
 10502  		v_1 := v.Args[1]
 10503  		if v_1.Op != OpAMD64MOVLconst {
 10504  			break
 10505  		}
 10506  		c := v_1.AuxInt
 10507  		mem := v.Args[2]
 10508  		if !(validOff(off)) {
 10509  			break
 10510  		}
 10511  		v.reset(OpAMD64MOVWstoreconst)
 10512  		v.AuxInt = makeValAndOff(int64(int16(c)), off)
 10513  		v.Aux = sym
 10514  		v.AddArg(ptr)
 10515  		v.AddArg(mem)
 10516  		return true
 10517  	}
 10518  	// match: (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 10519  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10520  	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 10521  	for {
 10522  		off1 := v.AuxInt
 10523  		sym1 := v.Aux
 10524  		v_0 := v.Args[0]
 10525  		if v_0.Op != OpAMD64LEAQ {
 10526  			break
 10527  		}
 10528  		off2 := v_0.AuxInt
 10529  		sym2 := v_0.Aux
 10530  		base := v_0.Args[0]
 10531  		val := v.Args[1]
 10532  		mem := v.Args[2]
 10533  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10534  			break
 10535  		}
 10536  		v.reset(OpAMD64MOVWstore)
 10537  		v.AuxInt = off1 + off2
 10538  		v.Aux = mergeSym(sym1, sym2)
 10539  		v.AddArg(base)
 10540  		v.AddArg(val)
 10541  		v.AddArg(mem)
 10542  		return true
 10543  	}
 10544  	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
 10545  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10546  	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
 10547  	for {
 10548  		off1 := v.AuxInt
 10549  		sym1 := v.Aux
 10550  		v_0 := v.Args[0]
 10551  		if v_0.Op != OpAMD64LEAQ1 {
 10552  			break
 10553  		}
 10554  		off2 := v_0.AuxInt
 10555  		sym2 := v_0.Aux
 10556  		ptr := v_0.Args[0]
 10557  		idx := v_0.Args[1]
 10558  		val := v.Args[1]
 10559  		mem := v.Args[2]
 10560  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10561  			break
 10562  		}
 10563  		v.reset(OpAMD64MOVWstoreidx1)
 10564  		v.AuxInt = off1 + off2
 10565  		v.Aux = mergeSym(sym1, sym2)
 10566  		v.AddArg(ptr)
 10567  		v.AddArg(idx)
 10568  		v.AddArg(val)
 10569  		v.AddArg(mem)
 10570  		return true
 10571  	}
 10572  	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
 10573  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10574  	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
 10575  	for {
 10576  		off1 := v.AuxInt
 10577  		sym1 := v.Aux
 10578  		v_0 := v.Args[0]
 10579  		if v_0.Op != OpAMD64LEAQ2 {
 10580  			break
 10581  		}
 10582  		off2 := v_0.AuxInt
 10583  		sym2 := v_0.Aux
 10584  		ptr := v_0.Args[0]
 10585  		idx := v_0.Args[1]
 10586  		val := v.Args[1]
 10587  		mem := v.Args[2]
 10588  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10589  			break
 10590  		}
 10591  		v.reset(OpAMD64MOVWstoreidx2)
 10592  		v.AuxInt = off1 + off2
 10593  		v.Aux = mergeSym(sym1, sym2)
 10594  		v.AddArg(ptr)
 10595  		v.AddArg(idx)
 10596  		v.AddArg(val)
 10597  		v.AddArg(mem)
 10598  		return true
 10599  	}
 10600  	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
 10601  	// cond: ptr.Op != OpSB
 10602  	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
 10603  	for {
 10604  		off := v.AuxInt
 10605  		sym := v.Aux
 10606  		v_0 := v.Args[0]
 10607  		if v_0.Op != OpAMD64ADDQ {
 10608  			break
 10609  		}
 10610  		ptr := v_0.Args[0]
 10611  		idx := v_0.Args[1]
 10612  		val := v.Args[1]
 10613  		mem := v.Args[2]
 10614  		if !(ptr.Op != OpSB) {
 10615  			break
 10616  		}
 10617  		v.reset(OpAMD64MOVWstoreidx1)
 10618  		v.AuxInt = off
 10619  		v.Aux = sym
 10620  		v.AddArg(ptr)
 10621  		v.AddArg(idx)
 10622  		v.AddArg(val)
 10623  		v.AddArg(mem)
 10624  		return true
 10625  	}
 10626  	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
 10627  	// cond: x.Uses == 1 && clobber(x)
 10628  	// result: (MOVLstore [i-2] {s} p w mem)
 10629  	for {
 10630  		i := v.AuxInt
 10631  		s := v.Aux
 10632  		p := v.Args[0]
 10633  		v_1 := v.Args[1]
 10634  		if v_1.Op != OpAMD64SHRQconst {
 10635  			break
 10636  		}
 10637  		if v_1.AuxInt != 16 {
 10638  			break
 10639  		}
 10640  		w := v_1.Args[0]
 10641  		x := v.Args[2]
 10642  		if x.Op != OpAMD64MOVWstore {
 10643  			break
 10644  		}
 10645  		if x.AuxInt != i-2 {
 10646  			break
 10647  		}
 10648  		if x.Aux != s {
 10649  			break
 10650  		}
 10651  		if p != x.Args[0] {
 10652  			break
 10653  		}
 10654  		if w != x.Args[1] {
 10655  			break
 10656  		}
 10657  		mem := x.Args[2]
 10658  		if !(x.Uses == 1 && clobber(x)) {
 10659  			break
 10660  		}
 10661  		v.reset(OpAMD64MOVLstore)
 10662  		v.AuxInt = i - 2
 10663  		v.Aux = s
 10664  		v.AddArg(p)
 10665  		v.AddArg(w)
 10666  		v.AddArg(mem)
 10667  		return true
 10668  	}
 10669  	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
 10670  	// cond: x.Uses == 1 && clobber(x)
 10671  	// result: (MOVLstore [i-2] {s} p w0 mem)
 10672  	for {
 10673  		i := v.AuxInt
 10674  		s := v.Aux
 10675  		p := v.Args[0]
 10676  		v_1 := v.Args[1]
 10677  		if v_1.Op != OpAMD64SHRQconst {
 10678  			break
 10679  		}
 10680  		j := v_1.AuxInt
 10681  		w := v_1.Args[0]
 10682  		x := v.Args[2]
 10683  		if x.Op != OpAMD64MOVWstore {
 10684  			break
 10685  		}
 10686  		if x.AuxInt != i-2 {
 10687  			break
 10688  		}
 10689  		if x.Aux != s {
 10690  			break
 10691  		}
 10692  		if p != x.Args[0] {
 10693  			break
 10694  		}
 10695  		w0 := x.Args[1]
 10696  		if w0.Op != OpAMD64SHRQconst {
 10697  			break
 10698  		}
 10699  		if w0.AuxInt != j-16 {
 10700  			break
 10701  		}
 10702  		if w != w0.Args[0] {
 10703  			break
 10704  		}
 10705  		mem := x.Args[2]
 10706  		if !(x.Uses == 1 && clobber(x)) {
 10707  			break
 10708  		}
 10709  		v.reset(OpAMD64MOVLstore)
 10710  		v.AuxInt = i - 2
 10711  		v.Aux = s
 10712  		v.AddArg(p)
 10713  		v.AddArg(w0)
 10714  		v.AddArg(mem)
 10715  		return true
 10716  	}
 10717  	return false
 10718  }
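        // MOVWstoreconst carries a ValAndOff AuxInt, a packed (value, offset)
        // pair manipulated via .add and checked with .canAdd. The final rule
        // merges two adjacent 16-bit constant stores into one MOVLstoreconst,
        // packing the little-endian 32-bit value as
        // a.Val()&0xffff | c.Val()<<16 at the lower offset a.Off().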
 10719  func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
 10720  	b := v.Block
 10721  	_ = b
 10722  	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
 10723  	// cond: ValAndOff(sc).canAdd(off)
 10724  	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
 10725  	for {
 10726  		sc := v.AuxInt
 10727  		s := v.Aux
 10728  		v_0 := v.Args[0]
 10729  		if v_0.Op != OpAMD64ADDQconst {
 10730  			break
 10731  		}
 10732  		off := v_0.AuxInt
 10733  		ptr := v_0.Args[0]
 10734  		mem := v.Args[1]
 10735  		if !(ValAndOff(sc).canAdd(off)) {
 10736  			break
 10737  		}
 10738  		v.reset(OpAMD64MOVWstoreconst)
 10739  		v.AuxInt = ValAndOff(sc).add(off)
 10740  		v.Aux = s
 10741  		v.AddArg(ptr)
 10742  		v.AddArg(mem)
 10743  		return true
 10744  	}
 10745  	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
 10746  	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
 10747  	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
 10748  	for {
 10749  		sc := v.AuxInt
 10750  		sym1 := v.Aux
 10751  		v_0 := v.Args[0]
 10752  		if v_0.Op != OpAMD64LEAQ {
 10753  			break
 10754  		}
 10755  		off := v_0.AuxInt
 10756  		sym2 := v_0.Aux
 10757  		ptr := v_0.Args[0]
 10758  		mem := v.Args[1]
 10759  		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
 10760  			break
 10761  		}
 10762  		v.reset(OpAMD64MOVWstoreconst)
 10763  		v.AuxInt = ValAndOff(sc).add(off)
 10764  		v.Aux = mergeSym(sym1, sym2)
 10765  		v.AddArg(ptr)
 10766  		v.AddArg(mem)
 10767  		return true
 10768  	}
 10769  	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
 10770  	// cond: canMergeSym(sym1, sym2)
 10771  	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
 10772  	for {
 10773  		x := v.AuxInt
 10774  		sym1 := v.Aux
 10775  		v_0 := v.Args[0]
 10776  		if v_0.Op != OpAMD64LEAQ1 {
 10777  			break
 10778  		}
 10779  		off := v_0.AuxInt
 10780  		sym2 := v_0.Aux
 10781  		ptr := v_0.Args[0]
 10782  		idx := v_0.Args[1]
 10783  		mem := v.Args[1]
 10784  		if !(canMergeSym(sym1, sym2)) {
 10785  			break
 10786  		}
 10787  		v.reset(OpAMD64MOVWstoreconstidx1)
 10788  		v.AuxInt = ValAndOff(x).add(off)
 10789  		v.Aux = mergeSym(sym1, sym2)
 10790  		v.AddArg(ptr)
 10791  		v.AddArg(idx)
 10792  		v.AddArg(mem)
 10793  		return true
 10794  	}
 10795  	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
 10796  	// cond: canMergeSym(sym1, sym2)
 10797  	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
 10798  	for {
 10799  		x := v.AuxInt
 10800  		sym1 := v.Aux
 10801  		v_0 := v.Args[0]
 10802  		if v_0.Op != OpAMD64LEAQ2 {
 10803  			break
 10804  		}
 10805  		off := v_0.AuxInt
 10806  		sym2 := v_0.Aux
 10807  		ptr := v_0.Args[0]
 10808  		idx := v_0.Args[1]
 10809  		mem := v.Args[1]
 10810  		if !(canMergeSym(sym1, sym2)) {
 10811  			break
 10812  		}
 10813  		v.reset(OpAMD64MOVWstoreconstidx2)
 10814  		v.AuxInt = ValAndOff(x).add(off)
 10815  		v.Aux = mergeSym(sym1, sym2)
 10816  		v.AddArg(ptr)
 10817  		v.AddArg(idx)
 10818  		v.AddArg(mem)
 10819  		return true
 10820  	}
 10821  	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
 10822  	// cond:
 10823  	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
 10824  	for {
 10825  		x := v.AuxInt
 10826  		sym := v.Aux
 10827  		v_0 := v.Args[0]
 10828  		if v_0.Op != OpAMD64ADDQ {
 10829  			break
 10830  		}
 10831  		ptr := v_0.Args[0]
 10832  		idx := v_0.Args[1]
 10833  		mem := v.Args[1]
 10834  		v.reset(OpAMD64MOVWstoreconstidx1)
 10835  		v.AuxInt = x
 10836  		v.Aux = sym
 10837  		v.AddArg(ptr)
 10838  		v.AddArg(idx)
 10839  		v.AddArg(mem)
 10840  		return true
 10841  	}
 10842  	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
 10843  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
 10844  	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
 10845  	for {
 10846  		c := v.AuxInt
 10847  		s := v.Aux
 10848  		p := v.Args[0]
 10849  		x := v.Args[1]
 10850  		if x.Op != OpAMD64MOVWstoreconst {
 10851  			break
 10852  		}
 10853  		a := x.AuxInt
 10854  		if x.Aux != s {
 10855  			break
 10856  		}
 10857  		if p != x.Args[0] {
 10858  			break
 10859  		}
 10860  		mem := x.Args[1]
 10861  		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
 10862  			break
 10863  		}
 10864  		v.reset(OpAMD64MOVLstoreconst)
 10865  		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
 10866  		v.Aux = s
 10867  		v.AddArg(p)
 10868  		v.AddArg(mem)
 10869  		return true
 10870  	}
 10871  	return false
 10872  }
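        // The indexed constant-store forms repeat the same folds: a
        // shift-by-one index promotes idx1 to idx2, an ADDQconst on either
        // address operand is absorbed into the ValAndOff offset (doubled on
        // the idx side of the idx2 form), and adjacent 16-bit constants merge
        // into a single 32-bit MOVLstoreconstidx1.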
 10873  func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
 10874  	b := v.Block
 10875  	_ = b
 10876  	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
 10877  	// cond:
 10878  	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
 10879  	for {
 10880  		c := v.AuxInt
 10881  		sym := v.Aux
 10882  		ptr := v.Args[0]
 10883  		v_1 := v.Args[1]
 10884  		if v_1.Op != OpAMD64SHLQconst {
 10885  			break
 10886  		}
 10887  		if v_1.AuxInt != 1 {
 10888  			break
 10889  		}
 10890  		idx := v_1.Args[0]
 10891  		mem := v.Args[2]
 10892  		v.reset(OpAMD64MOVWstoreconstidx2)
 10893  		v.AuxInt = c
 10894  		v.Aux = sym
 10895  		v.AddArg(ptr)
 10896  		v.AddArg(idx)
 10897  		v.AddArg(mem)
 10898  		return true
 10899  	}
 10900  	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
 10901  	// cond:
 10902  	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
 10903  	for {
 10904  		x := v.AuxInt
 10905  		sym := v.Aux
 10906  		v_0 := v.Args[0]
 10907  		if v_0.Op != OpAMD64ADDQconst {
 10908  			break
 10909  		}
 10910  		c := v_0.AuxInt
 10911  		ptr := v_0.Args[0]
 10912  		idx := v.Args[1]
 10913  		mem := v.Args[2]
 10914  		v.reset(OpAMD64MOVWstoreconstidx1)
 10915  		v.AuxInt = ValAndOff(x).add(c)
 10916  		v.Aux = sym
 10917  		v.AddArg(ptr)
 10918  		v.AddArg(idx)
 10919  		v.AddArg(mem)
 10920  		return true
 10921  	}
 10922  	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
 10923  	// cond:
 10924  	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
 10925  	for {
 10926  		x := v.AuxInt
 10927  		sym := v.Aux
 10928  		ptr := v.Args[0]
 10929  		v_1 := v.Args[1]
 10930  		if v_1.Op != OpAMD64ADDQconst {
 10931  			break
 10932  		}
 10933  		c := v_1.AuxInt
 10934  		idx := v_1.Args[0]
 10935  		mem := v.Args[2]
 10936  		v.reset(OpAMD64MOVWstoreconstidx1)
 10937  		v.AuxInt = ValAndOff(x).add(c)
 10938  		v.Aux = sym
 10939  		v.AddArg(ptr)
 10940  		v.AddArg(idx)
 10941  		v.AddArg(mem)
 10942  		return true
 10943  	}
 10944  	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
 10945  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
 10946  	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
 10947  	for {
 10948  		c := v.AuxInt
 10949  		s := v.Aux
 10950  		p := v.Args[0]
 10951  		i := v.Args[1]
 10952  		x := v.Args[2]
 10953  		if x.Op != OpAMD64MOVWstoreconstidx1 {
 10954  			break
 10955  		}
 10956  		a := x.AuxInt
 10957  		if x.Aux != s {
 10958  			break
 10959  		}
 10960  		if p != x.Args[0] {
 10961  			break
 10962  		}
 10963  		if i != x.Args[1] {
 10964  			break
 10965  		}
 10966  		mem := x.Args[2]
 10967  		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
 10968  			break
 10969  		}
 10970  		v.reset(OpAMD64MOVLstoreconstidx1)
 10971  		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
 10972  		v.Aux = s
 10973  		v.AddArg(p)
 10974  		v.AddArg(i)
 10975  		v.AddArg(mem)
 10976  		return true
 10977  	}
 10978  	return false
 10979  }
 10980  func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
 10981  	b := v.Block
 10982  	_ = b
 10983  	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
 10984  	// cond:
 10985  	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
 10986  	for {
 10987  		x := v.AuxInt
 10988  		sym := v.Aux
 10989  		v_0 := v.Args[0]
 10990  		if v_0.Op != OpAMD64ADDQconst {
 10991  			break
 10992  		}
 10993  		c := v_0.AuxInt
 10994  		ptr := v_0.Args[0]
 10995  		idx := v.Args[1]
 10996  		mem := v.Args[2]
 10997  		v.reset(OpAMD64MOVWstoreconstidx2)
 10998  		v.AuxInt = ValAndOff(x).add(c)
 10999  		v.Aux = sym
 11000  		v.AddArg(ptr)
 11001  		v.AddArg(idx)
 11002  		v.AddArg(mem)
 11003  		return true
 11004  	}
 11005  	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
 11006  	// cond:
 11007  	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
 11008  	for {
 11009  		x := v.AuxInt
 11010  		sym := v.Aux
 11011  		ptr := v.Args[0]
 11012  		v_1 := v.Args[1]
 11013  		if v_1.Op != OpAMD64ADDQconst {
 11014  			break
 11015  		}
 11016  		c := v_1.AuxInt
 11017  		idx := v_1.Args[0]
 11018  		mem := v.Args[2]
 11019  		v.reset(OpAMD64MOVWstoreconstidx2)
 11020  		v.AuxInt = ValAndOff(x).add(2 * c)
 11021  		v.Aux = sym
 11022  		v.AddArg(ptr)
 11023  		v.AddArg(idx)
 11024  		v.AddArg(mem)
 11025  		return true
 11026  	}
 11027  	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
 11028  	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
 11029  	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
 11030  	for {
 11031  		c := v.AuxInt
 11032  		s := v.Aux
 11033  		p := v.Args[0]
 11034  		i := v.Args[1]
 11035  		x := v.Args[2]
 11036  		if x.Op != OpAMD64MOVWstoreconstidx2 {
 11037  			break
 11038  		}
 11039  		a := x.AuxInt
 11040  		if x.Aux != s {
 11041  			break
 11042  		}
 11043  		if p != x.Args[0] {
 11044  			break
 11045  		}
 11046  		if i != x.Args[1] {
 11047  			break
 11048  		}
 11049  		mem := x.Args[2]
 11050  		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
 11051  			break
 11052  		}
 11053  		v.reset(OpAMD64MOVLstoreconstidx1)
 11054  		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
 11055  		v.Aux = s
 11056  		v.AddArg(p)
 11057  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
 11058  		v0.AuxInt = 1
 11059  		v0.AddArg(i)
 11060  		v.AddArg(v0)
 11061  		v.AddArg(mem)
 11062  		return true
 11063  	}
 11064  	return false
 11065  }
 11066  func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
 11067  	b := v.Block
 11068  	_ = b
 11069  	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
 11070  	// cond:
 11071  	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
 11072  	for {
 11073  		c := v.AuxInt
 11074  		sym := v.Aux
 11075  		ptr := v.Args[0]
 11076  		v_1 := v.Args[1]
 11077  		if v_1.Op != OpAMD64SHLQconst {
 11078  			break
 11079  		}
 11080  		if v_1.AuxInt != 1 {
 11081  			break
 11082  		}
 11083  		idx := v_1.Args[0]
 11084  		val := v.Args[2]
 11085  		mem := v.Args[3]
 11086  		v.reset(OpAMD64MOVWstoreidx2)
 11087  		v.AuxInt = c
 11088  		v.Aux = sym
 11089  		v.AddArg(ptr)
 11090  		v.AddArg(idx)
 11091  		v.AddArg(val)
 11092  		v.AddArg(mem)
 11093  		return true
 11094  	}
 11095  	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
 11096  	// cond:
 11097  	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
 11098  	for {
 11099  		c := v.AuxInt
 11100  		sym := v.Aux
 11101  		v_0 := v.Args[0]
 11102  		if v_0.Op != OpAMD64ADDQconst {
 11103  			break
 11104  		}
 11105  		d := v_0.AuxInt
 11106  		ptr := v_0.Args[0]
 11107  		idx := v.Args[1]
 11108  		val := v.Args[2]
 11109  		mem := v.Args[3]
 11110  		v.reset(OpAMD64MOVWstoreidx1)
 11111  		v.AuxInt = c + d
 11112  		v.Aux = sym
 11113  		v.AddArg(ptr)
 11114  		v.AddArg(idx)
 11115  		v.AddArg(val)
 11116  		v.AddArg(mem)
 11117  		return true
 11118  	}
 11119  	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
 11120  	// cond:
 11121  	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
 11122  	for {
 11123  		c := v.AuxInt
 11124  		sym := v.Aux
 11125  		ptr := v.Args[0]
 11126  		v_1 := v.Args[1]
 11127  		if v_1.Op != OpAMD64ADDQconst {
 11128  			break
 11129  		}
 11130  		d := v_1.AuxInt
 11131  		idx := v_1.Args[0]
 11132  		val := v.Args[2]
 11133  		mem := v.Args[3]
 11134  		v.reset(OpAMD64MOVWstoreidx1)
 11135  		v.AuxInt = c + d
 11136  		v.Aux = sym
 11137  		v.AddArg(ptr)
 11138  		v.AddArg(idx)
 11139  		v.AddArg(val)
 11140  		v.AddArg(mem)
 11141  		return true
 11142  	}
 11143  	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
 11144  	// cond: x.Uses == 1 && clobber(x)
 11145  	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
 11146  	for {
 11147  		i := v.AuxInt
 11148  		s := v.Aux
 11149  		p := v.Args[0]
 11150  		idx := v.Args[1]
 11151  		v_2 := v.Args[2]
 11152  		if v_2.Op != OpAMD64SHRQconst {
 11153  			break
 11154  		}
 11155  		if v_2.AuxInt != 16 {
 11156  			break
 11157  		}
 11158  		w := v_2.Args[0]
 11159  		x := v.Args[3]
 11160  		if x.Op != OpAMD64MOVWstoreidx1 {
 11161  			break
 11162  		}
 11163  		if x.AuxInt != i-2 {
 11164  			break
 11165  		}
 11166  		if x.Aux != s {
 11167  			break
 11168  		}
 11169  		if p != x.Args[0] {
 11170  			break
 11171  		}
 11172  		if idx != x.Args[1] {
 11173  			break
 11174  		}
 11175  		if w != x.Args[2] {
 11176  			break
 11177  		}
 11178  		mem := x.Args[3]
 11179  		if !(x.Uses == 1 && clobber(x)) {
 11180  			break
 11181  		}
 11182  		v.reset(OpAMD64MOVLstoreidx1)
 11183  		v.AuxInt = i - 2
 11184  		v.Aux = s
 11185  		v.AddArg(p)
 11186  		v.AddArg(idx)
 11187  		v.AddArg(w)
 11188  		v.AddArg(mem)
 11189  		return true
 11190  	}
 11191  	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
 11192  	// cond: x.Uses == 1 && clobber(x)
 11193  	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
 11194  	for {
 11195  		i := v.AuxInt
 11196  		s := v.Aux
 11197  		p := v.Args[0]
 11198  		idx := v.Args[1]
 11199  		v_2 := v.Args[2]
 11200  		if v_2.Op != OpAMD64SHRQconst {
 11201  			break
 11202  		}
 11203  		j := v_2.AuxInt
 11204  		w := v_2.Args[0]
 11205  		x := v.Args[3]
 11206  		if x.Op != OpAMD64MOVWstoreidx1 {
 11207  			break
 11208  		}
 11209  		if x.AuxInt != i-2 {
 11210  			break
 11211  		}
 11212  		if x.Aux != s {
 11213  			break
 11214  		}
 11215  		if p != x.Args[0] {
 11216  			break
 11217  		}
 11218  		if idx != x.Args[1] {
 11219  			break
 11220  		}
 11221  		w0 := x.Args[2]
 11222  		if w0.Op != OpAMD64SHRQconst {
 11223  			break
 11224  		}
 11225  		if w0.AuxInt != j-16 {
 11226  			break
 11227  		}
 11228  		if w != w0.Args[0] {
 11229  			break
 11230  		}
 11231  		mem := x.Args[3]
 11232  		if !(x.Uses == 1 && clobber(x)) {
 11233  			break
 11234  		}
 11235  		v.reset(OpAMD64MOVLstoreidx1)
 11236  		v.AuxInt = i - 2
 11237  		v.Aux = s
 11238  		v.AddArg(p)
 11239  		v.AddArg(idx)
 11240  		v.AddArg(w0)
 11241  		v.AddArg(mem)
 11242  		return true
 11243  	}
 11244  	return false
 11245  }
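        // Merging two scale-2 half stores yields one 4-byte store; the rewrite
        // targets the unscaled MOVLstoreidx1 form and makes the doubling
        // explicit by shifting the index, since p + (idx<<1) + (i-2) addresses
        // the same bytes as the two original stores.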
 11246  func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
 11247  	b := v.Block
 11248  	_ = b
 11249  	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
 11250  	// cond:
 11251  	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
 11252  	for {
 11253  		c := v.AuxInt
 11254  		sym := v.Aux
 11255  		v_0 := v.Args[0]
 11256  		if v_0.Op != OpAMD64ADDQconst {
 11257  			break
 11258  		}
 11259  		d := v_0.AuxInt
 11260  		ptr := v_0.Args[0]
 11261  		idx := v.Args[1]
 11262  		val := v.Args[2]
 11263  		mem := v.Args[3]
 11264  		v.reset(OpAMD64MOVWstoreidx2)
 11265  		v.AuxInt = c + d
 11266  		v.Aux = sym
 11267  		v.AddArg(ptr)
 11268  		v.AddArg(idx)
 11269  		v.AddArg(val)
 11270  		v.AddArg(mem)
 11271  		return true
 11272  	}
 11273  	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
 11274  	// cond:
 11275  	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
 11276  	for {
 11277  		c := v.AuxInt
 11278  		sym := v.Aux
 11279  		ptr := v.Args[0]
 11280  		v_1 := v.Args[1]
 11281  		if v_1.Op != OpAMD64ADDQconst {
 11282  			break
 11283  		}
 11284  		d := v_1.AuxInt
 11285  		idx := v_1.Args[0]
 11286  		val := v.Args[2]
 11287  		mem := v.Args[3]
 11288  		v.reset(OpAMD64MOVWstoreidx2)
 11289  		v.AuxInt = c + 2*d
 11290  		v.Aux = sym
 11291  		v.AddArg(ptr)
 11292  		v.AddArg(idx)
 11293  		v.AddArg(val)
 11294  		v.AddArg(mem)
 11295  		return true
 11296  	}
 11297  	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
 11298  	// cond: x.Uses == 1 && clobber(x)
 11299  	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
 11300  	for {
 11301  		i := v.AuxInt
 11302  		s := v.Aux
 11303  		p := v.Args[0]
 11304  		idx := v.Args[1]
 11305  		v_2 := v.Args[2]
 11306  		if v_2.Op != OpAMD64SHRQconst {
 11307  			break
 11308  		}
 11309  		if v_2.AuxInt != 16 {
 11310  			break
 11311  		}
 11312  		w := v_2.Args[0]
 11313  		x := v.Args[3]
 11314  		if x.Op != OpAMD64MOVWstoreidx2 {
 11315  			break
 11316  		}
 11317  		if x.AuxInt != i-2 {
 11318  			break
 11319  		}
 11320  		if x.Aux != s {
 11321  			break
 11322  		}
 11323  		if p != x.Args[0] {
 11324  			break
 11325  		}
 11326  		if idx != x.Args[1] {
 11327  			break
 11328  		}
 11329  		if w != x.Args[2] {
 11330  			break
 11331  		}
 11332  		mem := x.Args[3]
 11333  		if !(x.Uses == 1 && clobber(x)) {
 11334  			break
 11335  		}
 11336  		v.reset(OpAMD64MOVLstoreidx1)
 11337  		v.AuxInt = i - 2
 11338  		v.Aux = s
 11339  		v.AddArg(p)
 11340  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
 11341  		v0.AuxInt = 1
 11342  		v0.AddArg(idx)
 11343  		v.AddArg(v0)
 11344  		v.AddArg(w)
 11345  		v.AddArg(mem)
 11346  		return true
 11347  	}
 11348  	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
 11349  	// cond: x.Uses == 1 && clobber(x)
 11350  	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
 11351  	for {
 11352  		i := v.AuxInt
 11353  		s := v.Aux
 11354  		p := v.Args[0]
 11355  		idx := v.Args[1]
 11356  		v_2 := v.Args[2]
 11357  		if v_2.Op != OpAMD64SHRQconst {
 11358  			break
 11359  		}
 11360  		j := v_2.AuxInt
 11361  		w := v_2.Args[0]
 11362  		x := v.Args[3]
 11363  		if x.Op != OpAMD64MOVWstoreidx2 {
 11364  			break
 11365  		}
 11366  		if x.AuxInt != i-2 {
 11367  			break
 11368  		}
 11369  		if x.Aux != s {
 11370  			break
 11371  		}
 11372  		if p != x.Args[0] {
 11373  			break
 11374  		}
 11375  		if idx != x.Args[1] {
 11376  			break
 11377  		}
 11378  		w0 := x.Args[2]
 11379  		if w0.Op != OpAMD64SHRQconst {
 11380  			break
 11381  		}
 11382  		if w0.AuxInt != j-16 {
 11383  			break
 11384  		}
 11385  		if w != w0.Args[0] {
 11386  			break
 11387  		}
 11388  		mem := x.Args[3]
 11389  		if !(x.Uses == 1 && clobber(x)) {
 11390  			break
 11391  		}
 11392  		v.reset(OpAMD64MOVLstoreidx1)
 11393  		v.AuxInt = i - 2
 11394  		v.Aux = s
 11395  		v.AddArg(p)
 11396  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
 11397  		v0.AuxInt = 1
 11398  		v0.AddArg(idx)
 11399  		v.AddArg(v0)
 11400  		v.AddArg(w0)
 11401  		v.AddArg(mem)
 11402  		return true
 11403  	}
 11404  	return false
 11405  }
 11406  func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
 11407  	b := v.Block
 11408  	_ = b
 11409  	// match: (MULL x (MOVLconst [c]))
 11410  	// cond:
 11411  	// result: (MULLconst [c] x)
 11412  	for {
 11413  		x := v.Args[0]
 11414  		v_1 := v.Args[1]
 11415  		if v_1.Op != OpAMD64MOVLconst {
 11416  			break
 11417  		}
 11418  		c := v_1.AuxInt
 11419  		v.reset(OpAMD64MULLconst)
 11420  		v.AuxInt = c
 11421  		v.AddArg(x)
 11422  		return true
 11423  	}
 11424  	// match: (MULL (MOVLconst [c]) x)
 11425  	// cond:
 11426  	// result: (MULLconst [c] x)
 11427  	for {
 11428  		v_0 := v.Args[0]
 11429  		if v_0.Op != OpAMD64MOVLconst {
 11430  			break
 11431  		}
 11432  		c := v_0.AuxInt
 11433  		x := v.Args[1]
 11434  		v.reset(OpAMD64MULLconst)
 11435  		v.AuxInt = c
 11436  		v.AddArg(x)
 11437  		return true
 11438  	}
 11439  	return false
 11440  }
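        // Multiplying two 32-bit constants folds at compile time; the
        // int64(int32(c*d)) conversion reproduces the wraparound of a 32-bit
        // multiply before widening back to the int64 AuxInt.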
 11441  func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
 11442  	b := v.Block
 11443  	_ = b
 11444  	// match: (MULLconst [c] (MOVLconst [d]))
 11445  	// cond:
 11446  	// result: (MOVLconst [int64(int32(c*d))])
 11447  	for {
 11448  		c := v.AuxInt
 11449  		v_0 := v.Args[0]
 11450  		if v_0.Op != OpAMD64MOVLconst {
 11451  			break
 11452  		}
 11453  		d := v_0.AuxInt
 11454  		v.reset(OpAMD64MOVLconst)
 11455  		v.AuxInt = int64(int32(c * d))
 11456  		return true
 11457  	}
 11458  	return false
 11459  }
 11460  func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
 11461  	b := v.Block
 11462  	_ = b
 11463  	// match: (MULQ x (MOVQconst [c]))
 11464  	// cond: is32Bit(c)
 11465  	// result: (MULQconst [c] x)
 11466  	for {
 11467  		x := v.Args[0]
 11468  		v_1 := v.Args[1]
 11469  		if v_1.Op != OpAMD64MOVQconst {
 11470  			break
 11471  		}
 11472  		c := v_1.AuxInt
 11473  		if !(is32Bit(c)) {
 11474  			break
 11475  		}
 11476  		v.reset(OpAMD64MULQconst)
 11477  		v.AuxInt = c
 11478  		v.AddArg(x)
 11479  		return true
 11480  	}
 11481  	// match: (MULQ (MOVQconst [c]) x)
 11482  	// cond: is32Bit(c)
 11483  	// result: (MULQconst [c] x)
 11484  	for {
 11485  		v_0 := v.Args[0]
 11486  		if v_0.Op != OpAMD64MOVQconst {
 11487  			break
 11488  		}
 11489  		c := v_0.AuxInt
 11490  		x := v.Args[1]
 11491  		if !(is32Bit(c)) {
 11492  			break
 11493  		}
 11494  		v.reset(OpAMD64MULQconst)
 11495  		v.AuxInt = c
 11496  		v.AddArg(x)
 11497  		return true
 11498  	}
 11499  	return false
 11500  }
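        // MULQconst is strength-reduced through LEA arithmetic: LEAQk x y
        // computes x + k*y, so (LEAQ2 x x) = 3*x, (LEAQ4 x x) = 5*x,
        // (LEAQ8 x x) = 9*x, and (LEAQ8 (NEGQ x) x) = 8*x - x = 7*x; nesting
        // covers 11, 13, 21, 25, 37, 41 and 73 (e.g. 11*x = x + 2*(5*x)).
        // Powers of two become shifts, c = 2^n-1 becomes shift-and-subtract,
        // c = 2^n plus 1, 2, 4 or 8 becomes shift-plus-LEA, and c divisible by
        // 3, 5 or 9 with a power-of-two quotient becomes LEA-then-shift
        // (e.g. 40*x = (5*x)<<3).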
 11501  func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
 11502  	b := v.Block
 11503  	_ = b
 11504  	// match: (MULQconst [-1] x)
 11505  	// cond:
 11506  	// result: (NEGQ x)
 11507  	for {
 11508  		if v.AuxInt != -1 {
 11509  			break
 11510  		}
 11511  		x := v.Args[0]
 11512  		v.reset(OpAMD64NEGQ)
 11513  		v.AddArg(x)
 11514  		return true
 11515  	}
 11516  	// match: (MULQconst [0] _)
 11517  	// cond:
 11518  	// result: (MOVQconst [0])
 11519  	for {
 11520  		if v.AuxInt != 0 {
 11521  			break
 11522  		}
 11523  		v.reset(OpAMD64MOVQconst)
 11524  		v.AuxInt = 0
 11525  		return true
 11526  	}
 11527  	// match: (MULQconst [1] x)
 11528  	// cond:
 11529  	// result: x
 11530  	for {
 11531  		if v.AuxInt != 1 {
 11532  			break
 11533  		}
 11534  		x := v.Args[0]
 11535  		v.reset(OpCopy)
 11536  		v.Type = x.Type
 11537  		v.AddArg(x)
 11538  		return true
 11539  	}
 11540  	// match: (MULQconst [3] x)
 11541  	// cond:
 11542  	// result: (LEAQ2 x x)
 11543  	for {
 11544  		if v.AuxInt != 3 {
 11545  			break
 11546  		}
 11547  		x := v.Args[0]
 11548  		v.reset(OpAMD64LEAQ2)
 11549  		v.AddArg(x)
 11550  		v.AddArg(x)
 11551  		return true
 11552  	}
 11553  	// match: (MULQconst [5] x)
 11554  	// cond:
 11555  	// result: (LEAQ4 x x)
 11556  	for {
 11557  		if v.AuxInt != 5 {
 11558  			break
 11559  		}
 11560  		x := v.Args[0]
 11561  		v.reset(OpAMD64LEAQ4)
 11562  		v.AddArg(x)
 11563  		v.AddArg(x)
 11564  		return true
 11565  	}
 11566  	// match: (MULQconst [7] x)
 11567  	// cond:
 11568  	// result: (LEAQ8 (NEGQ <v.Type> x) x)
 11569  	for {
 11570  		if v.AuxInt != 7 {
 11571  			break
 11572  		}
 11573  		x := v.Args[0]
 11574  		v.reset(OpAMD64LEAQ8)
 11575  		v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type)
 11576  		v0.AddArg(x)
 11577  		v.AddArg(v0)
 11578  		v.AddArg(x)
 11579  		return true
 11580  	}
 11581  	// match: (MULQconst [9] x)
 11582  	// cond:
 11583  	// result: (LEAQ8 x x)
 11584  	for {
 11585  		if v.AuxInt != 9 {
 11586  			break
 11587  		}
 11588  		x := v.Args[0]
 11589  		v.reset(OpAMD64LEAQ8)
 11590  		v.AddArg(x)
 11591  		v.AddArg(x)
 11592  		return true
 11593  	}
 11594  	// match: (MULQconst [11] x)
 11595  	// cond:
 11596  	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
 11597  	for {
 11598  		if v.AuxInt != 11 {
 11599  			break
 11600  		}
 11601  		x := v.Args[0]
 11602  		v.reset(OpAMD64LEAQ2)
 11603  		v.AddArg(x)
 11604  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
 11605  		v0.AddArg(x)
 11606  		v0.AddArg(x)
 11607  		v.AddArg(v0)
 11608  		return true
 11609  	}
 11610  	// match: (MULQconst [13] x)
 11611  	// cond:
 11612  	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
 11613  	for {
 11614  		if v.AuxInt != 13 {
 11615  			break
 11616  		}
 11617  		x := v.Args[0]
 11618  		v.reset(OpAMD64LEAQ4)
 11619  		v.AddArg(x)
 11620  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
 11621  		v0.AddArg(x)
 11622  		v0.AddArg(x)
 11623  		v.AddArg(v0)
 11624  		return true
 11625  	}
 11626  	// match: (MULQconst [21] x)
 11627  	// cond:
 11628  	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
 11629  	for {
 11630  		if v.AuxInt != 21 {
 11631  			break
 11632  		}
 11633  		x := v.Args[0]
 11634  		v.reset(OpAMD64LEAQ4)
 11635  		v.AddArg(x)
 11636  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
 11637  		v0.AddArg(x)
 11638  		v0.AddArg(x)
 11639  		v.AddArg(v0)
 11640  		return true
 11641  	}
 11642  	// match: (MULQconst [25] x)
 11643  	// cond:
 11644  	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
 11645  	for {
 11646  		if v.AuxInt != 25 {
 11647  			break
 11648  		}
 11649  		x := v.Args[0]
 11650  		v.reset(OpAMD64LEAQ8)
 11651  		v.AddArg(x)
 11652  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
 11653  		v0.AddArg(x)
 11654  		v0.AddArg(x)
 11655  		v.AddArg(v0)
 11656  		return true
 11657  	}
 11658  	// match: (MULQconst [37] x)
 11659  	// cond:
 11660  	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
 11661  	for {
 11662  		if v.AuxInt != 37 {
 11663  			break
 11664  		}
 11665  		x := v.Args[0]
 11666  		v.reset(OpAMD64LEAQ4)
 11667  		v.AddArg(x)
 11668  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
 11669  		v0.AddArg(x)
 11670  		v0.AddArg(x)
 11671  		v.AddArg(v0)
 11672  		return true
 11673  	}
 11674  	// match: (MULQconst [41] x)
 11675  	// cond:
 11676  	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
 11677  	for {
 11678  		if v.AuxInt != 41 {
 11679  			break
 11680  		}
 11681  		x := v.Args[0]
 11682  		v.reset(OpAMD64LEAQ8)
 11683  		v.AddArg(x)
 11684  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
 11685  		v0.AddArg(x)
 11686  		v0.AddArg(x)
 11687  		v.AddArg(v0)
 11688  		return true
 11689  	}
 11690  	// match: (MULQconst [73] x)
 11691  	// cond:
 11692  	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
 11693  	for {
 11694  		if v.AuxInt != 73 {
 11695  			break
 11696  		}
 11697  		x := v.Args[0]
 11698  		v.reset(OpAMD64LEAQ8)
 11699  		v.AddArg(x)
 11700  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
 11701  		v0.AddArg(x)
 11702  		v0.AddArg(x)
 11703  		v.AddArg(v0)
 11704  		return true
 11705  	}
 11706  	// match: (MULQconst [c] x)
 11707  	// cond: isPowerOfTwo(c)
 11708  	// result: (SHLQconst [log2(c)] x)
 11709  	for {
 11710  		c := v.AuxInt
 11711  		x := v.Args[0]
 11712  		if !(isPowerOfTwo(c)) {
 11713  			break
 11714  		}
 11715  		v.reset(OpAMD64SHLQconst)
 11716  		v.AuxInt = log2(c)
 11717  		v.AddArg(x)
 11718  		return true
 11719  	}
 11720  	// match: (MULQconst [c] x)
 11721  	// cond: isPowerOfTwo(c+1) && c >= 15
 11722  	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
 11723  	for {
 11724  		c := v.AuxInt
 11725  		x := v.Args[0]
 11726  		if !(isPowerOfTwo(c+1) && c >= 15) {
 11727  			break
 11728  		}
 11729  		v.reset(OpAMD64SUBQ)
 11730  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
 11731  		v0.AuxInt = log2(c + 1)
 11732  		v0.AddArg(x)
 11733  		v.AddArg(v0)
 11734  		v.AddArg(x)
 11735  		return true
 11736  	}
 11737  	// match: (MULQconst [c] x)
 11738  	// cond: isPowerOfTwo(c-1) && c >= 17
 11739  	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
 11740  	for {
 11741  		c := v.AuxInt
 11742  		x := v.Args[0]
 11743  		if !(isPowerOfTwo(c-1) && c >= 17) {
 11744  			break
 11745  		}
 11746  		v.reset(OpAMD64LEAQ1)
 11747  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
 11748  		v0.AuxInt = log2(c - 1)
 11749  		v0.AddArg(x)
 11750  		v.AddArg(v0)
 11751  		v.AddArg(x)
 11752  		return true
 11753  	}
 11754  	// match: (MULQconst [c] x)
 11755  	// cond: isPowerOfTwo(c-2) && c >= 34
 11756  	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
 11757  	for {
 11758  		c := v.AuxInt
 11759  		x := v.Args[0]
 11760  		if !(isPowerOfTwo(c-2) && c >= 34) {
 11761  			break
 11762  		}
 11763  		v.reset(OpAMD64LEAQ2)
 11764  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
 11765  		v0.AuxInt = log2(c - 2)
 11766  		v0.AddArg(x)
 11767  		v.AddArg(v0)
 11768  		v.AddArg(x)
 11769  		return true
 11770  	}
 11771  	// match: (MULQconst [c] x)
 11772  	// cond: isPowerOfTwo(c-4) && c >= 68
 11773  	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
 11774  	for {
 11775  		c := v.AuxInt
 11776  		x := v.Args[0]
 11777  		if !(isPowerOfTwo(c-4) && c >= 68) {
 11778  			break
 11779  		}
 11780  		v.reset(OpAMD64LEAQ4)
 11781  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
 11782  		v0.AuxInt = log2(c - 4)
 11783  		v0.AddArg(x)
 11784  		v.AddArg(v0)
 11785  		v.AddArg(x)
 11786  		return true
 11787  	}
 11788  	// match: (MULQconst [c] x)
 11789  	// cond: isPowerOfTwo(c-8) && c >= 136
 11790  	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
 11791  	for {
 11792  		c := v.AuxInt
 11793  		x := v.Args[0]
 11794  		if !(isPowerOfTwo(c-8) && c >= 136) {
 11795  			break
 11796  		}
 11797  		v.reset(OpAMD64LEAQ8)
 11798  		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
 11799  		v0.AuxInt = log2(c - 8)
 11800  		v0.AddArg(x)
 11801  		v.AddArg(v0)
 11802  		v.AddArg(x)
 11803  		return true
 11804  	}
 11805  	// match: (MULQconst [c] x)
 11806  	// cond: c%3 == 0 && isPowerOfTwo(c/3)
 11807  	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
 11808  	for {
 11809  		c := v.AuxInt
 11810  		x := v.Args[0]
 11811  		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
 11812  			break
 11813  		}
 11814  		v.reset(OpAMD64SHLQconst)
 11815  		v.AuxInt = log2(c / 3)
 11816  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
 11817  		v0.AddArg(x)
 11818  		v0.AddArg(x)
 11819  		v.AddArg(v0)
 11820  		return true
 11821  	}
 11822  	// match: (MULQconst [c] x)
 11823  	// cond: c%5 == 0 && isPowerOfTwo(c/5)
 11824  	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
 11825  	for {
 11826  		c := v.AuxInt
 11827  		x := v.Args[0]
 11828  		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
 11829  			break
 11830  		}
 11831  		v.reset(OpAMD64SHLQconst)
 11832  		v.AuxInt = log2(c / 5)
 11833  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
 11834  		v0.AddArg(x)
 11835  		v0.AddArg(x)
 11836  		v.AddArg(v0)
 11837  		return true
 11838  	}
 11839  	// match: (MULQconst [c] x)
 11840  	// cond: c%9 == 0 && isPowerOfTwo(c/9)
 11841  	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
 11842  	for {
 11843  		c := v.AuxInt
 11844  		x := v.Args[0]
 11845  		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
 11846  			break
 11847  		}
 11848  		v.reset(OpAMD64SHLQconst)
 11849  		v.AuxInt = log2(c / 9)
 11850  		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
 11851  		v0.AddArg(x)
 11852  		v0.AddArg(x)
 11853  		v.AddArg(v0)
 11854  		return true
 11855  	}
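	// Illustrative note (not generated): the three rules above factor out a
	// small LEAQ multiple, e.g. c = 72 = 9*8 becomes
	// (SHLQconst [3] (LEAQ8 <v.Type> x x)), i.e. (9*x)<<3.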
 11856  	// match: (MULQconst [c] (MOVQconst [d]))
 11857  	// cond:
 11858  	// result: (MOVQconst [c*d])
 11859  	for {
 11860  		c := v.AuxInt
 11861  		v_0 := v.Args[0]
 11862  		if v_0.Op != OpAMD64MOVQconst {
 11863  			break
 11864  		}
 11865  		d := v_0.AuxInt
 11866  		v.reset(OpAMD64MOVQconst)
 11867  		v.AuxInt = c * d
 11868  		return true
 11869  	}
 11870  	return false
 11871  }
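// mulBy37 is an illustrative, hand-written model (not generated and not
// used by the compiler) of the strength reduction the MULQconst rules above
// perform: 37*x computed with two LEA-shaped steps instead of a multiply.
func mulBy37(x int64) int64 {
	nine := x + 8*x   // (LEAQ8 x x) = 9*x
	return x + 4*nine // (LEAQ4 x (LEAQ8 x x)) = 37*x
}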
 11872  func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
 11873  	b := v.Block
 11874  	_ = b
 11875  	// match: (Mod16  x y)
 11876  	// cond:
 11877  	// result: (MODW  x y)
 11878  	for {
 11879  		x := v.Args[0]
 11880  		y := v.Args[1]
 11881  		v.reset(OpAMD64MODW)
 11882  		v.AddArg(x)
 11883  		v.AddArg(y)
 11884  		return true
 11885  	}
 11886  }
 11887  func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
 11888  	b := v.Block
 11889  	_ = b
 11890  	// match: (Mod16u x y)
 11891  	// cond:
 11892  	// result: (MODWU x y)
 11893  	for {
 11894  		x := v.Args[0]
 11895  		y := v.Args[1]
 11896  		v.reset(OpAMD64MODWU)
 11897  		v.AddArg(x)
 11898  		v.AddArg(y)
 11899  		return true
 11900  	}
 11901  }
 11902  func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
 11903  	b := v.Block
 11904  	_ = b
 11905  	// match: (Mod32  x y)
 11906  	// cond:
 11907  	// result: (MODL  x y)
 11908  	for {
 11909  		x := v.Args[0]
 11910  		y := v.Args[1]
 11911  		v.reset(OpAMD64MODL)
 11912  		v.AddArg(x)
 11913  		v.AddArg(y)
 11914  		return true
 11915  	}
 11916  }
 11917  func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
 11918  	b := v.Block
 11919  	_ = b
 11920  	// match: (Mod32u x y)
 11921  	// cond:
 11922  	// result: (MODLU x y)
 11923  	for {
 11924  		x := v.Args[0]
 11925  		y := v.Args[1]
 11926  		v.reset(OpAMD64MODLU)
 11927  		v.AddArg(x)
 11928  		v.AddArg(y)
 11929  		return true
 11930  	}
 11931  }
 11932  func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
 11933  	b := v.Block
 11934  	_ = b
 11935  	// match: (Mod64  x y)
 11936  	// cond:
 11937  	// result: (MODQ  x y)
 11938  	for {
 11939  		x := v.Args[0]
 11940  		y := v.Args[1]
 11941  		v.reset(OpAMD64MODQ)
 11942  		v.AddArg(x)
 11943  		v.AddArg(y)
 11944  		return true
 11945  	}
 11946  }
 11947  func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
 11948  	b := v.Block
 11949  	_ = b
 11950  	// match: (Mod64u x y)
 11951  	// cond:
 11952  	// result: (MODQU x y)
 11953  	for {
 11954  		x := v.Args[0]
 11955  		y := v.Args[1]
 11956  		v.reset(OpAMD64MODQU)
 11957  		v.AddArg(x)
 11958  		v.AddArg(y)
 11959  		return true
 11960  	}
 11961  }
 11962  func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
 11963  	b := v.Block
 11964  	_ = b
 11965  	// match: (Mod8   x y)
 11966  	// cond:
 11967  	// result: (MODW  (SignExt8to16 x) (SignExt8to16 y))
 11968  	for {
 11969  		x := v.Args[0]
 11970  		y := v.Args[1]
 11971  		v.reset(OpAMD64MODW)
 11972  		v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
 11973  		v0.AddArg(x)
 11974  		v.AddArg(v0)
 11975  		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
 11976  		v1.AddArg(y)
 11977  		v.AddArg(v1)
 11978  		return true
 11979  	}
 11980  }
 11981  func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
 11982  	b := v.Block
 11983  	_ = b
 11984  	// match: (Mod8u  x y)
 11985  	// cond:
 11986  	// result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
 11987  	for {
 11988  		x := v.Args[0]
 11989  		y := v.Args[1]
 11990  		v.reset(OpAMD64MODWU)
 11991  		v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
 11992  		v0.AddArg(x)
 11993  		v.AddArg(v0)
 11994  		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
 11995  		v1.AddArg(y)
 11996  		v.AddArg(v1)
 11997  		return true
 11998  	}
 11999  }
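// Illustrative note (not generated): Mod8/Mod8u widen their operands to 16
// bits first because MODW/MODWU are the narrowest modulo ops the backend
// provides; since the widening is value-preserving, the 16-bit remainder
// equals the 8-bit remainder.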
 12000  func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
 12001  	b := v.Block
 12002  	_ = b
 12003  	// match: (Move [0] _ _ mem)
 12004  	// cond:
 12005  	// result: mem
 12006  	for {
 12007  		if v.AuxInt != 0 {
 12008  			break
 12009  		}
 12010  		mem := v.Args[2]
 12011  		v.reset(OpCopy)
 12012  		v.Type = mem.Type
 12013  		v.AddArg(mem)
 12014  		return true
 12015  	}
 12016  	// match: (Move [1] dst src mem)
 12017  	// cond:
 12018  	// result: (MOVBstore dst (MOVBload src mem) mem)
 12019  	for {
 12020  		if v.AuxInt != 1 {
 12021  			break
 12022  		}
 12023  		dst := v.Args[0]
 12024  		src := v.Args[1]
 12025  		mem := v.Args[2]
 12026  		v.reset(OpAMD64MOVBstore)
 12027  		v.AddArg(dst)
 12028  		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
 12029  		v0.AddArg(src)
 12030  		v0.AddArg(mem)
 12031  		v.AddArg(v0)
 12032  		v.AddArg(mem)
 12033  		return true
 12034  	}
 12035  	// match: (Move [2] dst src mem)
 12036  	// cond:
 12037  	// result: (MOVWstore dst (MOVWload src mem) mem)
 12038  	for {
 12039  		if v.AuxInt != 2 {
 12040  			break
 12041  		}
 12042  		dst := v.Args[0]
 12043  		src := v.Args[1]
 12044  		mem := v.Args[2]
 12045  		v.reset(OpAMD64MOVWstore)
 12046  		v.AddArg(dst)
 12047  		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
 12048  		v0.AddArg(src)
 12049  		v0.AddArg(mem)
 12050  		v.AddArg(v0)
 12051  		v.AddArg(mem)
 12052  		return true
 12053  	}
 12054  	// match: (Move [4] dst src mem)
 12055  	// cond:
 12056  	// result: (MOVLstore dst (MOVLload src mem) mem)
 12057  	for {
 12058  		if v.AuxInt != 4 {
 12059  			break
 12060  		}
 12061  		dst := v.Args[0]
 12062  		src := v.Args[1]
 12063  		mem := v.Args[2]
 12064  		v.reset(OpAMD64MOVLstore)
 12065  		v.AddArg(dst)
 12066  		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
 12067  		v0.AddArg(src)
 12068  		v0.AddArg(mem)
 12069  		v.AddArg(v0)
 12070  		v.AddArg(mem)
 12071  		return true
 12072  	}
 12073  	// match: (Move [8] dst src mem)
 12074  	// cond:
 12075  	// result: (MOVQstore dst (MOVQload src mem) mem)
 12076  	for {
 12077  		if v.AuxInt != 8 {
 12078  			break
 12079  		}
 12080  		dst := v.Args[0]
 12081  		src := v.Args[1]
 12082  		mem := v.Args[2]
 12083  		v.reset(OpAMD64MOVQstore)
 12084  		v.AddArg(dst)
 12085  		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
 12086  		v0.AddArg(src)
 12087  		v0.AddArg(mem)
 12088  		v.AddArg(v0)
 12089  		v.AddArg(mem)
 12090  		return true
 12091  	}
 12092  	// match: (Move [16] dst src mem)
 12093  	// cond:
 12094  	// result: (MOVOstore dst (MOVOload src mem) mem)
 12095  	for {
 12096  		if v.AuxInt != 16 {
 12097  			break
 12098  		}
 12099  		dst := v.Args[0]
 12100  		src := v.Args[1]
 12101  		mem := v.Args[2]
 12102  		v.reset(OpAMD64MOVOstore)
 12103  		v.AddArg(dst)
 12104  		v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
 12105  		v0.AddArg(src)
 12106  		v0.AddArg(mem)
 12107  		v.AddArg(v0)
 12108  		v.AddArg(mem)
 12109  		return true
 12110  	}
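	// Illustrative note (not generated): sizes 1, 2, 4, 8, and 16 above map
	// one-to-one onto MOVB/MOVW/MOVL/MOVQ/MOVO; the remaining small sizes
	// below are split into two loads and two stores instead.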
 12111  	// match: (Move [3] dst src mem)
 12112  	// cond:
12113  	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
 12114  	for {
 12115  		if v.AuxInt != 3 {
 12116  			break
 12117  		}
 12118  		dst := v.Args[0]
 12119  		src := v.Args[1]
 12120  		mem := v.Args[2]
 12121  		v.reset(OpAMD64MOVBstore)
 12122  		v.AuxInt = 2
 12123  		v.AddArg(dst)
 12124  		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
 12125  		v0.AuxInt = 2
 12126  		v0.AddArg(src)
 12127  		v0.AddArg(mem)
 12128  		v.AddArg(v0)
 12129  		v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
 12130  		v1.AddArg(dst)
 12131  		v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
 12132  		v2.AddArg(src)
 12133  		v2.AddArg(mem)
 12134  		v1.AddArg(v2)
 12135  		v1.AddArg(mem)
 12136  		v.AddArg(v1)
 12137  		return true
 12138  	}
 12139  	// match: (Move [5] dst src mem)
 12140  	// cond:
12141  	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
 12142  	for {
 12143  		if v.AuxInt != 5 {
 12144  			break
 12145  		}
 12146  		dst := v.Args[0]
 12147  		src := v.Args[1]
 12148  		mem := v.Args[2]
 12149  		v.reset(OpAMD64MOVBstore)
 12150  		v.AuxInt = 4
 12151  		v.AddArg(dst)
 12152  		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
 12153  		v0.AuxInt = 4
 12154  		v0.AddArg(src)
 12155  		v0.AddArg(mem)
 12156  		v.AddArg(v0)
 12157  		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
 12158  		v1.AddArg(dst)
 12159  		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
 12160  		v2.AddArg(src)
 12161  		v2.AddArg(mem)
 12162  		v1.AddArg(v2)
 12163  		v1.AddArg(mem)
 12164  		v.AddArg(v1)
 12165  		return true
 12166  	}
 12167  	// match: (Move [6] dst src mem)
 12168  	// cond:
12169  	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
 12170  	for {
 12171  		if v.AuxInt != 6 {
 12172  			break
 12173  		}
 12174  		dst := v.Args[0]
 12175  		src := v.Args[1]
 12176  		mem := v.Args[2]
 12177  		v.reset(OpAMD64MOVWstore)
 12178  		v.AuxInt = 4
 12179  		v.AddArg(dst)
 12180  		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
 12181  		v0.AuxInt = 4
 12182  		v0.AddArg(src)
 12183  		v0.AddArg(mem)
 12184  		v.AddArg(v0)
 12185  		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
 12186  		v1.AddArg(dst)
 12187  		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
 12188  		v2.AddArg(src)
 12189  		v2.AddArg(mem)
 12190  		v1.AddArg(v2)
 12191  		v1.AddArg(mem)
 12192  		v.AddArg(v1)
 12193  		return true
 12194  	}
 12195  	// match: (Move [7] dst src mem)
 12196  	// cond:
12197  	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
 12198  	for {
 12199  		if v.AuxInt != 7 {
 12200  			break
 12201  		}
 12202  		dst := v.Args[0]
 12203  		src := v.Args[1]
 12204  		mem := v.Args[2]
 12205  		v.reset(OpAMD64MOVLstore)
 12206  		v.AuxInt = 3
 12207  		v.AddArg(dst)
 12208  		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
 12209  		v0.AuxInt = 3
 12210  		v0.AddArg(src)
 12211  		v0.AddArg(mem)
 12212  		v.AddArg(v0)
 12213  		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
 12214  		v1.AddArg(dst)
 12215  		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
 12216  		v2.AddArg(src)
 12217  		v2.AddArg(mem)
 12218  		v1.AddArg(v2)
 12219  		v1.AddArg(mem)
 12220  		v.AddArg(v1)
 12221  		return true
 12222  	}
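	// Illustrative note (not generated): for size 7 the two 4-byte windows
	// overlap at byte 3. Both loads read the incoming memory state, and both
	// stores write src[3] to dst[3], so the overlap is harmless.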
 12223  	// match: (Move [size] dst src mem)
 12224  	// cond: size > 8 && size < 16
12225  	// result: (MOVQstore [size-8] dst (MOVQload [size-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
 12226  	for {
 12227  		size := v.AuxInt
 12228  		dst := v.Args[0]
 12229  		src := v.Args[1]
 12230  		mem := v.Args[2]
 12231  		if !(size > 8 && size < 16) {
 12232  			break
 12233  		}
 12234  		v.reset(OpAMD64MOVQstore)
 12235  		v.AuxInt = size - 8
 12236  		v.AddArg(dst)
 12237  		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
 12238  		v0.AuxInt = size - 8
 12239  		v0.AddArg(src)
 12240  		v0.AddArg(mem)
 12241  		v.AddArg(v0)
 12242  		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
 12243  		v1.AddArg(dst)
 12244  		v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
 12245  		v2.AddArg(src)
 12246  		v2.AddArg(mem)
 12247  		v1.AddArg(v2)
 12248  		v1.AddArg(mem)
 12249  		v.AddArg(v1)
 12250  		return true
 12251  	}
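	// Illustrative note (not generated): e.g. size = 11 copies bytes [3,11)
	// and [0,8) with two 8-byte moves; the 5-byte overlap rewrites the same
	// source bytes, so the result is correct.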
 12252  	// match: (Move [size] dst src mem)
 12253  	// cond: size > 16 && size%16 != 0 && size%16 <= 8
12254  	// result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVQstore dst (MOVQload src mem) mem))
 12255  	for {
 12256  		size := v.AuxInt
 12257  		dst := v.Args[0]
 12258  		src := v.Args[1]
 12259  		mem := v.Args[2]
 12260  		if !(size > 16 && size%16 != 0 && size%16 <= 8) {
 12261  			break
 12262  		}
 12263  		v.reset(OpMove)
 12264  		v.AuxInt = size - size%16
 12265  		v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
 12266  		v0.AddArg(dst)
 12267  		v0.AuxInt = size % 16
 12268  		v.AddArg(v0)
 12269  		v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
 12270  		v1.AddArg(src)
 12271  		v1.AuxInt = size % 16
 12272  		v.AddArg(v1)
 12273  		v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
 12274  		v2.AddArg(dst)
 12275  		v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
 12276  		v3.AddArg(src)
 12277  		v3.AddArg(mem)
 12278  		v2.AddArg(v3)
 12279  		v2.AddArg(mem)
 12280  		v.AddArg(v2)
 12281  		return true
 12282  	}
 12283  	// match: (Move [size] dst src mem)
 12284  	// cond: size > 16 && size%16 != 0 && size%16 > 8
12285  	// result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVOstore dst (MOVOload src mem) mem))
 12286  	for {
 12287  		size := v.AuxInt
 12288  		dst := v.Args[0]
 12289  		src := v.Args[1]
 12290  		mem := v.Args[2]
 12291  		if !(size > 16 && size%16 != 0 && size%16 > 8) {
 12292  			break
 12293  		}
 12294  		v.reset(OpMove)
 12295  		v.AuxInt = size - size%16
 12296  		v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
 12297  		v0.AddArg(dst)
 12298  		v0.AuxInt = size % 16
 12299  		v.AddArg(v0)
 12300  		v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
 12301  		v1.AddArg(src)
 12302  		v1.AuxInt = size % 16
 12303  		v.AddArg(v1)
 12304  		v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
 12305  		v2.AddArg(dst)
 12306  		v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
 12307  		v3.AddArg(src)
 12308  		v3.AddArg(mem)
 12309  		v2.AddArg(v3)
 12310  		v2.AddArg(mem)
 12311  		v.AddArg(v2)
 12312  		return true
 12313  	}
 12314  	// match: (Move [size] dst src mem)
 12315  	// cond: size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice
 12316  	// result: (DUFFCOPY [14*(64-size/16)] dst src mem)
 12317  	for {
 12318  		size := v.AuxInt
 12319  		dst := v.Args[0]
 12320  		src := v.Args[1]
 12321  		mem := v.Args[2]
 12322  		if !(size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice) {
 12323  			break
 12324  		}
 12325  		v.reset(OpAMD64DUFFCOPY)
 12326  		v.AuxInt = 14 * (64 - size/16)
 12327  		v.AddArg(dst)
 12328  		v.AddArg(src)
 12329  		v.AddArg(mem)
 12330  		return true
 12331  	}
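	// Illustrative note (not generated): the AuxInt selects an entry point
	// into the runtime's duffcopy routine, which apparently encodes 64 blocks
	// of 16 bytes at 14 code bytes per block, so 14*(64-size/16) skips the
	// unneeded blocks and copies exactly size/16 of them.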
 12332  	// match: (Move [size] dst src mem)
 12333  	// cond: (size > 16*64 || config.noDuffDevice) && size%8 == 0
 12334  	// result: (REPMOVSQ dst src (MOVQconst [size/8]) mem)
 12335  	for {
 12336  		size := v.AuxInt
 12337  		dst := v.Args[0]
 12338  		src := v.Args[1]
 12339  		mem := v.Args[2]
 12340  		if !((size > 16*64 || config.noDuffDevice) && size%8 == 0) {
 12341  			break
 12342  		}
 12343  		v.reset(OpAMD64REPMOVSQ)
 12344  		v.AddArg(dst)
 12345  		v.AddArg(src)
 12346  		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
 12347  		v0.AuxInt = size / 8
 12348  		v.AddArg(v0)
 12349  		v.AddArg(mem)
 12350  		return true
 12351  	}
 12352  	return false
 12353  }
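// moveStrategy is an illustrative, hand-written summary (not generated and
// not used by the compiler) of how the Move rules above choose a lowering;
// it approximates the conditions in rewriteValueAMD64_OpMove.
func moveStrategy(size int64, noDuffDevice bool) string {
	switch {
	case size <= 16:
		return "one or two direct loads/stores"
	case size%16 != 0:
		return "copy the odd tail, then Move the 16-byte-aligned remainder"
	case size <= 16*64 && !noDuffDevice:
		return "DUFFCOPY"
	default:
		return "REPMOVSQ"
	}
}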
 12354  func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
 12355  	b := v.Block
 12356  	_ = b
 12357  	// match: (Mul16  x y)
 12358  	// cond:
 12359  	// result: (MULL  x y)
 12360  	for {
 12361  		x := v.Args[0]
 12362  		y := v.Args[1]
 12363  		v.reset(OpAMD64MULL)
 12364  		v.AddArg(x)
 12365  		v.AddArg(y)
 12366  		return true
 12367  	}
 12368  }
 12369  func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
 12370  	b := v.Block
 12371  	_ = b
 12372  	// match: (Mul32  x y)
 12373  	// cond:
 12374  	// result: (MULL  x y)
 12375  	for {
 12376  		x := v.Args[0]
 12377  		y := v.Args[1]
 12378  		v.reset(OpAMD64MULL)
 12379  		v.AddArg(x)
 12380  		v.AddArg(y)
 12381  		return true
 12382  	}
 12383  }
 12384  func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
 12385  	b := v.Block
 12386  	_ = b
 12387  	// match: (Mul32F x y)
 12388  	// cond:
 12389  	// result: (MULSS x y)
 12390  	for {
 12391  		x := v.Args[0]
 12392  		y := v.Args[1]
 12393  		v.reset(OpAMD64MULSS)
 12394  		v.AddArg(x)
 12395  		v.AddArg(y)
 12396  		return true
 12397  	}
 12398  }
 12399  func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
 12400  	b := v.Block
 12401  	_ = b
 12402  	// match: (Mul64  x y)
 12403  	// cond:
 12404  	// result: (MULQ  x y)
 12405  	for {
 12406  		x := v.Args[0]
 12407  		y := v.Args[1]
 12408  		v.reset(OpAMD64MULQ)
 12409  		v.AddArg(x)
 12410  		v.AddArg(y)
 12411  		return true
 12412  	}
 12413  }
 12414  func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
 12415  	b := v.Block
 12416  	_ = b
 12417  	// match: (Mul64F x y)
 12418  	// cond:
 12419  	// result: (MULSD x y)
 12420  	for {
 12421  		x := v.Args[0]
 12422  		y := v.Args[1]
 12423  		v.reset(OpAMD64MULSD)
 12424  		v.AddArg(x)
 12425  		v.AddArg(y)
 12426  		return true
 12427  	}
 12428  }
 12429  func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
 12430  	b := v.Block
 12431  	_ = b
 12432  	// match: (Mul8   x y)
 12433  	// cond:
 12434  	// result: (MULL  x y)
 12435  	for {
 12436  		x := v.Args[0]
 12437  		y := v.Args[1]
 12438  		v.reset(OpAMD64MULL)
 12439  		v.AddArg(x)
 12440  		v.AddArg(y)
 12441  		return true
 12442  	}
 12443  }
 12444  func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
 12445  	b := v.Block
 12446  	_ = b
 12447  	// match: (NEGL (MOVLconst [c]))
 12448  	// cond:
 12449  	// result: (MOVLconst [int64(int32(-c))])
 12450  	for {
 12451  		v_0 := v.Args[0]
 12452  		if v_0.Op != OpAMD64MOVLconst {
 12453  			break
 12454  		}
 12455  		c := v_0.AuxInt
 12456  		v.reset(OpAMD64MOVLconst)
 12457  		v.AuxInt = int64(int32(-c))
 12458  		return true
 12459  	}
 12460  	return false
 12461  }
 12462  func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
 12463  	b := v.Block
 12464  	_ = b
 12465  	// match: (NEGQ (MOVQconst [c]))
 12466  	// cond:
 12467  	// result: (MOVQconst [-c])
 12468  	for {
 12469  		v_0 := v.Args[0]
 12470  		if v_0.Op != OpAMD64MOVQconst {
 12471  			break
 12472  		}
 12473  		c := v_0.AuxInt
 12474  		v.reset(OpAMD64MOVQconst)
 12475  		v.AuxInt = -c
 12476  		return true
 12477  	}
 12478  	return false
 12479  }
 12480  func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
 12481  	b := v.Block
 12482  	_ = b
 12483  	// match: (NOTL (MOVLconst [c]))
 12484  	// cond:
 12485  	// result: (MOVLconst [^c])
 12486  	for {
 12487  		v_0 := v.Args[0]
 12488  		if v_0.Op != OpAMD64MOVLconst {
 12489  			break
 12490  		}
 12491  		c := v_0.AuxInt
 12492  		v.reset(OpAMD64MOVLconst)
 12493  		v.AuxInt = ^c
 12494  		return true
 12495  	}
 12496  	return false
 12497  }
 12498  func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
 12499  	b := v.Block
 12500  	_ = b
 12501  	// match: (NOTQ (MOVQconst [c]))
 12502  	// cond:
 12503  	// result: (MOVQconst [^c])
 12504  	for {
 12505  		v_0 := v.Args[0]
 12506  		if v_0.Op != OpAMD64MOVQconst {
 12507  			break
 12508  		}
 12509  		c := v_0.AuxInt
 12510  		v.reset(OpAMD64MOVQconst)
 12511  		v.AuxInt = ^c
 12512  		return true
 12513  	}
 12514  	return false
 12515  }
 12516  func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
 12517  	b := v.Block
 12518  	_ = b
 12519  	// match: (Neg16  x)
 12520  	// cond:
 12521  	// result: (NEGL x)
 12522  	for {
 12523  		x := v.Args[0]
 12524  		v.reset(OpAMD64NEGL)
 12525  		v.AddArg(x)
 12526  		return true
 12527  	}
 12528  }
 12529  func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
 12530  	b := v.Block
 12531  	_ = b
 12532  	// match: (Neg32  x)
 12533  	// cond:
 12534  	// result: (NEGL x)
 12535  	for {
 12536  		x := v.Args[0]
 12537  		v.reset(OpAMD64NEGL)
 12538  		v.AddArg(x)
 12539  		return true
 12540  	}
 12541  }
 12542  func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
 12543  	b := v.Block
 12544  	_ = b
 12545  	// match: (Neg32F x)
 12546  	// cond:
 12547  	// result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
 12548  	for {
 12549  		x := v.Args[0]
 12550  		v.reset(OpAMD64PXOR)
 12551  		v.AddArg(x)
 12552  		v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
 12553  		v0.AuxInt = f2i(math.Copysign(0, -1))
 12554  		v.AddArg(v0)
 12555  		return true
 12556  	}
 12557  }
 12558  func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
 12559  	b := v.Block
 12560  	_ = b
 12561  	// match: (Neg64  x)
 12562  	// cond:
 12563  	// result: (NEGQ x)
 12564  	for {
 12565  		x := v.Args[0]
 12566  		v.reset(OpAMD64NEGQ)
 12567  		v.AddArg(x)
 12568  		return true
 12569  	}
 12570  }
 12571  func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
 12572  	b := v.Block
 12573  	_ = b
 12574  	// match: (Neg64F x)
 12575  	// cond:
 12576  	// result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
 12577  	for {
 12578  		x := v.Args[0]
 12579  		v.reset(OpAMD64PXOR)
 12580  		v.AddArg(x)
 12581  		v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
 12582  		v0.AuxInt = f2i(math.Copysign(0, -1))
 12583  		v.AddArg(v0)
 12584  		return true
 12585  	}
 12586  }
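// negateSketch is an illustrative, hand-written model (not generated) of
// the Neg32F/Neg64F lowerings above: IEEE 754 negation is exactly a flip
// of the sign bit, which PXOR against f2i(math.Copysign(0, -1)) performs.
func negateSketch(x float64) float64 {
	return math.Float64frombits(math.Float64bits(x) ^ (1 << 63))
}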
 12587  func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
 12588  	b := v.Block
 12589  	_ = b
 12590  	// match: (Neg8   x)
 12591  	// cond:
 12592  	// result: (NEGL x)
 12593  	for {
 12594  		x := v.Args[0]
 12595  		v.reset(OpAMD64NEGL)
 12596  		v.AddArg(x)
 12597  		return true
 12598  	}
 12599  }
 12600  func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
 12601  	b := v.Block
 12602  	_ = b
 12603  	// match: (Neq16  x y)
 12604  	// cond:
 12605  	// result: (SETNE (CMPW x y))
 12606  	for {
 12607  		x := v.Args[0]
 12608  		y := v.Args[1]
 12609  		v.reset(OpAMD64SETNE)
 12610  		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
 12611  		v0.AddArg(x)
 12612  		v0.AddArg(y)
 12613  		v.AddArg(v0)
 12614  		return true
 12615  	}
 12616  }
 12617  func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
 12618  	b := v.Block
 12619  	_ = b
 12620  	// match: (Neq32  x y)
 12621  	// cond:
 12622  	// result: (SETNE (CMPL x y))
 12623  	for {
 12624  		x := v.Args[0]
 12625  		y := v.Args[1]
 12626  		v.reset(OpAMD64SETNE)
 12627  		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
 12628  		v0.AddArg(x)
 12629  		v0.AddArg(y)
 12630  		v.AddArg(v0)
 12631  		return true
 12632  	}
 12633  }
 12634  func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
 12635  	b := v.Block
 12636  	_ = b
 12637  	// match: (Neq32F x y)
 12638  	// cond:
 12639  	// result: (SETNEF (UCOMISS x y))
 12640  	for {
 12641  		x := v.Args[0]
 12642  		y := v.Args[1]
 12643  		v.reset(OpAMD64SETNEF)
 12644  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
 12645  		v0.AddArg(x)
 12646  		v0.AddArg(y)
 12647  		v.AddArg(v0)
 12648  		return true
 12649  	}
 12650  }
 12651  func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
 12652  	b := v.Block
 12653  	_ = b
 12654  	// match: (Neq64  x y)
 12655  	// cond:
 12656  	// result: (SETNE (CMPQ x y))
 12657  	for {
 12658  		x := v.Args[0]
 12659  		y := v.Args[1]
 12660  		v.reset(OpAMD64SETNE)
 12661  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
 12662  		v0.AddArg(x)
 12663  		v0.AddArg(y)
 12664  		v.AddArg(v0)
 12665  		return true
 12666  	}
 12667  }
 12668  func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
 12669  	b := v.Block
 12670  	_ = b
 12671  	// match: (Neq64F x y)
 12672  	// cond:
 12673  	// result: (SETNEF (UCOMISD x y))
 12674  	for {
 12675  		x := v.Args[0]
 12676  		y := v.Args[1]
 12677  		v.reset(OpAMD64SETNEF)
 12678  		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
 12679  		v0.AddArg(x)
 12680  		v0.AddArg(y)
 12681  		v.AddArg(v0)
 12682  		return true
 12683  	}
 12684  }
 12685  func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
 12686  	b := v.Block
 12687  	_ = b
 12688  	// match: (Neq8   x y)
 12689  	// cond:
 12690  	// result: (SETNE (CMPB x y))
 12691  	for {
 12692  		x := v.Args[0]
 12693  		y := v.Args[1]
 12694  		v.reset(OpAMD64SETNE)
 12695  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
 12696  		v0.AddArg(x)
 12697  		v0.AddArg(y)
 12698  		v.AddArg(v0)
 12699  		return true
 12700  	}
 12701  }
 12702  func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
 12703  	b := v.Block
 12704  	_ = b
 12705  	// match: (NeqB   x y)
 12706  	// cond:
 12707  	// result: (SETNE (CMPB x y))
 12708  	for {
 12709  		x := v.Args[0]
 12710  		y := v.Args[1]
 12711  		v.reset(OpAMD64SETNE)
 12712  		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
 12713  		v0.AddArg(x)
 12714  		v0.AddArg(y)
 12715  		v.AddArg(v0)
 12716  		return true
 12717  	}
 12718  }
 12719  func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
 12720  	b := v.Block
 12721  	_ = b
 12722  	// match: (NeqPtr x y)
 12723  	// cond:
 12724  	// result: (SETNE (CMPQ x y))
 12725  	for {
 12726  		x := v.Args[0]
 12727  		y := v.Args[1]
 12728  		v.reset(OpAMD64SETNE)
 12729  		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
 12730  		v0.AddArg(x)
 12731  		v0.AddArg(y)
 12732  		v.AddArg(v0)
 12733  		return true
 12734  	}
 12735  }
 12736  func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
 12737  	b := v.Block
 12738  	_ = b
 12739  	// match: (NilCheck ptr mem)
 12740  	// cond:
 12741  	// result: (LoweredNilCheck ptr mem)
 12742  	for {
 12743  		ptr := v.Args[0]
 12744  		mem := v.Args[1]
 12745  		v.reset(OpAMD64LoweredNilCheck)
 12746  		v.AddArg(ptr)
 12747  		v.AddArg(mem)
 12748  		return true
 12749  	}
 12750  }
 12751  func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
 12752  	b := v.Block
 12753  	_ = b
 12754  	// match: (Not x)
 12755  	// cond:
 12756  	// result: (XORLconst [1] x)
 12757  	for {
 12758  		x := v.Args[0]
 12759  		v.reset(OpAMD64XORLconst)
 12760  		v.AuxInt = 1
 12761  		v.AddArg(x)
 12762  		return true
 12763  	}
 12764  }
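// Illustrative note (not generated): Not can lower to (XORLconst [1] x)
// because the backend materializes booleans as 0 or 1, so flipping the low
// bit is logical negation.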
 12765  func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
 12766  	b := v.Block
 12767  	_ = b
 12768  	// match: (ORL x (MOVLconst [c]))
 12769  	// cond:
 12770  	// result: (ORLconst [c] x)
 12771  	for {
 12772  		x := v.Args[0]
 12773  		v_1 := v.Args[1]
 12774  		if v_1.Op != OpAMD64MOVLconst {
 12775  			break
 12776  		}
 12777  		c := v_1.AuxInt
 12778  		v.reset(OpAMD64ORLconst)
 12779  		v.AuxInt = c
 12780  		v.AddArg(x)
 12781  		return true
 12782  	}
 12783  	// match: (ORL (MOVLconst [c]) x)
 12784  	// cond:
 12785  	// result: (ORLconst [c] x)
 12786  	for {
 12787  		v_0 := v.Args[0]
 12788  		if v_0.Op != OpAMD64MOVLconst {
 12789  			break
 12790  		}
 12791  		c := v_0.AuxInt
 12792  		x := v.Args[1]
 12793  		v.reset(OpAMD64ORLconst)
 12794  		v.AuxInt = c
 12795  		v.AddArg(x)
 12796  		return true
 12797  	}
 12798  	// match: (ORL x x)
 12799  	// cond:
 12800  	// result: x
 12801  	for {
 12802  		x := v.Args[0]
 12803  		if x != v.Args[1] {
 12804  			break
 12805  		}
 12806  		v.reset(OpCopy)
 12807  		v.Type = x.Type
 12808  		v.AddArg(x)
 12809  		return true
 12810  	}
 12811  	// match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
 12812  	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
 12813  	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
 12814  	for {
 12815  		x0 := v.Args[0]
 12816  		if x0.Op != OpAMD64MOVBload {
 12817  			break
 12818  		}
 12819  		i := x0.AuxInt
 12820  		s := x0.Aux
 12821  		p := x0.Args[0]
 12822  		mem := x0.Args[1]
 12823  		s0 := v.Args[1]
 12824  		if s0.Op != OpAMD64SHLLconst {
 12825  			break
 12826  		}
 12827  		if s0.AuxInt != 8 {
 12828  			break
 12829  		}
 12830  		x1 := s0.Args[0]
 12831  		if x1.Op != OpAMD64MOVBload {
 12832  			break
 12833  		}
 12834  		if x1.AuxInt != i+1 {
 12835  			break
 12836  		}
 12837  		if x1.Aux != s {
 12838  			break
 12839  		}
 12840  		if p != x1.Args[0] {
 12841  			break
 12842  		}
 12843  		if mem != x1.Args[1] {
 12844  			break
 12845  		}
 12846  		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
 12847  			break
 12848  		}
 12849  		b = mergePoint(b, x0, x1)
 12850  		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
 12851  		v.reset(OpCopy)
 12852  		v.AddArg(v0)
 12853  		v0.AuxInt = i
 12854  		v0.Aux = s
 12855  		v0.AddArg(p)
 12856  		v0.AddArg(mem)
 12857  		return true
 12858  	}
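	// Illustrative note (not generated): the rule above merges two adjacent
	// byte loads into one 16-bit load: b0 | b1<<8 read at offsets i and i+1
	// is a little-endian MOVWload at i, valid because x86 is little-endian.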
 12859  	// match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
 12860  	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
 12861  	// result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
 12862  	for {
 12863  		o0 := v.Args[0]
 12864  		if o0.Op != OpAMD64ORL {
 12865  			break
 12866  		}
 12867  		o1 := o0.Args[0]
 12868  		if o1.Op != OpAMD64ORL {
 12869  			break
 12870  		}
 12871  		x0 := o1.Args[0]
 12872  		if x0.Op != OpAMD64MOVBload {
 12873  			break
 12874  		}
 12875  		i := x0.AuxInt
 12876  		s := x0.Aux
 12877  		p := x0.Args[0]
 12878  		mem := x0.Args[1]
 12879  		s0 := o1.Args[1]
 12880  		if s0.Op != OpAMD64SHLLconst {
 12881  			break
 12882  		}
 12883  		if s0.AuxInt != 8 {
 12884  			break
 12885  		}
 12886  		x1 := s0.Args[0]
 12887  		if x1.Op != OpAMD64MOVBload {
 12888  			break
 12889  		}
 12890  		if x1.AuxInt != i+1 {
 12891  			break
 12892  		}
 12893  		if x1.Aux != s {
 12894  			break
 12895  		}
 12896  		if p != x1.Args[0] {
 12897  			break
 12898  		}
 12899  		if mem != x1.Args[1] {
 12900  			break
 12901  		}
 12902  		s1 := o0.Args[1]
 12903  		if s1.Op != OpAMD64SHLLconst {
 12904  			break
 12905  		}
 12906  		if s1.AuxInt != 16 {
 12907  			break
 12908  		}
 12909  		x2 := s1.Args[0]
 12910  		if x2.Op != OpAMD64MOVBload {
 12911  			break
 12912  		}
 12913  		if x2.AuxInt != i+2 {
 12914  			break
 12915  		}
 12916  		if x2.Aux != s {
 12917  			break
 12918  		}
 12919  		if p != x2.Args[0] {
 12920  			break
 12921  		}
 12922  		if mem != x2.Args[1] {
 12923  			break
 12924  		}
 12925  		s2 := v.Args[1]
 12926  		if s2.Op != OpAMD64SHLLconst {
 12927  			break
 12928  		}
 12929  		if s2.AuxInt != 24 {
 12930  			break
 12931  		}
 12932  		x3 := s2.Args[0]
 12933  		if x3.Op != OpAMD64MOVBload {
 12934  			break
 12935  		}
 12936  		if x3.AuxInt != i+3 {
 12937  			break
 12938  		}
 12939  		if x3.Aux != s {
 12940  			break
 12941  		}
 12942  		if p != x3.Args[0] {
 12943  			break
 12944  		}
 12945  		if mem != x3.Args[1] {
 12946  			break
 12947  		}
 12948  		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
 12949  			break
 12950  		}
 12951  		b = mergePoint(b, x0, x1, x2, x3)
 12952  		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
 12953  		v.reset(OpCopy)
 12954  		v.AddArg(v0)
 12955  		v0.AuxInt = i
 12956  		v0.Aux = s
 12957  		v0.AddArg(p)
 12958  		v0.AddArg(mem)
 12959  		return true
 12960  	}
 12961  	// match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
 12962  	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
 12963  	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
 12964  	for {
 12965  		x0 := v.Args[0]
 12966  		if x0.Op != OpAMD64MOVBloadidx1 {
 12967  			break
 12968  		}
 12969  		i := x0.AuxInt
 12970  		s := x0.Aux
 12971  		p := x0.Args[0]
 12972  		idx := x0.Args[1]
 12973  		mem := x0.Args[2]
 12974  		s0 := v.Args[1]
 12975  		if s0.Op != OpAMD64SHLLconst {
 12976  			break
 12977  		}
 12978  		if s0.AuxInt != 8 {
 12979  			break
 12980  		}
 12981  		x1 := s0.Args[0]
 12982  		if x1.Op != OpAMD64MOVBloadidx1 {
 12983  			break
 12984  		}
 12985  		if x1.AuxInt != i+1 {
 12986  			break
 12987  		}
 12988  		if x1.Aux != s {
 12989  			break
 12990  		}
 12991  		if p != x1.Args[0] {
 12992  			break
 12993  		}
 12994  		if idx != x1.Args[1] {
 12995  			break
 12996  		}
 12997  		if mem != x1.Args[2] {
 12998  			break
 12999  		}
 13000  		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
 13001  			break
 13002  		}
 13003  		b = mergePoint(b, x0, x1)
 13004  		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
 13005  		v.reset(OpCopy)
 13006  		v.AddArg(v0)
 13007  		v0.AuxInt = i
 13008  		v0.Aux = s
 13009  		v0.AddArg(p)
 13010  		v0.AddArg(idx)
 13011  		v0.AddArg(mem)
 13012  		return true
 13013  	}
 13014  	// match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
 13015  	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
 13016  	// result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
 13017  	for {
 13018  		o0 := v.Args[0]
 13019  		if o0.Op != OpAMD64ORL {
 13020  			break
 13021  		}
 13022  		o1 := o0.Args[0]
 13023  		if o1.Op != OpAMD64ORL {
 13024  			break
 13025  		}
 13026  		x0 := o1.Args[0]
 13027  		if x0.Op != OpAMD64MOVBloadidx1 {
 13028  			break
 13029  		}
 13030  		i := x0.AuxInt
 13031  		s := x0.Aux
 13032  		p := x0.Args[0]
 13033  		idx := x0.Args[1]
 13034  		mem := x0.Args[2]
 13035  		s0 := o1.Args[1]
 13036  		if s0.Op != OpAMD64SHLLconst {
 13037  			break
 13038  		}
 13039  		if s0.AuxInt != 8 {
 13040  			break
 13041  		}
 13042  		x1 := s0.Args[0]
 13043  		if x1.Op != OpAMD64MOVBloadidx1 {
 13044  			break
 13045  		}
 13046  		if x1.AuxInt != i+1 {
 13047  			break
 13048  		}
 13049  		if x1.Aux != s {
 13050  			break
 13051  		}
 13052  		if p != x1.Args[0] {
 13053  			break
 13054  		}
 13055  		if idx != x1.Args[1] {
 13056  			break
 13057  		}
 13058  		if mem != x1.Args[2] {
 13059  			break
 13060  		}
 13061  		s1 := o0.Args[1]
 13062  		if s1.Op != OpAMD64SHLLconst {
 13063  			break
 13064  		}
 13065  		if s1.AuxInt != 16 {
 13066  			break
 13067  		}
 13068  		x2 := s1.Args[0]
 13069  		if x2.Op != OpAMD64MOVBloadidx1 {
 13070  			break
 13071  		}
 13072  		if x2.AuxInt != i+2 {
 13073  			break
 13074  		}
 13075  		if x2.Aux != s {
 13076  			break
 13077  		}
 13078  		if p != x2.Args[0] {
 13079  			break
 13080  		}
 13081  		if idx != x2.Args[1] {
 13082  			break
 13083  		}
 13084  		if mem != x2.Args[2] {
 13085  			break
 13086  		}
 13087  		s2 := v.Args[1]
 13088  		if s2.Op != OpAMD64SHLLconst {
 13089  			break
 13090  		}
 13091  		if s2.AuxInt != 24 {
 13092  			break
 13093  		}
 13094  		x3 := s2.Args[0]
 13095  		if x3.Op != OpAMD64MOVBloadidx1 {
 13096  			break
 13097  		}
 13098  		if x3.AuxInt != i+3 {
 13099  			break
 13100  		}
 13101  		if x3.Aux != s {
 13102  			break
 13103  		}
 13104  		if p != x3.Args[0] {
 13105  			break
 13106  		}
 13107  		if idx != x3.Args[1] {
 13108  			break
 13109  		}
 13110  		if mem != x3.Args[2] {
 13111  			break
 13112  		}
 13113  		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
 13114  			break
 13115  		}
 13116  		b = mergePoint(b, x0, x1, x2, x3)
 13117  		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
 13118  		v.reset(OpCopy)
 13119  		v.AddArg(v0)
 13120  		v0.AuxInt = i
 13121  		v0.Aux = s
 13122  		v0.AddArg(p)
 13123  		v0.AddArg(idx)
 13124  		v0.AddArg(mem)
 13125  		return true
 13126  	}
 13127  	return false
 13128  }
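// loadLE32 is an illustrative, hand-written model (not generated and not
// used by the compiler) of the ORL load-merging rules above: four adjacent
// byte loads, shifted and ORed together, reconstruct one little-endian
// 32-bit load.
func loadLE32(b []byte) uint32 {
	_ = b[3] // the rules likewise require all four loads to share p, s, and mem
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}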
 13129  func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
 13130  	b := v.Block
 13131  	_ = b
 13132  	// match: (ORLconst [c] x)
 13133  	// cond: int32(c)==0
 13134  	// result: x
 13135  	for {
 13136  		c := v.AuxInt
 13137  		x := v.Args[0]
 13138  		if !(int32(c) == 0) {
 13139  			break
 13140  		}
 13141  		v.reset(OpCopy)
 13142  		v.Type = x.Type
 13143  		v.AddArg(x)
 13144  		return true
 13145  	}
 13146  	// match: (ORLconst [c] _)
 13147  	// cond: int32(c)==-1
 13148  	// result: (MOVLconst [-1])
 13149  	for {
 13150  		c := v.AuxInt
 13151  		if !(int32(c) == -1) {
 13152  			break
 13153  		}
 13154  		v.reset(OpAMD64MOVLconst)
 13155  		v.AuxInt = -1
 13156  		return true
 13157  	}
 13158  	// match: (ORLconst [c] (MOVLconst [d]))
 13159  	// cond:
 13160  	// result: (MOVLconst [c|d])
 13161  	for {
 13162  		c := v.AuxInt
 13163  		v_0 := v.Args[0]
 13164  		if v_0.Op != OpAMD64MOVLconst {
 13165  			break
 13166  		}
 13167  		d := v_0.AuxInt
 13168  		v.reset(OpAMD64MOVLconst)
 13169  		v.AuxInt = c | d
 13170  		return true
 13171  	}
 13172  	return false
 13173  }
 13174  func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
 13175  	b := v.Block
 13176  	_ = b
 13177  	// match: (ORQ x (MOVQconst [c]))
 13178  	// cond: is32Bit(c)
 13179  	// result: (ORQconst [c] x)
 13180  	for {
 13181  		x := v.Args[0]
 13182  		v_1 := v.Args[1]
 13183  		if v_1.Op != OpAMD64MOVQconst {
 13184  			break
 13185  		}
 13186  		c := v_1.AuxInt
 13187  		if !(is32Bit(c)) {
 13188  			break
 13189  		}
 13190  		v.reset(OpAMD64ORQconst)
 13191  		v.AuxInt = c
 13192  		v.AddArg(x)
 13193  		return true
 13194  	}
 13195  	// match: (ORQ (MOVQconst [c]) x)
 13196  	// cond: is32Bit(c)
 13197  	// result: (ORQconst [c] x)
 13198  	for {
 13199  		v_0 := v.Args[0]
 13200  		if v_0.Op != OpAMD64MOVQconst {
 13201  			break
 13202  		}
 13203  		c := v_0.AuxInt
 13204  		x := v.Args[1]
 13205  		if !(is32Bit(c)) {
 13206  			break
 13207  		}
 13208  		v.reset(OpAMD64ORQconst)
 13209  		v.AuxInt = c
 13210  		v.AddArg(x)
 13211  		return true
 13212  	}
 13213  	// match: (ORQ x x)
 13214  	// cond:
 13215  	// result: x
 13216  	for {
 13217  		x := v.Args[0]
 13218  		if x != v.Args[1] {
 13219  			break
 13220  		}
 13221  		v.reset(OpCopy)
 13222  		v.Type = x.Type
 13223  		v.AddArg(x)
 13224  		return true
 13225  	}
 13226  	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLQconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))     s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))     s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))     s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))     s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
 13227  	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
 13228  	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
 13229  	for {
 13230  		o0 := v.Args[0]
 13231  		if o0.Op != OpAMD64ORQ {
 13232  			break
 13233  		}
 13234  		o1 := o0.Args[0]
 13235  		if o1.Op != OpAMD64ORQ {
 13236  			break
 13237  		}
 13238  		o2 := o1.Args[0]
 13239  		if o2.Op != OpAMD64ORQ {
 13240  			break
 13241  		}
 13242  		o3 := o2.Args[0]
 13243  		if o3.Op != OpAMD64ORQ {
 13244  			break
 13245  		}
 13246  		o4 := o3.Args[0]
 13247  		if o4.Op != OpAMD64ORQ {
 13248  			break
 13249  		}
 13250  		o5 := o4.Args[0]
 13251  		if o5.Op != OpAMD64ORQ {
 13252  			break
 13253  		}
 13254  		x0 := o5.Args[0]
 13255  		if x0.Op != OpAMD64MOVBload {
 13256  			break
 13257  		}
 13258  		i := x0.AuxInt
 13259  		s := x0.Aux
 13260  		p := x0.Args[0]
 13261  		mem := x0.Args[1]
 13262  		s0 := o5.Args[1]
 13263  		if s0.Op != OpAMD64SHLQconst {
 13264  			break
 13265  		}
 13266  		if s0.AuxInt != 8 {
 13267  			break
 13268  		}
 13269  		x1 := s0.Args[0]
 13270  		if x1.Op != OpAMD64MOVBload {
 13271  			break
 13272  		}
 13273  		if x1.AuxInt != i+1 {
 13274  			break
 13275  		}
 13276  		if x1.Aux != s {
 13277  			break
 13278  		}
 13279  		if p != x1.Args[0] {
 13280  			break
 13281  		}
 13282  		if mem != x1.Args[1] {
 13283  			break
 13284  		}
 13285  		s1 := o4.Args[1]
 13286  		if s1.Op != OpAMD64SHLQconst {
 13287  			break
 13288  		}
 13289  		if s1.AuxInt != 16 {
 13290  			break
 13291  		}
 13292  		x2 := s1.Args[0]
 13293  		if x2.Op != OpAMD64MOVBload {
 13294  			break
 13295  		}
 13296  		if x2.AuxInt != i+2 {
 13297  			break
 13298  		}
 13299  		if x2.Aux != s {
 13300  			break
 13301  		}
 13302  		if p != x2.Args[0] {
 13303  			break
 13304  		}
 13305  		if mem != x2.Args[1] {
 13306  			break
 13307  		}
 13308  		s2 := o3.Args[1]
 13309  		if s2.Op != OpAMD64SHLQconst {
 13310  			break
 13311  		}
 13312  		if s2.AuxInt != 24 {
 13313  			break
 13314  		}
 13315  		x3 := s2.Args[0]
 13316  		if x3.Op != OpAMD64MOVBload {
 13317  			break
 13318  		}
 13319  		if x3.AuxInt != i+3 {
 13320  			break
 13321  		}
 13322  		if x3.Aux != s {
 13323  			break
 13324  		}
 13325  		if p != x3.Args[0] {
 13326  			break
 13327  		}
 13328  		if mem != x3.Args[1] {
 13329  			break
 13330  		}
 13331  		s3 := o2.Args[1]
 13332  		if s3.Op != OpAMD64SHLQconst {
 13333  			break
 13334  		}
 13335  		if s3.AuxInt != 32 {
 13336  			break
 13337  		}
 13338  		x4 := s3.Args[0]
 13339  		if x4.Op != OpAMD64MOVBload {
 13340  			break
 13341  		}
 13342  		if x4.AuxInt != i+4 {
 13343  			break
 13344  		}
 13345  		if x4.Aux != s {
 13346  			break
 13347  		}
 13348  		if p != x4.Args[0] {
 13349  			break
 13350  		}
 13351  		if mem != x4.Args[1] {
 13352  			break
 13353  		}
 13354  		s4 := o1.Args[1]
 13355  		if s4.Op != OpAMD64SHLQconst {
 13356  			break
 13357  		}
 13358  		if s4.AuxInt != 40 {
 13359  			break
 13360  		}
 13361  		x5 := s4.Args[0]
 13362  		if x5.Op != OpAMD64MOVBload {
 13363  			break
 13364  		}
 13365  		if x5.AuxInt != i+5 {
 13366  			break
 13367  		}
 13368  		if x5.Aux != s {
 13369  			break
 13370  		}
 13371  		if p != x5.Args[0] {
 13372  			break
 13373  		}
 13374  		if mem != x5.Args[1] {
 13375  			break
 13376  		}
 13377  		s5 := o0.Args[1]
 13378  		if s5.Op != OpAMD64SHLQconst {
 13379  			break
 13380  		}
 13381  		if s5.AuxInt != 48 {
 13382  			break
 13383  		}
 13384  		x6 := s5.Args[0]
 13385  		if x6.Op != OpAMD64MOVBload {
 13386  			break
 13387  		}
 13388  		if x6.AuxInt != i+6 {
 13389  			break
 13390  		}
 13391  		if x6.Aux != s {
 13392  			break
 13393  		}
 13394  		if p != x6.Args[0] {
 13395  			break
 13396  		}
 13397  		if mem != x6.Args[1] {
 13398  			break
 13399  		}
 13400  		s6 := v.Args[1]
 13401  		if s6.Op != OpAMD64SHLQconst {
 13402  			break
 13403  		}
 13404  		if s6.AuxInt != 56 {
 13405  			break
 13406  		}
 13407  		x7 := s6.Args[0]
 13408  		if x7.Op != OpAMD64MOVBload {
 13409  			break
 13410  		}
 13411  		if x7.AuxInt != i+7 {
 13412  			break
 13413  		}
 13414  		if x7.Aux != s {
 13415  			break
 13416  		}
 13417  		if p != x7.Args[0] {
 13418  			break
 13419  		}
 13420  		if mem != x7.Args[1] {
 13421  			break
 13422  		}
 13423  		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
 13424  			break
 13425  		}
 13426  		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
 13427  		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
 13428  		v.reset(OpCopy)
 13429  		v.AddArg(v0)
 13430  		v0.AuxInt = i
 13431  		v0.Aux = s
 13432  		v0.AddArg(p)
 13433  		v0.AddArg(mem)
 13434  		return true
 13435  	}
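	// Illustrative note (not generated): this is the 8-byte analogue of the
	// ORL merges above; eight adjacent byte loads shifted by 0..56 bits
	// reassemble one little-endian MOVQload.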
 13436  	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLQconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))     s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))     s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))     s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))     s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
 13437  	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
 13438  	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
 13439  	for {
 13440  		o0 := v.Args[0]
 13441  		if o0.Op != OpAMD64ORQ {
 13442  			break
 13443  		}
 13444  		o1 := o0.Args[0]
 13445  		if o1.Op != OpAMD64ORQ {
 13446  			break
 13447  		}
 13448  		o2 := o1.Args[0]
 13449  		if o2.Op != OpAMD64ORQ {
 13450  			break
 13451  		}
 13452  		o3 := o2.Args[0]
 13453  		if o3.Op != OpAMD64ORQ {
 13454  			break
 13455  		}
 13456  		o4 := o3.Args[0]
 13457  		if o4.Op != OpAMD64ORQ {
 13458  			break
 13459  		}
 13460  		o5 := o4.Args[0]
 13461  		if o5.Op != OpAMD64ORQ {
 13462  			break
 13463  		}
 13464  		x0 := o5.Args[0]
 13465  		if x0.Op != OpAMD64MOVBloadidx1 {
 13466  			break
 13467  		}
 13468  		i := x0.AuxInt
 13469  		s := x0.Aux
 13470  		p := x0.Args[0]
 13471  		idx := x0.Args[1]
 13472  		mem := x0.Args[2]
 13473  		s0 := o5.Args[1]
 13474  		if s0.Op != OpAMD64SHLQconst {
 13475  			break
 13476  		}
 13477  		if s0.AuxInt != 8 {
 13478  			break
 13479  		}
 13480  		x1 := s0.Args[0]
 13481  		if x1.Op != OpAMD64MOVBloadidx1 {
 13482  			break
 13483  		}
 13484  		if x1.AuxInt != i+1 {
 13485  			break
 13486  		}
 13487  		if x1.Aux != s {
 13488  			break
 13489  		}
 13490  		if p != x1.Args[0] {
 13491  			break
 13492  		}
 13493  		if idx != x1.Args[1] {
 13494  			break
 13495  		}
 13496  		if mem != x1.Args[2] {
 13497  			break
 13498  		}
 13499  		s1 := o4.Args[1]
 13500  		if s1.Op != OpAMD64SHLQconst {
 13501  			break
 13502  		}
 13503  		if s1.AuxInt != 16 {
 13504  			break
 13505  		}
 13506  		x2 := s1.Args[0]
 13507  		if x2.Op != OpAMD64MOVBloadidx1 {
 13508  			break
 13509  		}
 13510  		if x2.AuxInt != i+2 {
 13511  			break
 13512  		}
 13513  		if x2.Aux != s {
 13514  			break
 13515  		}
 13516  		if p != x2.Args[0] {
 13517  			break
 13518  		}
 13519  		if idx != x2.Args[1] {
 13520  			break
 13521  		}
 13522  		if mem != x2.Args[2] {
 13523  			break
 13524  		}
 13525  		s2 := o3.Args[1]
 13526  		if s2.Op != OpAMD64SHLQconst {
 13527  			break
 13528  		}
 13529  		if s2.AuxInt != 24 {
 13530  			break
 13531  		}
 13532  		x3 := s2.Args[0]
 13533  		if x3.Op != OpAMD64MOVBloadidx1 {
 13534  			break
 13535  		}
 13536  		if x3.AuxInt != i+3 {
 13537  			break
 13538  		}
 13539  		if x3.Aux != s {
 13540  			break
 13541  		}
 13542  		if p != x3.Args[0] {
 13543  			break
 13544  		}
 13545  		if idx != x3.Args[1] {
 13546  			break
 13547  		}
 13548  		if mem != x3.Args[2] {
 13549  			break
 13550  		}
 13551  		s3 := o2.Args[1]
 13552  		if s3.Op != OpAMD64SHLQconst {
 13553  			break
 13554  		}
 13555  		if s3.AuxInt != 32 {
 13556  			break
 13557  		}
 13558  		x4 := s3.Args[0]
 13559  		if x4.Op != OpAMD64MOVBloadidx1 {
 13560  			break
 13561  		}
 13562  		if x4.AuxInt != i+4 {
 13563  			break
 13564  		}
 13565  		if x4.Aux != s {
 13566  			break
 13567  		}
 13568  		if p != x4.Args[0] {
 13569  			break
 13570  		}
 13571  		if idx != x4.Args[1] {
 13572  			break
 13573  		}
 13574  		if mem != x4.Args[2] {
 13575  			break
 13576  		}
 13577  		s4 := o1.Args[1]
 13578  		if s4.Op != OpAMD64SHLQconst {
 13579  			break
 13580  		}
 13581  		if s4.AuxInt != 40 {
 13582  			break
 13583  		}
 13584  		x5 := s4.Args[0]
 13585  		if x5.Op != OpAMD64MOVBloadidx1 {
 13586  			break
 13587  		}
 13588  		if x5.AuxInt != i+5 {
 13589  			break
 13590  		}
 13591  		if x5.Aux != s {
 13592  			break
 13593  		}
 13594  		if p != x5.Args[0] {
 13595  			break
 13596  		}
 13597  		if idx != x5.Args[1] {
 13598  			break
 13599  		}
 13600  		if mem != x5.Args[2] {
 13601  			break
 13602  		}
 13603  		s5 := o0.Args[1]
 13604  		if s5.Op != OpAMD64SHLQconst {
 13605  			break
 13606  		}
 13607  		if s5.AuxInt != 48 {
 13608  			break
 13609  		}
 13610  		x6 := s5.Args[0]
 13611  		if x6.Op != OpAMD64MOVBloadidx1 {
 13612  			break
 13613  		}
 13614  		if x6.AuxInt != i+6 {
 13615  			break
 13616  		}
 13617  		if x6.Aux != s {
 13618  			break
 13619  		}
 13620  		if p != x6.Args[0] {
 13621  			break
 13622  		}
 13623  		if idx != x6.Args[1] {
 13624  			break
 13625  		}
 13626  		if mem != x6.Args[2] {
 13627  			break
 13628  		}
 13629  		s6 := v.Args[1]
 13630  		if s6.Op != OpAMD64SHLQconst {
 13631  			break
 13632  		}
 13633  		if s6.AuxInt != 56 {
 13634  			break
 13635  		}
 13636  		x7 := s6.Args[0]
 13637  		if x7.Op != OpAMD64MOVBloadidx1 {
 13638  			break
 13639  		}
 13640  		if x7.AuxInt != i+7 {
 13641  			break
 13642  		}
 13643  		if x7.Aux != s {
 13644  			break
 13645  		}
 13646  		if p != x7.Args[0] {
 13647  			break
 13648  		}
 13649  		if idx != x7.Args[1] {
 13650  			break
 13651  		}
 13652  		if mem != x7.Args[2] {
 13653  			break
 13654  		}
 13655  		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
 13656  			break
 13657  		}
 13658  		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
 13659  		v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
 13660  		v.reset(OpCopy)
 13661  		v.AddArg(v0)
 13662  		v0.AuxInt = i
 13663  		v0.Aux = s
 13664  		v0.AddArg(p)
 13665  		v0.AddArg(idx)
 13666  		v0.AddArg(mem)
 13667  		return true
 13668  	}
 13669  	return false
 13670  }
 13671  func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
 13672  	b := v.Block
 13673  	_ = b
 13674  	// match: (ORQconst [0] x)
 13675  	// cond:
 13676  	// result: x
 13677  	for {
 13678  		if v.AuxInt != 0 {
 13679  			break
 13680  		}
 13681  		x := v.Args[0]
 13682  		v.reset(OpCopy)
 13683  		v.Type = x.Type
 13684  		v.AddArg(x)
 13685  		return true
 13686  	}
 13687  	// match: (ORQconst [-1] _)
 13688  	// cond:
 13689  	// result: (MOVQconst [-1])
 13690  	for {
 13691  		if v.AuxInt != -1 {
 13692  			break
 13693  		}
 13694  		v.reset(OpAMD64MOVQconst)
 13695  		v.AuxInt = -1
 13696  		return true
 13697  	}
 13698  	// match: (ORQconst [c] (MOVQconst [d]))
 13699  	// cond:
 13700  	// result: (MOVQconst [c|d])
 13701  	for {
 13702  		c := v.AuxInt
 13703  		v_0 := v.Args[0]
 13704  		if v_0.Op != OpAMD64MOVQconst {
 13705  			break
 13706  		}
 13707  		d := v_0.AuxInt
 13708  		v.reset(OpAMD64MOVQconst)
 13709  		v.AuxInt = c | d
 13710  		return true
 13711  	}
 13712  	return false
 13713  }
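// Note (editorial): the three ORQconst rules are the usual bitwise-OR
// identities: x|0 = x, x|-1 = -1, and full constant folding c|d when the
// operand is itself a constant. The "b := v.Block; _ = b" preamble is emitted
// unconditionally by the rule generator; the blank assignment keeps functions
// that never touch b compiling.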
 13714  func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
 13715  	b := v.Block
 13716  	_ = b
 13717  	// match: (OffPtr [off] ptr)
 13718  	// cond: is32Bit(off)
 13719  	// result: (ADDQconst [off] ptr)
 13720  	for {
 13721  		off := v.AuxInt
 13722  		ptr := v.Args[0]
 13723  		if !(is32Bit(off)) {
 13724  			break
 13725  		}
 13726  		v.reset(OpAMD64ADDQconst)
 13727  		v.AuxInt = off
 13728  		v.AddArg(ptr)
 13729  		return true
 13730  	}
 13731  	// match: (OffPtr [off] ptr)
 13732  	// cond:
 13733  	// result: (ADDQ (MOVQconst [off]) ptr)
 13734  	for {
 13735  		off := v.AuxInt
 13736  		ptr := v.Args[0]
 13737  		v.reset(OpAMD64ADDQ)
 13738  		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
 13739  		v0.AuxInt = off
 13740  		v.AddArg(v0)
 13741  		v.AddArg(ptr)
 13742  		return true
 13743  	}
 13744  }
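// Note (editorial): OffPtr becomes a single ADDQconst only when the offset
// fits in the signed 32-bit immediate field of an x86-64 instruction;
// otherwise the constant is first materialized into a register with MOVQconst
// and added with a plain ADDQ. The guard is the is32Bit helper from this
// package's rewrite.go, essentially:
//
//	func is32Bit(n int64) bool {
//		return n == int64(int32(n)) // true iff n survives a round trip through int32
//	}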
 13745  func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
 13746  	b := v.Block
 13747  	_ = b
 13748  	// match: (Or16 x y)
 13749  	// cond:
 13750  	// result: (ORL x y)
 13751  	for {
 13752  		x := v.Args[0]
 13753  		y := v.Args[1]
 13754  		v.reset(OpAMD64ORL)
 13755  		v.AddArg(x)
 13756  		v.AddArg(y)
 13757  		return true
 13758  	}
 13759  }
 13760  func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
 13761  	b := v.Block
 13762  	_ = b
 13763  	// match: (Or32 x y)
 13764  	// cond:
 13765  	// result: (ORL x y)
 13766  	for {
 13767  		x := v.Args[0]
 13768  		y := v.Args[1]
 13769  		v.reset(OpAMD64ORL)
 13770  		v.AddArg(x)
 13771  		v.AddArg(y)
 13772  		return true
 13773  	}
 13774  }
 13775  func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
 13776  	b := v.Block
 13777  	_ = b
 13778  	// match: (Or64 x y)
 13779  	// cond:
 13780  	// result: (ORQ x y)
 13781  	for {
 13782  		x := v.Args[0]
 13783  		y := v.Args[1]
 13784  		v.reset(OpAMD64ORQ)
 13785  		v.AddArg(x)
 13786  		v.AddArg(y)
 13787  		return true
 13788  	}
 13789  }
 13790  func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
 13791  	b := v.Block
 13792  	_ = b
 13793  	// match: (Or8  x y)
 13794  	// cond:
 13795  	// result: (ORL x y)
 13796  	for {
 13797  		x := v.Args[0]
 13798  		y := v.Args[1]
 13799  		v.reset(OpAMD64ORL)
 13800  		v.AddArg(x)
 13801  		v.AddArg(y)
 13802  		return true
 13803  	}
 13804  }
 13805  func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
 13806  	b := v.Block
 13807  	_ = b
 13808  	// match: (OrB x y)
 13809  	// cond:
 13810  	// result: (ORL x y)
 13811  	for {
 13812  		x := v.Args[0]
 13813  		y := v.Args[1]
 13814  		v.reset(OpAMD64ORL)
 13815  		v.AddArg(x)
 13816  		v.AddArg(y)
 13817  		return true
 13818  	}
 13819  }
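// Note (editorial): Or8, Or16, Or32, and OrB all lower to the 32-bit ORL,
// while only Or64 needs ORQ. The SSA backend makes no promises about the high
// bits of sub-word values, so a single 32-bit OR serves every narrow width at
// no extra cost.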
 13820  func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
 13821  	b := v.Block
 13822  	_ = b
 13823  	// match: (Rsh16Ux16 <t> x y)
 13824  	// cond:
 13825  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
 13826  	for {
 13827  		t := v.Type
 13828  		x := v.Args[0]
 13829  		y := v.Args[1]
 13830  		v.reset(OpAMD64ANDL)
 13831  		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 13832  		v0.AddArg(x)
 13833  		v0.AddArg(y)
 13834  		v.AddArg(v0)
 13835  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 13836  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 13837  		v2.AddArg(y)
 13838  		v2.AuxInt = 16
 13839  		v1.AddArg(v2)
 13840  		v.AddArg(v1)
 13841  		return true
 13842  	}
 13843  }
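// Note (editorial): Go defines x >> y as 0 once y reaches the width of x, but
// the hardware shift only looks at the low bits of the count. The Rsh*Ux*
// lowerings here and below fix that without a branch: the CMP*const sets the
// carry flag iff y < 16 unsigned, SBBLcarrymask turns that carry into an
// all-ones (-1) or all-zeros mask, and the final ANDL zeroes the shifted value
// whenever the count was out of range. A minimal sketch of the same
// computation in plain Go, with illustrative names only:
//
//	func rsh16Ux(x uint16, y uint16) uint16 {
//		shifted := uint16(0)
//		if c := y & 31; c < 16 { // SHRW reduces the count modulo 32
//			shifted = x >> c
//		}
//		var mask uint16
//		if y < 16 { // SBBLcarrymask(CMPWconst y [16]): -1 iff carry set
//			mask = 0xFFFF
//		}
//		return shifted & mask // ANDL
//	}
//
// The three variants that follow differ only in the width of the compare used
// for the count (CMPLconst, CMPQconst, CMPBconst).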
 13844  func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
 13845  	b := v.Block
 13846  	_ = b
 13847  	// match: (Rsh16Ux32 <t> x y)
 13848  	// cond:
 13849  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
 13850  	for {
 13851  		t := v.Type
 13852  		x := v.Args[0]
 13853  		y := v.Args[1]
 13854  		v.reset(OpAMD64ANDL)
 13855  		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 13856  		v0.AddArg(x)
 13857  		v0.AddArg(y)
 13858  		v.AddArg(v0)
 13859  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 13860  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 13861  		v2.AddArg(y)
 13862  		v2.AuxInt = 16
 13863  		v1.AddArg(v2)
 13864  		v.AddArg(v1)
 13865  		return true
 13866  	}
 13867  }
 13868  func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
 13869  	b := v.Block
 13870  	_ = b
 13871  	// match: (Rsh16Ux64 <t> x y)
 13872  	// cond:
 13873  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
 13874  	for {
 13875  		t := v.Type
 13876  		x := v.Args[0]
 13877  		y := v.Args[1]
 13878  		v.reset(OpAMD64ANDL)
 13879  		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 13880  		v0.AddArg(x)
 13881  		v0.AddArg(y)
 13882  		v.AddArg(v0)
 13883  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 13884  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 13885  		v2.AddArg(y)
 13886  		v2.AuxInt = 16
 13887  		v1.AddArg(v2)
 13888  		v.AddArg(v1)
 13889  		return true
 13890  	}
 13891  }
 13892  func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
 13893  	b := v.Block
 13894  	_ = b
 13895  	// match: (Rsh16Ux8  <t> x y)
 13896  	// cond:
 13897  	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
 13898  	for {
 13899  		t := v.Type
 13900  		x := v.Args[0]
 13901  		y := v.Args[1]
 13902  		v.reset(OpAMD64ANDL)
 13903  		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 13904  		v0.AddArg(x)
 13905  		v0.AddArg(y)
 13906  		v.AddArg(v0)
 13907  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 13908  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 13909  		v2.AddArg(y)
 13910  		v2.AuxInt = 16
 13911  		v1.AddArg(v2)
 13912  		v.AddArg(v1)
 13913  		return true
 13914  	}
 13915  }
 13916  func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
 13917  	b := v.Block
 13918  	_ = b
 13919  	// match: (Rsh16x16 <t> x y)
 13920  	// cond:
 13921  	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
 13922  	for {
 13923  		t := v.Type
 13924  		x := v.Args[0]
 13925  		y := v.Args[1]
 13926  		v.reset(OpAMD64SARW)
 13927  		v.Type = t
 13928  		v.AddArg(x)
 13929  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 13930  		v0.AddArg(y)
 13931  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 13932  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 13933  		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 13934  		v3.AddArg(y)
 13935  		v3.AuxInt = 16
 13936  		v2.AddArg(v3)
 13937  		v1.AddArg(v2)
 13938  		v0.AddArg(v1)
 13939  		v.AddArg(v0)
 13940  		return true
 13941  	}
 13942  }
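// Note (editorial): a signed right shift cannot reuse the AND-mask trick
// above, because Go defines x >> y for y >= width as sign fill (all zeros or
// all ones), not 0. The Rsh*x* lowerings therefore saturate the count instead:
// SBBLcarrymask is -1 iff y < 16, NOTL flips that to 0 iff y < 16, and ORL
// leaves an in-range y unchanged while forcing an out-of-range one to all
// ones. SARW then shifts by 31, which for a 16-bit value is pure sign fill.
// A sketch with illustrative names:
//
//	func rsh16x(x int16, y uint16) int16 {
//		c := y
//		if y >= 16 {
//			c = 31 // the ORL/NOTL/SBBLcarrymask chain yields all ones
//		}
//		return x >> c // SARW, which reduces the count modulo 32 anyway
//	}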
 13943  func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
 13944  	b := v.Block
 13945  	_ = b
 13946  	// match: (Rsh16x32 <t> x y)
 13947  	// cond:
 13948  	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
 13949  	for {
 13950  		t := v.Type
 13951  		x := v.Args[0]
 13952  		y := v.Args[1]
 13953  		v.reset(OpAMD64SARW)
 13954  		v.Type = t
 13955  		v.AddArg(x)
 13956  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 13957  		v0.AddArg(y)
 13958  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 13959  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 13960  		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 13961  		v3.AddArg(y)
 13962  		v3.AuxInt = 16
 13963  		v2.AddArg(v3)
 13964  		v1.AddArg(v2)
 13965  		v0.AddArg(v1)
 13966  		v.AddArg(v0)
 13967  		return true
 13968  	}
 13969  }
 13970  func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
 13971  	b := v.Block
 13972  	_ = b
 13973  	// match: (Rsh16x64 <t> x y)
 13974  	// cond:
 13975  	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
 13976  	for {
 13977  		t := v.Type
 13978  		x := v.Args[0]
 13979  		y := v.Args[1]
 13980  		v.reset(OpAMD64SARW)
 13981  		v.Type = t
 13982  		v.AddArg(x)
 13983  		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
 13984  		v0.AddArg(y)
 13985  		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
 13986  		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
 13987  		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 13988  		v3.AddArg(y)
 13989  		v3.AuxInt = 16
 13990  		v2.AddArg(v3)
 13991  		v1.AddArg(v2)
 13992  		v0.AddArg(v1)
 13993  		v.AddArg(v0)
 13994  		return true
 13995  	}
 13996  }
 13997  func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
 13998  	b := v.Block
 13999  	_ = b
 14000  	// match: (Rsh16x8  <t> x y)
 14001  	// cond:
 14002  	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
 14003  	for {
 14004  		t := v.Type
 14005  		x := v.Args[0]
 14006  		y := v.Args[1]
 14007  		v.reset(OpAMD64SARW)
 14008  		v.Type = t
 14009  		v.AddArg(x)
 14010  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14011  		v0.AddArg(y)
 14012  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14013  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14014  		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14015  		v3.AddArg(y)
 14016  		v3.AuxInt = 16
 14017  		v2.AddArg(v3)
 14018  		v1.AddArg(v2)
 14019  		v0.AddArg(v1)
 14020  		v.AddArg(v0)
 14021  		return true
 14022  	}
 14023  }
 14024  func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
 14025  	b := v.Block
 14026  	_ = b
 14027  	// match: (Rsh32Ux16 <t> x y)
 14028  	// cond:
 14029  	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
 14030  	for {
 14031  		t := v.Type
 14032  		x := v.Args[0]
 14033  		y := v.Args[1]
 14034  		v.reset(OpAMD64ANDL)
 14035  		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
 14036  		v0.AddArg(x)
 14037  		v0.AddArg(y)
 14038  		v.AddArg(v0)
 14039  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14040  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 14041  		v2.AddArg(y)
 14042  		v2.AuxInt = 32
 14043  		v1.AddArg(v2)
 14044  		v.AddArg(v1)
 14045  		return true
 14046  	}
 14047  }
 14048  func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
 14049  	b := v.Block
 14050  	_ = b
 14051  	// match: (Rsh32Ux32 <t> x y)
 14052  	// cond:
 14053  	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
 14054  	for {
 14055  		t := v.Type
 14056  		x := v.Args[0]
 14057  		y := v.Args[1]
 14058  		v.reset(OpAMD64ANDL)
 14059  		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
 14060  		v0.AddArg(x)
 14061  		v0.AddArg(y)
 14062  		v.AddArg(v0)
 14063  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14064  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 14065  		v2.AddArg(y)
 14066  		v2.AuxInt = 32
 14067  		v1.AddArg(v2)
 14068  		v.AddArg(v1)
 14069  		return true
 14070  	}
 14071  }
 14072  func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
 14073  	b := v.Block
 14074  	_ = b
 14075  	// match: (Rsh32Ux64 <t> x y)
 14076  	// cond:
 14077  	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
 14078  	for {
 14079  		t := v.Type
 14080  		x := v.Args[0]
 14081  		y := v.Args[1]
 14082  		v.reset(OpAMD64ANDL)
 14083  		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
 14084  		v0.AddArg(x)
 14085  		v0.AddArg(y)
 14086  		v.AddArg(v0)
 14087  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14088  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 14089  		v2.AddArg(y)
 14090  		v2.AuxInt = 32
 14091  		v1.AddArg(v2)
 14092  		v.AddArg(v1)
 14093  		return true
 14094  	}
 14095  }
 14096  func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
 14097  	b := v.Block
 14098  	_ = b
 14099  	// match: (Rsh32Ux8  <t> x y)
 14100  	// cond:
 14101  	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
 14102  	for {
 14103  		t := v.Type
 14104  		x := v.Args[0]
 14105  		y := v.Args[1]
 14106  		v.reset(OpAMD64ANDL)
 14107  		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
 14108  		v0.AddArg(x)
 14109  		v0.AddArg(y)
 14110  		v.AddArg(v0)
 14111  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14112  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14113  		v2.AddArg(y)
 14114  		v2.AuxInt = 32
 14115  		v1.AddArg(v2)
 14116  		v.AddArg(v1)
 14117  		return true
 14118  	}
 14119  }
 14120  func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
 14121  	b := v.Block
 14122  	_ = b
 14123  	// match: (Rsh32x16 <t> x y)
 14124  	// cond:
 14125  	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
 14126  	for {
 14127  		t := v.Type
 14128  		x := v.Args[0]
 14129  		y := v.Args[1]
 14130  		v.reset(OpAMD64SARL)
 14131  		v.Type = t
 14132  		v.AddArg(x)
 14133  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14134  		v0.AddArg(y)
 14135  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14136  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14137  		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 14138  		v3.AddArg(y)
 14139  		v3.AuxInt = 32
 14140  		v2.AddArg(v3)
 14141  		v1.AddArg(v2)
 14142  		v0.AddArg(v1)
 14143  		v.AddArg(v0)
 14144  		return true
 14145  	}
 14146  }
 14147  func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
 14148  	b := v.Block
 14149  	_ = b
 14150  	// match: (Rsh32x32 <t> x y)
 14151  	// cond:
 14152  	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
 14153  	for {
 14154  		t := v.Type
 14155  		x := v.Args[0]
 14156  		y := v.Args[1]
 14157  		v.reset(OpAMD64SARL)
 14158  		v.Type = t
 14159  		v.AddArg(x)
 14160  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14161  		v0.AddArg(y)
 14162  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14163  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14164  		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 14165  		v3.AddArg(y)
 14166  		v3.AuxInt = 32
 14167  		v2.AddArg(v3)
 14168  		v1.AddArg(v2)
 14169  		v0.AddArg(v1)
 14170  		v.AddArg(v0)
 14171  		return true
 14172  	}
 14173  }
 14174  func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
 14175  	b := v.Block
 14176  	_ = b
 14177  	// match: (Rsh32x64 <t> x y)
 14178  	// cond:
 14179  	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
 14180  	for {
 14181  		t := v.Type
 14182  		x := v.Args[0]
 14183  		y := v.Args[1]
 14184  		v.reset(OpAMD64SARL)
 14185  		v.Type = t
 14186  		v.AddArg(x)
 14187  		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
 14188  		v0.AddArg(y)
 14189  		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
 14190  		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
 14191  		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 14192  		v3.AddArg(y)
 14193  		v3.AuxInt = 32
 14194  		v2.AddArg(v3)
 14195  		v1.AddArg(v2)
 14196  		v0.AddArg(v1)
 14197  		v.AddArg(v0)
 14198  		return true
 14199  	}
 14200  }
 14201  func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
 14202  	b := v.Block
 14203  	_ = b
 14204  	// match: (Rsh32x8  <t> x y)
 14205  	// cond:
 14206  	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
 14207  	for {
 14208  		t := v.Type
 14209  		x := v.Args[0]
 14210  		y := v.Args[1]
 14211  		v.reset(OpAMD64SARL)
 14212  		v.Type = t
 14213  		v.AddArg(x)
 14214  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14215  		v0.AddArg(y)
 14216  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14217  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14218  		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14219  		v3.AddArg(y)
 14220  		v3.AuxInt = 32
 14221  		v2.AddArg(v3)
 14222  		v1.AddArg(v2)
 14223  		v0.AddArg(v1)
 14224  		v.AddArg(v0)
 14225  		return true
 14226  	}
 14227  }
 14228  func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
 14229  	b := v.Block
 14230  	_ = b
 14231  	// match: (Rsh64Ux16 <t> x y)
 14232  	// cond:
 14233  	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
 14234  	for {
 14235  		t := v.Type
 14236  		x := v.Args[0]
 14237  		y := v.Args[1]
 14238  		v.reset(OpAMD64ANDQ)
 14239  		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
 14240  		v0.AddArg(x)
 14241  		v0.AddArg(y)
 14242  		v.AddArg(v0)
 14243  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
 14244  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 14245  		v2.AddArg(y)
 14246  		v2.AuxInt = 64
 14247  		v1.AddArg(v2)
 14248  		v.AddArg(v1)
 14249  		return true
 14250  	}
 14251  }
 14252  func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
 14253  	b := v.Block
 14254  	_ = b
 14255  	// match: (Rsh64Ux32 <t> x y)
 14256  	// cond:
 14257  	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
 14258  	for {
 14259  		t := v.Type
 14260  		x := v.Args[0]
 14261  		y := v.Args[1]
 14262  		v.reset(OpAMD64ANDQ)
 14263  		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
 14264  		v0.AddArg(x)
 14265  		v0.AddArg(y)
 14266  		v.AddArg(v0)
 14267  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
 14268  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 14269  		v2.AddArg(y)
 14270  		v2.AuxInt = 64
 14271  		v1.AddArg(v2)
 14272  		v.AddArg(v1)
 14273  		return true
 14274  	}
 14275  }
 14276  func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
 14277  	b := v.Block
 14278  	_ = b
 14279  	// match: (Rsh64Ux64 <t> x y)
 14280  	// cond:
 14281  	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
 14282  	for {
 14283  		t := v.Type
 14284  		x := v.Args[0]
 14285  		y := v.Args[1]
 14286  		v.reset(OpAMD64ANDQ)
 14287  		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
 14288  		v0.AddArg(x)
 14289  		v0.AddArg(y)
 14290  		v.AddArg(v0)
 14291  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
 14292  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 14293  		v2.AddArg(y)
 14294  		v2.AuxInt = 64
 14295  		v1.AddArg(v2)
 14296  		v.AddArg(v1)
 14297  		return true
 14298  	}
 14299  }
 14300  func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
 14301  	b := v.Block
 14302  	_ = b
 14303  	// match: (Rsh64Ux8  <t> x y)
 14304  	// cond:
 14305  	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
 14306  	for {
 14307  		t := v.Type
 14308  		x := v.Args[0]
 14309  		y := v.Args[1]
 14310  		v.reset(OpAMD64ANDQ)
 14311  		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
 14312  		v0.AddArg(x)
 14313  		v0.AddArg(y)
 14314  		v.AddArg(v0)
 14315  		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
 14316  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14317  		v2.AddArg(y)
 14318  		v2.AuxInt = 64
 14319  		v1.AddArg(v2)
 14320  		v.AddArg(v1)
 14321  		return true
 14322  	}
 14323  }
 14324  func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
 14325  	b := v.Block
 14326  	_ = b
 14327  	// match: (Rsh64x16 <t> x y)
 14328  	// cond:
 14329  	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
 14330  	for {
 14331  		t := v.Type
 14332  		x := v.Args[0]
 14333  		y := v.Args[1]
 14334  		v.reset(OpAMD64SARQ)
 14335  		v.Type = t
 14336  		v.AddArg(x)
 14337  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14338  		v0.AddArg(y)
 14339  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14340  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14341  		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 14342  		v3.AddArg(y)
 14343  		v3.AuxInt = 64
 14344  		v2.AddArg(v3)
 14345  		v1.AddArg(v2)
 14346  		v0.AddArg(v1)
 14347  		v.AddArg(v0)
 14348  		return true
 14349  	}
 14350  }
 14351  func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
 14352  	b := v.Block
 14353  	_ = b
 14354  	// match: (Rsh64x32 <t> x y)
 14355  	// cond:
 14356  	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
 14357  	for {
 14358  		t := v.Type
 14359  		x := v.Args[0]
 14360  		y := v.Args[1]
 14361  		v.reset(OpAMD64SARQ)
 14362  		v.Type = t
 14363  		v.AddArg(x)
 14364  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14365  		v0.AddArg(y)
 14366  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14367  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14368  		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 14369  		v3.AddArg(y)
 14370  		v3.AuxInt = 64
 14371  		v2.AddArg(v3)
 14372  		v1.AddArg(v2)
 14373  		v0.AddArg(v1)
 14374  		v.AddArg(v0)
 14375  		return true
 14376  	}
 14377  }
 14378  func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
 14379  	b := v.Block
 14380  	_ = b
 14381  	// match: (Rsh64x64 <t> x y)
 14382  	// cond:
 14383  	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
 14384  	for {
 14385  		t := v.Type
 14386  		x := v.Args[0]
 14387  		y := v.Args[1]
 14388  		v.reset(OpAMD64SARQ)
 14389  		v.Type = t
 14390  		v.AddArg(x)
 14391  		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
 14392  		v0.AddArg(y)
 14393  		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
 14394  		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
 14395  		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 14396  		v3.AddArg(y)
 14397  		v3.AuxInt = 64
 14398  		v2.AddArg(v3)
 14399  		v1.AddArg(v2)
 14400  		v0.AddArg(v1)
 14401  		v.AddArg(v0)
 14402  		return true
 14403  	}
 14404  }
 14405  func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
 14406  	b := v.Block
 14407  	_ = b
 14408  	// match: (Rsh64x8  <t> x y)
 14409  	// cond:
 14410  	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
 14411  	for {
 14412  		t := v.Type
 14413  		x := v.Args[0]
 14414  		y := v.Args[1]
 14415  		v.reset(OpAMD64SARQ)
 14416  		v.Type = t
 14417  		v.AddArg(x)
 14418  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14419  		v0.AddArg(y)
 14420  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14421  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14422  		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14423  		v3.AddArg(y)
 14424  		v3.AuxInt = 64
 14425  		v2.AddArg(v3)
 14426  		v1.AddArg(v2)
 14427  		v0.AddArg(v1)
 14428  		v.AddArg(v0)
 14429  		return true
 14430  	}
 14431  }
 14432  func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
 14433  	b := v.Block
 14434  	_ = b
 14435  	// match: (Rsh8Ux16 <t> x y)
 14436  	// cond:
 14437  	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
 14438  	for {
 14439  		t := v.Type
 14440  		x := v.Args[0]
 14441  		y := v.Args[1]
 14442  		v.reset(OpAMD64ANDL)
 14443  		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 14444  		v0.AddArg(x)
 14445  		v0.AddArg(y)
 14446  		v.AddArg(v0)
 14447  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14448  		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 14449  		v2.AddArg(y)
 14450  		v2.AuxInt = 8
 14451  		v1.AddArg(v2)
 14452  		v.AddArg(v1)
 14453  		return true
 14454  	}
 14455  }
 14456  func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
 14457  	b := v.Block
 14458  	_ = b
 14459  	// match: (Rsh8Ux32 <t> x y)
 14460  	// cond:
 14461  	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
 14462  	for {
 14463  		t := v.Type
 14464  		x := v.Args[0]
 14465  		y := v.Args[1]
 14466  		v.reset(OpAMD64ANDL)
 14467  		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 14468  		v0.AddArg(x)
 14469  		v0.AddArg(y)
 14470  		v.AddArg(v0)
 14471  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14472  		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 14473  		v2.AddArg(y)
 14474  		v2.AuxInt = 8
 14475  		v1.AddArg(v2)
 14476  		v.AddArg(v1)
 14477  		return true
 14478  	}
 14479  }
 14480  func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
 14481  	b := v.Block
 14482  	_ = b
 14483  	// match: (Rsh8Ux64 <t> x y)
 14484  	// cond:
 14485  	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
 14486  	for {
 14487  		t := v.Type
 14488  		x := v.Args[0]
 14489  		y := v.Args[1]
 14490  		v.reset(OpAMD64ANDL)
 14491  		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 14492  		v0.AddArg(x)
 14493  		v0.AddArg(y)
 14494  		v.AddArg(v0)
 14495  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14496  		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 14497  		v2.AddArg(y)
 14498  		v2.AuxInt = 8
 14499  		v1.AddArg(v2)
 14500  		v.AddArg(v1)
 14501  		return true
 14502  	}
 14503  }
 14504  func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
 14505  	b := v.Block
 14506  	_ = b
 14507  	// match: (Rsh8Ux8  <t> x y)
 14508  	// cond:
 14509  	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
 14510  	for {
 14511  		t := v.Type
 14512  		x := v.Args[0]
 14513  		y := v.Args[1]
 14514  		v.reset(OpAMD64ANDL)
 14515  		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 14516  		v0.AddArg(x)
 14517  		v0.AddArg(y)
 14518  		v.AddArg(v0)
 14519  		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
 14520  		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14521  		v2.AddArg(y)
 14522  		v2.AuxInt = 8
 14523  		v1.AddArg(v2)
 14524  		v.AddArg(v1)
 14525  		return true
 14526  	}
 14527  }
 14528  func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
 14529  	b := v.Block
 14530  	_ = b
 14531  	// match: (Rsh8x16 <t> x y)
 14532  	// cond:
 14533  	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
 14534  	for {
 14535  		t := v.Type
 14536  		x := v.Args[0]
 14537  		y := v.Args[1]
 14538  		v.reset(OpAMD64SARB)
 14539  		v.Type = t
 14540  		v.AddArg(x)
 14541  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14542  		v0.AddArg(y)
 14543  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14544  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14545  		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
 14546  		v3.AddArg(y)
 14547  		v3.AuxInt = 8
 14548  		v2.AddArg(v3)
 14549  		v1.AddArg(v2)
 14550  		v0.AddArg(v1)
 14551  		v.AddArg(v0)
 14552  		return true
 14553  	}
 14554  }
 14555  func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
 14556  	b := v.Block
 14557  	_ = b
 14558  	// match: (Rsh8x32 <t> x y)
 14559  	// cond:
 14560  	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
 14561  	for {
 14562  		t := v.Type
 14563  		x := v.Args[0]
 14564  		y := v.Args[1]
 14565  		v.reset(OpAMD64SARB)
 14566  		v.Type = t
 14567  		v.AddArg(x)
 14568  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14569  		v0.AddArg(y)
 14570  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14571  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14572  		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
 14573  		v3.AddArg(y)
 14574  		v3.AuxInt = 8
 14575  		v2.AddArg(v3)
 14576  		v1.AddArg(v2)
 14577  		v0.AddArg(v1)
 14578  		v.AddArg(v0)
 14579  		return true
 14580  	}
 14581  }
 14582  func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
 14583  	b := v.Block
 14584  	_ = b
 14585  	// match: (Rsh8x64 <t> x y)
 14586  	// cond:
 14587  	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
 14588  	for {
 14589  		t := v.Type
 14590  		x := v.Args[0]
 14591  		y := v.Args[1]
 14592  		v.reset(OpAMD64SARB)
 14593  		v.Type = t
 14594  		v.AddArg(x)
 14595  		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
 14596  		v0.AddArg(y)
 14597  		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
 14598  		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
 14599  		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
 14600  		v3.AddArg(y)
 14601  		v3.AuxInt = 8
 14602  		v2.AddArg(v3)
 14603  		v1.AddArg(v2)
 14604  		v0.AddArg(v1)
 14605  		v.AddArg(v0)
 14606  		return true
 14607  	}
 14608  }
 14609  func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
 14610  	b := v.Block
 14611  	_ = b
 14612  	// match: (Rsh8x8  <t> x y)
 14613  	// cond:
 14614  	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
 14615  	for {
 14616  		t := v.Type
 14617  		x := v.Args[0]
 14618  		y := v.Args[1]
 14619  		v.reset(OpAMD64SARB)
 14620  		v.Type = t
 14621  		v.AddArg(x)
 14622  		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 14623  		v0.AddArg(y)
 14624  		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 14625  		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
 14626  		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
 14627  		v3.AddArg(y)
 14628  		v3.AuxInt = 8
 14629  		v2.AddArg(v3)
 14630  		v1.AddArg(v2)
 14631  		v0.AddArg(v1)
 14632  		v.AddArg(v0)
 14633  		return true
 14634  	}
 14635  }
 14636  func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
 14637  	b := v.Block
 14638  	_ = b
 14639  	// match: (SARB x (MOVQconst [c]))
 14640  	// cond:
 14641  	// result: (SARBconst [c&31] x)
 14642  	for {
 14643  		x := v.Args[0]
 14644  		v_1 := v.Args[1]
 14645  		if v_1.Op != OpAMD64MOVQconst {
 14646  			break
 14647  		}
 14648  		c := v_1.AuxInt
 14649  		v.reset(OpAMD64SARBconst)
 14650  		v.AuxInt = c & 31
 14651  		v.AddArg(x)
 14652  		return true
 14653  	}
 14654  	// match: (SARB x (MOVLconst [c]))
 14655  	// cond:
 14656  	// result: (SARBconst [c&31] x)
 14657  	for {
 14658  		x := v.Args[0]
 14659  		v_1 := v.Args[1]
 14660  		if v_1.Op != OpAMD64MOVLconst {
 14661  			break
 14662  		}
 14663  		c := v_1.AuxInt
 14664  		v.reset(OpAMD64SARBconst)
 14665  		v.AuxInt = c & 31
 14666  		v.AddArg(x)
 14667  		return true
 14668  	}
 14669  	return false
 14670  }
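// Note (editorial): folding the count into SARBconst masks it with &31, the
// same truncation the hardware applies. No separate clamp to 7 is needed for
// a byte shift: an arithmetic right shift of an 8-bit value by anything from
// 7 through 31 produces identical sign fill.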
 14671  func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
 14672  	b := v.Block
 14673  	_ = b
 14674  	// match: (SARBconst [c] (MOVQconst [d]))
 14675  	// cond:
 14676  	// result: (MOVQconst [d>>uint64(c)])
 14677  	for {
 14678  		c := v.AuxInt
 14679  		v_0 := v.Args[0]
 14680  		if v_0.Op != OpAMD64MOVQconst {
 14681  			break
 14682  		}
 14683  		d := v_0.AuxInt
 14684  		v.reset(OpAMD64MOVQconst)
 14685  		v.AuxInt = d >> uint64(c)
 14686  		return true
 14687  	}
 14688  	return false
 14689  }
 14690  func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
 14691  	b := v.Block
 14692  	_ = b
 14693  	// match: (SARL x (MOVQconst [c]))
 14694  	// cond:
 14695  	// result: (SARLconst [c&31] x)
 14696  	for {
 14697  		x := v.Args[0]
 14698  		v_1 := v.Args[1]
 14699  		if v_1.Op != OpAMD64MOVQconst {
 14700  			break
 14701  		}
 14702  		c := v_1.AuxInt
 14703  		v.reset(OpAMD64SARLconst)
 14704  		v.AuxInt = c & 31
 14705  		v.AddArg(x)
 14706  		return true
 14707  	}
 14708  	// match: (SARL x (MOVLconst [c]))
 14709  	// cond:
 14710  	// result: (SARLconst [c&31] x)
 14711  	for {
 14712  		x := v.Args[0]
 14713  		v_1 := v.Args[1]
 14714  		if v_1.Op != OpAMD64MOVLconst {
 14715  			break
 14716  		}
 14717  		c := v_1.AuxInt
 14718  		v.reset(OpAMD64SARLconst)
 14719  		v.AuxInt = c & 31
 14720  		v.AddArg(x)
 14721  		return true
 14722  	}
 14723  	// match: (SARL x (ANDLconst [31] y))
 14724  	// cond:
 14725  	// result: (SARL x y)
 14726  	for {
 14727  		x := v.Args[0]
 14728  		v_1 := v.Args[1]
 14729  		if v_1.Op != OpAMD64ANDLconst {
 14730  			break
 14731  		}
 14732  		if v_1.AuxInt != 31 {
 14733  			break
 14734  		}
 14735  		y := v_1.Args[0]
 14736  		v.reset(OpAMD64SARL)
 14737  		v.AddArg(x)
 14738  		v.AddArg(y)
 14739  		return true
 14740  	}
 14741  	return false
 14742  }
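// Note (editorial): the third SARL rule strips a redundant (ANDLconst [31] y)
// off the count. The CPU already reduces 32-bit shift counts modulo 32, so
// masking with 31 in the IR changes nothing; this cleans up source like
// x >> (s & 31). SARQ below does the same for (ANDQconst [63] y), as do the
// SHLL and SHLQ rules further down.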
 14743  func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
 14744  	b := v.Block
 14745  	_ = b
 14746  	// match: (SARLconst [c] (MOVQconst [d]))
 14747  	// cond:
 14748  	// result: (MOVQconst [d>>uint64(c)])
 14749  	for {
 14750  		c := v.AuxInt
 14751  		v_0 := v.Args[0]
 14752  		if v_0.Op != OpAMD64MOVQconst {
 14753  			break
 14754  		}
 14755  		d := v_0.AuxInt
 14756  		v.reset(OpAMD64MOVQconst)
 14757  		v.AuxInt = d >> uint64(c)
 14758  		return true
 14759  	}
 14760  	return false
 14761  }
 14762  func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
 14763  	b := v.Block
 14764  	_ = b
 14765  	// match: (SARQ x (MOVQconst [c]))
 14766  	// cond:
 14767  	// result: (SARQconst [c&63] x)
 14768  	for {
 14769  		x := v.Args[0]
 14770  		v_1 := v.Args[1]
 14771  		if v_1.Op != OpAMD64MOVQconst {
 14772  			break
 14773  		}
 14774  		c := v_1.AuxInt
 14775  		v.reset(OpAMD64SARQconst)
 14776  		v.AuxInt = c & 63
 14777  		v.AddArg(x)
 14778  		return true
 14779  	}
 14780  	// match: (SARQ x (MOVLconst [c]))
 14781  	// cond:
 14782  	// result: (SARQconst [c&63] x)
 14783  	for {
 14784  		x := v.Args[0]
 14785  		v_1 := v.Args[1]
 14786  		if v_1.Op != OpAMD64MOVLconst {
 14787  			break
 14788  		}
 14789  		c := v_1.AuxInt
 14790  		v.reset(OpAMD64SARQconst)
 14791  		v.AuxInt = c & 63
 14792  		v.AddArg(x)
 14793  		return true
 14794  	}
 14795  	// match: (SARQ x (ANDQconst [63] y))
 14796  	// cond:
 14797  	// result: (SARQ x y)
 14798  	for {
 14799  		x := v.Args[0]
 14800  		v_1 := v.Args[1]
 14801  		if v_1.Op != OpAMD64ANDQconst {
 14802  			break
 14803  		}
 14804  		if v_1.AuxInt != 63 {
 14805  			break
 14806  		}
 14807  		y := v_1.Args[0]
 14808  		v.reset(OpAMD64SARQ)
 14809  		v.AddArg(x)
 14810  		v.AddArg(y)
 14811  		return true
 14812  	}
 14813  	return false
 14814  }
 14815  func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
 14816  	b := v.Block
 14817  	_ = b
 14818  	// match: (SARQconst [c] (MOVQconst [d]))
 14819  	// cond:
 14820  	// result: (MOVQconst [d>>uint64(c)])
 14821  	for {
 14822  		c := v.AuxInt
 14823  		v_0 := v.Args[0]
 14824  		if v_0.Op != OpAMD64MOVQconst {
 14825  			break
 14826  		}
 14827  		d := v_0.AuxInt
 14828  		v.reset(OpAMD64MOVQconst)
 14829  		v.AuxInt = d >> uint64(c)
 14830  		return true
 14831  	}
 14832  	return false
 14833  }
 14834  func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
 14835  	b := v.Block
 14836  	_ = b
 14837  	// match: (SARW x (MOVQconst [c]))
 14838  	// cond:
 14839  	// result: (SARWconst [c&31] x)
 14840  	for {
 14841  		x := v.Args[0]
 14842  		v_1 := v.Args[1]
 14843  		if v_1.Op != OpAMD64MOVQconst {
 14844  			break
 14845  		}
 14846  		c := v_1.AuxInt
 14847  		v.reset(OpAMD64SARWconst)
 14848  		v.AuxInt = c & 31
 14849  		v.AddArg(x)
 14850  		return true
 14851  	}
 14852  	// match: (SARW x (MOVLconst [c]))
 14853  	// cond:
 14854  	// result: (SARWconst [c&31] x)
 14855  	for {
 14856  		x := v.Args[0]
 14857  		v_1 := v.Args[1]
 14858  		if v_1.Op != OpAMD64MOVLconst {
 14859  			break
 14860  		}
 14861  		c := v_1.AuxInt
 14862  		v.reset(OpAMD64SARWconst)
 14863  		v.AuxInt = c & 31
 14864  		v.AddArg(x)
 14865  		return true
 14866  	}
 14867  	return false
 14868  }
 14869  func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
 14870  	b := v.Block
 14871  	_ = b
 14872  	// match: (SARWconst [c] (MOVQconst [d]))
 14873  	// cond:
 14874  	// result: (MOVQconst [d>>uint64(c)])
 14875  	for {
 14876  		c := v.AuxInt
 14877  		v_0 := v.Args[0]
 14878  		if v_0.Op != OpAMD64MOVQconst {
 14879  			break
 14880  		}
 14881  		d := v_0.AuxInt
 14882  		v.reset(OpAMD64MOVQconst)
 14883  		v.AuxInt = d >> uint64(c)
 14884  		return true
 14885  	}
 14886  	return false
 14887  }
 14888  func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
 14889  	b := v.Block
 14890  	_ = b
 14891  	// match: (SBBLcarrymask (FlagEQ))
 14892  	// cond:
 14893  	// result: (MOVLconst [0])
 14894  	for {
 14895  		v_0 := v.Args[0]
 14896  		if v_0.Op != OpAMD64FlagEQ {
 14897  			break
 14898  		}
 14899  		v.reset(OpAMD64MOVLconst)
 14900  		v.AuxInt = 0
 14901  		return true
 14902  	}
 14903  	// match: (SBBLcarrymask (FlagLT_ULT))
 14904  	// cond:
 14905  	// result: (MOVLconst [-1])
 14906  	for {
 14907  		v_0 := v.Args[0]
 14908  		if v_0.Op != OpAMD64FlagLT_ULT {
 14909  			break
 14910  		}
 14911  		v.reset(OpAMD64MOVLconst)
 14912  		v.AuxInt = -1
 14913  		return true
 14914  	}
 14915  	// match: (SBBLcarrymask (FlagLT_UGT))
 14916  	// cond:
 14917  	// result: (MOVLconst [0])
 14918  	for {
 14919  		v_0 := v.Args[0]
 14920  		if v_0.Op != OpAMD64FlagLT_UGT {
 14921  			break
 14922  		}
 14923  		v.reset(OpAMD64MOVLconst)
 14924  		v.AuxInt = 0
 14925  		return true
 14926  	}
 14927  	// match: (SBBLcarrymask (FlagGT_ULT))
 14928  	// cond:
 14929  	// result: (MOVLconst [-1])
 14930  	for {
 14931  		v_0 := v.Args[0]
 14932  		if v_0.Op != OpAMD64FlagGT_ULT {
 14933  			break
 14934  		}
 14935  		v.reset(OpAMD64MOVLconst)
 14936  		v.AuxInt = -1
 14937  		return true
 14938  	}
 14939  	// match: (SBBLcarrymask (FlagGT_UGT))
 14940  	// cond:
 14941  	// result: (MOVLconst [0])
 14942  	for {
 14943  		v_0 := v.Args[0]
 14944  		if v_0.Op != OpAMD64FlagGT_UGT {
 14945  			break
 14946  		}
 14947  		v.reset(OpAMD64MOVLconst)
 14948  		v.AuxInt = 0
 14949  		return true
 14950  	}
 14951  	return false
 14952  }
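// Note (editorial): SBBLcarrymask is an SBB of a register with itself, which
// yields -1 when the carry flag is set and 0 otherwise. The Flag* pseudo-ops
// are statically known comparison outcomes (EQ, then signed LT/GT crossed
// with unsigned ULT/UGT), so each rule just evaluates the carry: the *_ULT
// states fold to -1 and the rest to 0. SBBQcarrymask below is the 64-bit
// analogue.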
 14953  func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
 14954  	b := v.Block
 14955  	_ = b
 14956  	// match: (SBBQcarrymask (FlagEQ))
 14957  	// cond:
 14958  	// result: (MOVQconst [0])
 14959  	for {
 14960  		v_0 := v.Args[0]
 14961  		if v_0.Op != OpAMD64FlagEQ {
 14962  			break
 14963  		}
 14964  		v.reset(OpAMD64MOVQconst)
 14965  		v.AuxInt = 0
 14966  		return true
 14967  	}
 14968  	// match: (SBBQcarrymask (FlagLT_ULT))
 14969  	// cond:
 14970  	// result: (MOVQconst [-1])
 14971  	for {
 14972  		v_0 := v.Args[0]
 14973  		if v_0.Op != OpAMD64FlagLT_ULT {
 14974  			break
 14975  		}
 14976  		v.reset(OpAMD64MOVQconst)
 14977  		v.AuxInt = -1
 14978  		return true
 14979  	}
 14980  	// match: (SBBQcarrymask (FlagLT_UGT))
 14981  	// cond:
 14982  	// result: (MOVQconst [0])
 14983  	for {
 14984  		v_0 := v.Args[0]
 14985  		if v_0.Op != OpAMD64FlagLT_UGT {
 14986  			break
 14987  		}
 14988  		v.reset(OpAMD64MOVQconst)
 14989  		v.AuxInt = 0
 14990  		return true
 14991  	}
 14992  	// match: (SBBQcarrymask (FlagGT_ULT))
 14993  	// cond:
 14994  	// result: (MOVQconst [-1])
 14995  	for {
 14996  		v_0 := v.Args[0]
 14997  		if v_0.Op != OpAMD64FlagGT_ULT {
 14998  			break
 14999  		}
 15000  		v.reset(OpAMD64MOVQconst)
 15001  		v.AuxInt = -1
 15002  		return true
 15003  	}
 15004  	// match: (SBBQcarrymask (FlagGT_UGT))
 15005  	// cond:
 15006  	// result: (MOVQconst [0])
 15007  	for {
 15008  		v_0 := v.Args[0]
 15009  		if v_0.Op != OpAMD64FlagGT_UGT {
 15010  			break
 15011  		}
 15012  		v.reset(OpAMD64MOVQconst)
 15013  		v.AuxInt = 0
 15014  		return true
 15015  	}
 15016  	return false
 15017  }
 15018  func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
 15019  	b := v.Block
 15020  	_ = b
 15021  	// match: (SETA (InvertFlags x))
 15022  	// cond:
 15023  	// result: (SETB x)
 15024  	for {
 15025  		v_0 := v.Args[0]
 15026  		if v_0.Op != OpAMD64InvertFlags {
 15027  			break
 15028  		}
 15029  		x := v_0.Args[0]
 15030  		v.reset(OpAMD64SETB)
 15031  		v.AddArg(x)
 15032  		return true
 15033  	}
 15034  	// match: (SETA (FlagEQ))
 15035  	// cond:
 15036  	// result: (MOVLconst [0])
 15037  	for {
 15038  		v_0 := v.Args[0]
 15039  		if v_0.Op != OpAMD64FlagEQ {
 15040  			break
 15041  		}
 15042  		v.reset(OpAMD64MOVLconst)
 15043  		v.AuxInt = 0
 15044  		return true
 15045  	}
 15046  	// match: (SETA (FlagLT_ULT))
 15047  	// cond:
 15048  	// result: (MOVLconst [0])
 15049  	for {
 15050  		v_0 := v.Args[0]
 15051  		if v_0.Op != OpAMD64FlagLT_ULT {
 15052  			break
 15053  		}
 15054  		v.reset(OpAMD64MOVLconst)
 15055  		v.AuxInt = 0
 15056  		return true
 15057  	}
 15058  	// match: (SETA (FlagLT_UGT))
 15059  	// cond:
 15060  	// result: (MOVLconst [1])
 15061  	for {
 15062  		v_0 := v.Args[0]
 15063  		if v_0.Op != OpAMD64FlagLT_UGT {
 15064  			break
 15065  		}
 15066  		v.reset(OpAMD64MOVLconst)
 15067  		v.AuxInt = 1
 15068  		return true
 15069  	}
 15070  	// match: (SETA (FlagGT_ULT))
 15071  	// cond:
 15072  	// result: (MOVLconst [0])
 15073  	for {
 15074  		v_0 := v.Args[0]
 15075  		if v_0.Op != OpAMD64FlagGT_ULT {
 15076  			break
 15077  		}
 15078  		v.reset(OpAMD64MOVLconst)
 15079  		v.AuxInt = 0
 15080  		return true
 15081  	}
 15082  	// match: (SETA (FlagGT_UGT))
 15083  	// cond:
 15084  	// result: (MOVLconst [1])
 15085  	for {
 15086  		v_0 := v.Args[0]
 15087  		if v_0.Op != OpAMD64FlagGT_UGT {
 15088  			break
 15089  		}
 15090  		v.reset(OpAMD64MOVLconst)
 15091  		v.AuxInt = 1
 15092  		return true
 15093  	}
 15094  	return false
 15095  }
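// Note (editorial): InvertFlags records that a comparison's operands were
// swapped, so each SETcc folds to the condition with its operands exchanged:
// SETA here becomes SETB, SETAE becomes SETBE, SETL becomes SETG, and so on.
// The remaining rules evaluate the condition against statically known Flag*
// states and produce the boolean result directly as a MOVLconst of 0 or 1.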
 15096  func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
 15097  	b := v.Block
 15098  	_ = b
 15099  	// match: (SETAE (InvertFlags x))
 15100  	// cond:
 15101  	// result: (SETBE x)
 15102  	for {
 15103  		v_0 := v.Args[0]
 15104  		if v_0.Op != OpAMD64InvertFlags {
 15105  			break
 15106  		}
 15107  		x := v_0.Args[0]
 15108  		v.reset(OpAMD64SETBE)
 15109  		v.AddArg(x)
 15110  		return true
 15111  	}
 15112  	// match: (SETAE (FlagEQ))
 15113  	// cond:
 15114  	// result: (MOVLconst [1])
 15115  	for {
 15116  		v_0 := v.Args[0]
 15117  		if v_0.Op != OpAMD64FlagEQ {
 15118  			break
 15119  		}
 15120  		v.reset(OpAMD64MOVLconst)
 15121  		v.AuxInt = 1
 15122  		return true
 15123  	}
 15124  	// match: (SETAE (FlagLT_ULT))
 15125  	// cond:
 15126  	// result: (MOVLconst [0])
 15127  	for {
 15128  		v_0 := v.Args[0]
 15129  		if v_0.Op != OpAMD64FlagLT_ULT {
 15130  			break
 15131  		}
 15132  		v.reset(OpAMD64MOVLconst)
 15133  		v.AuxInt = 0
 15134  		return true
 15135  	}
 15136  	// match: (SETAE (FlagLT_UGT))
 15137  	// cond:
 15138  	// result: (MOVLconst [1])
 15139  	for {
 15140  		v_0 := v.Args[0]
 15141  		if v_0.Op != OpAMD64FlagLT_UGT {
 15142  			break
 15143  		}
 15144  		v.reset(OpAMD64MOVLconst)
 15145  		v.AuxInt = 1
 15146  		return true
 15147  	}
 15148  	// match: (SETAE (FlagGT_ULT))
 15149  	// cond:
 15150  	// result: (MOVLconst [0])
 15151  	for {
 15152  		v_0 := v.Args[0]
 15153  		if v_0.Op != OpAMD64FlagGT_ULT {
 15154  			break
 15155  		}
 15156  		v.reset(OpAMD64MOVLconst)
 15157  		v.AuxInt = 0
 15158  		return true
 15159  	}
 15160  	// match: (SETAE (FlagGT_UGT))
 15161  	// cond:
 15162  	// result: (MOVLconst [1])
 15163  	for {
 15164  		v_0 := v.Args[0]
 15165  		if v_0.Op != OpAMD64FlagGT_UGT {
 15166  			break
 15167  		}
 15168  		v.reset(OpAMD64MOVLconst)
 15169  		v.AuxInt = 1
 15170  		return true
 15171  	}
 15172  	return false
 15173  }
 15174  func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
 15175  	b := v.Block
 15176  	_ = b
 15177  	// match: (SETB (InvertFlags x))
 15178  	// cond:
 15179  	// result: (SETA x)
 15180  	for {
 15181  		v_0 := v.Args[0]
 15182  		if v_0.Op != OpAMD64InvertFlags {
 15183  			break
 15184  		}
 15185  		x := v_0.Args[0]
 15186  		v.reset(OpAMD64SETA)
 15187  		v.AddArg(x)
 15188  		return true
 15189  	}
 15190  	// match: (SETB (FlagEQ))
 15191  	// cond:
 15192  	// result: (MOVLconst [0])
 15193  	for {
 15194  		v_0 := v.Args[0]
 15195  		if v_0.Op != OpAMD64FlagEQ {
 15196  			break
 15197  		}
 15198  		v.reset(OpAMD64MOVLconst)
 15199  		v.AuxInt = 0
 15200  		return true
 15201  	}
 15202  	// match: (SETB (FlagLT_ULT))
 15203  	// cond:
 15204  	// result: (MOVLconst [1])
 15205  	for {
 15206  		v_0 := v.Args[0]
 15207  		if v_0.Op != OpAMD64FlagLT_ULT {
 15208  			break
 15209  		}
 15210  		v.reset(OpAMD64MOVLconst)
 15211  		v.AuxInt = 1
 15212  		return true
 15213  	}
 15214  	// match: (SETB (FlagLT_UGT))
 15215  	// cond:
 15216  	// result: (MOVLconst [0])
 15217  	for {
 15218  		v_0 := v.Args[0]
 15219  		if v_0.Op != OpAMD64FlagLT_UGT {
 15220  			break
 15221  		}
 15222  		v.reset(OpAMD64MOVLconst)
 15223  		v.AuxInt = 0
 15224  		return true
 15225  	}
 15226  	// match: (SETB (FlagGT_ULT))
 15227  	// cond:
 15228  	// result: (MOVLconst [1])
 15229  	for {
 15230  		v_0 := v.Args[0]
 15231  		if v_0.Op != OpAMD64FlagGT_ULT {
 15232  			break
 15233  		}
 15234  		v.reset(OpAMD64MOVLconst)
 15235  		v.AuxInt = 1
 15236  		return true
 15237  	}
 15238  	// match: (SETB (FlagGT_UGT))
 15239  	// cond:
 15240  	// result: (MOVLconst [0])
 15241  	for {
 15242  		v_0 := v.Args[0]
 15243  		if v_0.Op != OpAMD64FlagGT_UGT {
 15244  			break
 15245  		}
 15246  		v.reset(OpAMD64MOVLconst)
 15247  		v.AuxInt = 0
 15248  		return true
 15249  	}
 15250  	return false
 15251  }
 15252  func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
 15253  	b := v.Block
 15254  	_ = b
 15255  	// match: (SETBE (InvertFlags x))
 15256  	// cond:
 15257  	// result: (SETAE x)
 15258  	for {
 15259  		v_0 := v.Args[0]
 15260  		if v_0.Op != OpAMD64InvertFlags {
 15261  			break
 15262  		}
 15263  		x := v_0.Args[0]
 15264  		v.reset(OpAMD64SETAE)
 15265  		v.AddArg(x)
 15266  		return true
 15267  	}
 15268  	// match: (SETBE (FlagEQ))
 15269  	// cond:
 15270  	// result: (MOVLconst [1])
 15271  	for {
 15272  		v_0 := v.Args[0]
 15273  		if v_0.Op != OpAMD64FlagEQ {
 15274  			break
 15275  		}
 15276  		v.reset(OpAMD64MOVLconst)
 15277  		v.AuxInt = 1
 15278  		return true
 15279  	}
 15280  	// match: (SETBE (FlagLT_ULT))
 15281  	// cond:
 15282  	// result: (MOVLconst [1])
 15283  	for {
 15284  		v_0 := v.Args[0]
 15285  		if v_0.Op != OpAMD64FlagLT_ULT {
 15286  			break
 15287  		}
 15288  		v.reset(OpAMD64MOVLconst)
 15289  		v.AuxInt = 1
 15290  		return true
 15291  	}
 15292  	// match: (SETBE (FlagLT_UGT))
 15293  	// cond:
 15294  	// result: (MOVLconst [0])
 15295  	for {
 15296  		v_0 := v.Args[0]
 15297  		if v_0.Op != OpAMD64FlagLT_UGT {
 15298  			break
 15299  		}
 15300  		v.reset(OpAMD64MOVLconst)
 15301  		v.AuxInt = 0
 15302  		return true
 15303  	}
 15304  	// match: (SETBE (FlagGT_ULT))
 15305  	// cond:
 15306  	// result: (MOVLconst [1])
 15307  	for {
 15308  		v_0 := v.Args[0]
 15309  		if v_0.Op != OpAMD64FlagGT_ULT {
 15310  			break
 15311  		}
 15312  		v.reset(OpAMD64MOVLconst)
 15313  		v.AuxInt = 1
 15314  		return true
 15315  	}
 15316  	// match: (SETBE (FlagGT_UGT))
 15317  	// cond:
 15318  	// result: (MOVLconst [0])
 15319  	for {
 15320  		v_0 := v.Args[0]
 15321  		if v_0.Op != OpAMD64FlagGT_UGT {
 15322  			break
 15323  		}
 15324  		v.reset(OpAMD64MOVLconst)
 15325  		v.AuxInt = 0
 15326  		return true
 15327  	}
 15328  	return false
 15329  }
 15330  func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
 15331  	b := v.Block
 15332  	_ = b
 15333  	// match: (SETEQ (InvertFlags x))
 15334  	// cond:
 15335  	// result: (SETEQ x)
 15336  	for {
 15337  		v_0 := v.Args[0]
 15338  		if v_0.Op != OpAMD64InvertFlags {
 15339  			break
 15340  		}
 15341  		x := v_0.Args[0]
 15342  		v.reset(OpAMD64SETEQ)
 15343  		v.AddArg(x)
 15344  		return true
 15345  	}
 15346  	// match: (SETEQ (FlagEQ))
 15347  	// cond:
 15348  	// result: (MOVLconst [1])
 15349  	for {
 15350  		v_0 := v.Args[0]
 15351  		if v_0.Op != OpAMD64FlagEQ {
 15352  			break
 15353  		}
 15354  		v.reset(OpAMD64MOVLconst)
 15355  		v.AuxInt = 1
 15356  		return true
 15357  	}
 15358  	// match: (SETEQ (FlagLT_ULT))
 15359  	// cond:
 15360  	// result: (MOVLconst [0])
 15361  	for {
 15362  		v_0 := v.Args[0]
 15363  		if v_0.Op != OpAMD64FlagLT_ULT {
 15364  			break
 15365  		}
 15366  		v.reset(OpAMD64MOVLconst)
 15367  		v.AuxInt = 0
 15368  		return true
 15369  	}
 15370  	// match: (SETEQ (FlagLT_UGT))
 15371  	// cond:
 15372  	// result: (MOVLconst [0])
 15373  	for {
 15374  		v_0 := v.Args[0]
 15375  		if v_0.Op != OpAMD64FlagLT_UGT {
 15376  			break
 15377  		}
 15378  		v.reset(OpAMD64MOVLconst)
 15379  		v.AuxInt = 0
 15380  		return true
 15381  	}
 15382  	// match: (SETEQ (FlagGT_ULT))
 15383  	// cond:
 15384  	// result: (MOVLconst [0])
 15385  	for {
 15386  		v_0 := v.Args[0]
 15387  		if v_0.Op != OpAMD64FlagGT_ULT {
 15388  			break
 15389  		}
 15390  		v.reset(OpAMD64MOVLconst)
 15391  		v.AuxInt = 0
 15392  		return true
 15393  	}
 15394  	// match: (SETEQ (FlagGT_UGT))
 15395  	// cond:
 15396  	// result: (MOVLconst [0])
 15397  	for {
 15398  		v_0 := v.Args[0]
 15399  		if v_0.Op != OpAMD64FlagGT_UGT {
 15400  			break
 15401  		}
 15402  		v.reset(OpAMD64MOVLconst)
 15403  		v.AuxInt = 0
 15404  		return true
 15405  	}
 15406  	return false
 15407  }
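// Note (editorial): equality is symmetric in its operands, so SETEQ here and
// SETNE below map to themselves under InvertFlags instead of to a swapped
// condition.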
 15408  func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
 15409  	b := v.Block
 15410  	_ = b
 15411  	// match: (SETG (InvertFlags x))
 15412  	// cond:
 15413  	// result: (SETL x)
 15414  	for {
 15415  		v_0 := v.Args[0]
 15416  		if v_0.Op != OpAMD64InvertFlags {
 15417  			break
 15418  		}
 15419  		x := v_0.Args[0]
 15420  		v.reset(OpAMD64SETL)
 15421  		v.AddArg(x)
 15422  		return true
 15423  	}
 15424  	// match: (SETG (FlagEQ))
 15425  	// cond:
 15426  	// result: (MOVLconst [0])
 15427  	for {
 15428  		v_0 := v.Args[0]
 15429  		if v_0.Op != OpAMD64FlagEQ {
 15430  			break
 15431  		}
 15432  		v.reset(OpAMD64MOVLconst)
 15433  		v.AuxInt = 0
 15434  		return true
 15435  	}
 15436  	// match: (SETG (FlagLT_ULT))
 15437  	// cond:
 15438  	// result: (MOVLconst [0])
 15439  	for {
 15440  		v_0 := v.Args[0]
 15441  		if v_0.Op != OpAMD64FlagLT_ULT {
 15442  			break
 15443  		}
 15444  		v.reset(OpAMD64MOVLconst)
 15445  		v.AuxInt = 0
 15446  		return true
 15447  	}
 15448  	// match: (SETG (FlagLT_UGT))
 15449  	// cond:
 15450  	// result: (MOVLconst [0])
 15451  	for {
 15452  		v_0 := v.Args[0]
 15453  		if v_0.Op != OpAMD64FlagLT_UGT {
 15454  			break
 15455  		}
 15456  		v.reset(OpAMD64MOVLconst)
 15457  		v.AuxInt = 0
 15458  		return true
 15459  	}
 15460  	// match: (SETG (FlagGT_ULT))
 15461  	// cond:
 15462  	// result: (MOVLconst [1])
 15463  	for {
 15464  		v_0 := v.Args[0]
 15465  		if v_0.Op != OpAMD64FlagGT_ULT {
 15466  			break
 15467  		}
 15468  		v.reset(OpAMD64MOVLconst)
 15469  		v.AuxInt = 1
 15470  		return true
 15471  	}
 15472  	// match: (SETG (FlagGT_UGT))
 15473  	// cond:
 15474  	// result: (MOVLconst [1])
 15475  	for {
 15476  		v_0 := v.Args[0]
 15477  		if v_0.Op != OpAMD64FlagGT_UGT {
 15478  			break
 15479  		}
 15480  		v.reset(OpAMD64MOVLconst)
 15481  		v.AuxInt = 1
 15482  		return true
 15483  	}
 15484  	return false
 15485  }
 15486  func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
 15487  	b := v.Block
 15488  	_ = b
 15489  	// match: (SETGE (InvertFlags x))
 15490  	// cond:
 15491  	// result: (SETLE x)
 15492  	for {
 15493  		v_0 := v.Args[0]
 15494  		if v_0.Op != OpAMD64InvertFlags {
 15495  			break
 15496  		}
 15497  		x := v_0.Args[0]
 15498  		v.reset(OpAMD64SETLE)
 15499  		v.AddArg(x)
 15500  		return true
 15501  	}
 15502  	// match: (SETGE (FlagEQ))
 15503  	// cond:
 15504  	// result: (MOVLconst [1])
 15505  	for {
 15506  		v_0 := v.Args[0]
 15507  		if v_0.Op != OpAMD64FlagEQ {
 15508  			break
 15509  		}
 15510  		v.reset(OpAMD64MOVLconst)
 15511  		v.AuxInt = 1
 15512  		return true
 15513  	}
 15514  	// match: (SETGE (FlagLT_ULT))
 15515  	// cond:
 15516  	// result: (MOVLconst [0])
 15517  	for {
 15518  		v_0 := v.Args[0]
 15519  		if v_0.Op != OpAMD64FlagLT_ULT {
 15520  			break
 15521  		}
 15522  		v.reset(OpAMD64MOVLconst)
 15523  		v.AuxInt = 0
 15524  		return true
 15525  	}
 15526  	// match: (SETGE (FlagLT_UGT))
 15527  	// cond:
 15528  	// result: (MOVLconst [0])
 15529  	for {
 15530  		v_0 := v.Args[0]
 15531  		if v_0.Op != OpAMD64FlagLT_UGT {
 15532  			break
 15533  		}
 15534  		v.reset(OpAMD64MOVLconst)
 15535  		v.AuxInt = 0
 15536  		return true
 15537  	}
 15538  	// match: (SETGE (FlagGT_ULT))
 15539  	// cond:
 15540  	// result: (MOVLconst [1])
 15541  	for {
 15542  		v_0 := v.Args[0]
 15543  		if v_0.Op != OpAMD64FlagGT_ULT {
 15544  			break
 15545  		}
 15546  		v.reset(OpAMD64MOVLconst)
 15547  		v.AuxInt = 1
 15548  		return true
 15549  	}
 15550  	// match: (SETGE (FlagGT_UGT))
 15551  	// cond:
 15552  	// result: (MOVLconst [1])
 15553  	for {
 15554  		v_0 := v.Args[0]
 15555  		if v_0.Op != OpAMD64FlagGT_UGT {
 15556  			break
 15557  		}
 15558  		v.reset(OpAMD64MOVLconst)
 15559  		v.AuxInt = 1
 15560  		return true
 15561  	}
 15562  	return false
 15563  }
 15564  func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
 15565  	b := v.Block
 15566  	_ = b
 15567  	// match: (SETL (InvertFlags x))
 15568  	// cond:
 15569  	// result: (SETG x)
 15570  	for {
 15571  		v_0 := v.Args[0]
 15572  		if v_0.Op != OpAMD64InvertFlags {
 15573  			break
 15574  		}
 15575  		x := v_0.Args[0]
 15576  		v.reset(OpAMD64SETG)
 15577  		v.AddArg(x)
 15578  		return true
 15579  	}
 15580  	// match: (SETL (FlagEQ))
 15581  	// cond:
 15582  	// result: (MOVLconst [0])
 15583  	for {
 15584  		v_0 := v.Args[0]
 15585  		if v_0.Op != OpAMD64FlagEQ {
 15586  			break
 15587  		}
 15588  		v.reset(OpAMD64MOVLconst)
 15589  		v.AuxInt = 0
 15590  		return true
 15591  	}
 15592  	// match: (SETL (FlagLT_ULT))
 15593  	// cond:
 15594  	// result: (MOVLconst [1])
 15595  	for {
 15596  		v_0 := v.Args[0]
 15597  		if v_0.Op != OpAMD64FlagLT_ULT {
 15598  			break
 15599  		}
 15600  		v.reset(OpAMD64MOVLconst)
 15601  		v.AuxInt = 1
 15602  		return true
 15603  	}
 15604  	// match: (SETL (FlagLT_UGT))
 15605  	// cond:
 15606  	// result: (MOVLconst [1])
 15607  	for {
 15608  		v_0 := v.Args[0]
 15609  		if v_0.Op != OpAMD64FlagLT_UGT {
 15610  			break
 15611  		}
 15612  		v.reset(OpAMD64MOVLconst)
 15613  		v.AuxInt = 1
 15614  		return true
 15615  	}
 15616  	// match: (SETL (FlagGT_ULT))
 15617  	// cond:
 15618  	// result: (MOVLconst [0])
 15619  	for {
 15620  		v_0 := v.Args[0]
 15621  		if v_0.Op != OpAMD64FlagGT_ULT {
 15622  			break
 15623  		}
 15624  		v.reset(OpAMD64MOVLconst)
 15625  		v.AuxInt = 0
 15626  		return true
 15627  	}
 15628  	// match: (SETL (FlagGT_UGT))
 15629  	// cond:
 15630  	// result: (MOVLconst [0])
 15631  	for {
 15632  		v_0 := v.Args[0]
 15633  		if v_0.Op != OpAMD64FlagGT_UGT {
 15634  			break
 15635  		}
 15636  		v.reset(OpAMD64MOVLconst)
 15637  		v.AuxInt = 0
 15638  		return true
 15639  	}
 15640  	return false
 15641  }
 15642  func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
 15643  	b := v.Block
 15644  	_ = b
 15645  	// match: (SETLE (InvertFlags x))
 15646  	// cond:
 15647  	// result: (SETGE x)
 15648  	for {
 15649  		v_0 := v.Args[0]
 15650  		if v_0.Op != OpAMD64InvertFlags {
 15651  			break
 15652  		}
 15653  		x := v_0.Args[0]
 15654  		v.reset(OpAMD64SETGE)
 15655  		v.AddArg(x)
 15656  		return true
 15657  	}
 15658  	// match: (SETLE (FlagEQ))
 15659  	// cond:
 15660  	// result: (MOVLconst [1])
 15661  	for {
 15662  		v_0 := v.Args[0]
 15663  		if v_0.Op != OpAMD64FlagEQ {
 15664  			break
 15665  		}
 15666  		v.reset(OpAMD64MOVLconst)
 15667  		v.AuxInt = 1
 15668  		return true
 15669  	}
 15670  	// match: (SETLE (FlagLT_ULT))
 15671  	// cond:
 15672  	// result: (MOVLconst [1])
 15673  	for {
 15674  		v_0 := v.Args[0]
 15675  		if v_0.Op != OpAMD64FlagLT_ULT {
 15676  			break
 15677  		}
 15678  		v.reset(OpAMD64MOVLconst)
 15679  		v.AuxInt = 1
 15680  		return true
 15681  	}
 15682  	// match: (SETLE (FlagLT_UGT))
 15683  	// cond:
 15684  	// result: (MOVLconst [1])
 15685  	for {
 15686  		v_0 := v.Args[0]
 15687  		if v_0.Op != OpAMD64FlagLT_UGT {
 15688  			break
 15689  		}
 15690  		v.reset(OpAMD64MOVLconst)
 15691  		v.AuxInt = 1
 15692  		return true
 15693  	}
 15694  	// match: (SETLE (FlagGT_ULT))
 15695  	// cond:
 15696  	// result: (MOVLconst [0])
 15697  	for {
 15698  		v_0 := v.Args[0]
 15699  		if v_0.Op != OpAMD64FlagGT_ULT {
 15700  			break
 15701  		}
 15702  		v.reset(OpAMD64MOVLconst)
 15703  		v.AuxInt = 0
 15704  		return true
 15705  	}
 15706  	// match: (SETLE (FlagGT_UGT))
 15707  	// cond:
 15708  	// result: (MOVLconst [0])
 15709  	for {
 15710  		v_0 := v.Args[0]
 15711  		if v_0.Op != OpAMD64FlagGT_UGT {
 15712  			break
 15713  		}
 15714  		v.reset(OpAMD64MOVLconst)
 15715  		v.AuxInt = 0
 15716  		return true
 15717  	}
 15718  	return false
 15719  }
 15720  func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
 15721  	b := v.Block
 15722  	_ = b
 15723  	// match: (SETNE (InvertFlags x))
 15724  	// cond:
 15725  	// result: (SETNE x)
 15726  	for {
 15727  		v_0 := v.Args[0]
 15728  		if v_0.Op != OpAMD64InvertFlags {
 15729  			break
 15730  		}
 15731  		x := v_0.Args[0]
 15732  		v.reset(OpAMD64SETNE)
 15733  		v.AddArg(x)
 15734  		return true
 15735  	}
 15736  	// match: (SETNE (FlagEQ))
 15737  	// cond:
 15738  	// result: (MOVLconst [0])
 15739  	for {
 15740  		v_0 := v.Args[0]
 15741  		if v_0.Op != OpAMD64FlagEQ {
 15742  			break
 15743  		}
 15744  		v.reset(OpAMD64MOVLconst)
 15745  		v.AuxInt = 0
 15746  		return true
 15747  	}
 15748  	// match: (SETNE (FlagLT_ULT))
 15749  	// cond:
 15750  	// result: (MOVLconst [1])
 15751  	for {
 15752  		v_0 := v.Args[0]
 15753  		if v_0.Op != OpAMD64FlagLT_ULT {
 15754  			break
 15755  		}
 15756  		v.reset(OpAMD64MOVLconst)
 15757  		v.AuxInt = 1
 15758  		return true
 15759  	}
 15760  	// match: (SETNE (FlagLT_UGT))
 15761  	// cond:
 15762  	// result: (MOVLconst [1])
 15763  	for {
 15764  		v_0 := v.Args[0]
 15765  		if v_0.Op != OpAMD64FlagLT_UGT {
 15766  			break
 15767  		}
 15768  		v.reset(OpAMD64MOVLconst)
 15769  		v.AuxInt = 1
 15770  		return true
 15771  	}
 15772  	// match: (SETNE (FlagGT_ULT))
 15773  	// cond:
 15774  	// result: (MOVLconst [1])
 15775  	for {
 15776  		v_0 := v.Args[0]
 15777  		if v_0.Op != OpAMD64FlagGT_ULT {
 15778  			break
 15779  		}
 15780  		v.reset(OpAMD64MOVLconst)
 15781  		v.AuxInt = 1
 15782  		return true
 15783  	}
 15784  	// match: (SETNE (FlagGT_UGT))
 15785  	// cond:
 15786  	// result: (MOVLconst [1])
 15787  	for {
 15788  		v_0 := v.Args[0]
 15789  		if v_0.Op != OpAMD64FlagGT_UGT {
 15790  			break
 15791  		}
 15792  		v.reset(OpAMD64MOVLconst)
 15793  		v.AuxInt = 1
 15794  		return true
 15795  	}
 15796  	return false
 15797  }
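// Editor's note: variable shifts follow the hardware CL semantics. For
// 32-bit operands the processor reads only the low 5 bits of the count, so
// a constant count can be folded with &31, and an explicit
// (ANDLconst [31] y) applied to the count is redundant and can be dropped.
// Illustrative example (not a generated rule):
//
//	(SHLL x (MOVQconst [40])) -> (SHLLconst [40&31] x) = (SHLLconst [8] x)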
 15798  func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
 15799  	b := v.Block
 15800  	_ = b
 15801  	// match: (SHLL x (MOVQconst [c]))
 15802  	// cond:
 15803  	// result: (SHLLconst [c&31] x)
 15804  	for {
 15805  		x := v.Args[0]
 15806  		v_1 := v.Args[1]
 15807  		if v_1.Op != OpAMD64MOVQconst {
 15808  			break
 15809  		}
 15810  		c := v_1.AuxInt
 15811  		v.reset(OpAMD64SHLLconst)
 15812  		v.AuxInt = c & 31
 15813  		v.AddArg(x)
 15814  		return true
 15815  	}
 15816  	// match: (SHLL x (MOVLconst [c]))
 15817  	// cond:
 15818  	// result: (SHLLconst [c&31] x)
 15819  	for {
 15820  		x := v.Args[0]
 15821  		v_1 := v.Args[1]
 15822  		if v_1.Op != OpAMD64MOVLconst {
 15823  			break
 15824  		}
 15825  		c := v_1.AuxInt
 15826  		v.reset(OpAMD64SHLLconst)
 15827  		v.AuxInt = c & 31
 15828  		v.AddArg(x)
 15829  		return true
 15830  	}
 15831  	// match: (SHLL x (ANDLconst [31] y))
 15832  	// cond:
 15833  	// result: (SHLL x y)
 15834  	for {
 15835  		x := v.Args[0]
 15836  		v_1 := v.Args[1]
 15837  		if v_1.Op != OpAMD64ANDLconst {
 15838  			break
 15839  		}
 15840  		if v_1.AuxInt != 31 {
 15841  			break
 15842  		}
 15843  		y := v_1.Args[0]
 15844  		v.reset(OpAMD64SHLL)
 15845  		v.AddArg(x)
 15846  		v.AddArg(y)
 15847  		return true
 15848  	}
 15849  	return false
 15850  }
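// Editor's note: the 64-bit variant is identical except that 64-bit shifts
// consume the low 6 bits of the count, hence the &63 mask and the
// (ANDQconst [63] y) form of the redundant-mask rule.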
 15851  func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
 15852  	b := v.Block
 15853  	_ = b
 15854  	// match: (SHLQ x (MOVQconst [c]))
 15855  	// cond:
 15856  	// result: (SHLQconst [c&63] x)
 15857  	for {
 15858  		x := v.Args[0]
 15859  		v_1 := v.Args[1]
 15860  		if v_1.Op != OpAMD64MOVQconst {
 15861  			break
 15862  		}
 15863  		c := v_1.AuxInt
 15864  		v.reset(OpAMD64SHLQconst)
 15865  		v.AuxInt = c & 63
 15866  		v.AddArg(x)
 15867  		return true
 15868  	}
 15869  	// match: (SHLQ x (MOVLconst [c]))
 15870  	// cond:
 15871  	// result: (SHLQconst [c&63] x)
 15872  	for {
 15873  		x := v.Args[0]
 15874  		v_1 := v.Args[1]
 15875  		if v_1.Op != OpAMD64MOVLconst {
 15876  			break
 15877  		}
 15878  		c := v_1.AuxInt
 15879  		v.reset(OpAMD64SHLQconst)
 15880  		v.AuxInt = c & 63
 15881  		v.AddArg(x)
 15882  		return true
 15883  	}
 15884  	// match: (SHLQ x (ANDQconst [63] y))
 15885  	// cond:
 15886  	// result: (SHLQ x y)
 15887  	for {
 15888  		x := v.Args[0]
 15889  		v_1 := v.Args[1]
 15890  		if v_1.Op != OpAMD64ANDQconst {
 15891  			break
 15892  		}
 15893  		if v_1.AuxInt != 63 {
 15894  			break
 15895  		}
 15896  		y := v_1.Args[0]
 15897  		v.reset(OpAMD64SHLQ)
 15898  		v.AddArg(x)
 15899  		v.AddArg(y)
 15900  		return true
 15901  	}
 15902  	return false
 15903  }
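// Editor's note: the byte and word shifts below also mask the count with
// 31. A plausible reading is that this mirrors the hardware, which masks
// the count modulo 32 for every non-64-bit operand size, so the mask
// tracks the shifter width rather than the operand width.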
 15904  func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
 15905  	b := v.Block
 15906  	_ = b
 15907  	// match: (SHRB x (MOVQconst [c]))
 15908  	// cond:
 15909  	// result: (SHRBconst [c&31] x)
 15910  	for {
 15911  		x := v.Args[0]
 15912  		v_1 := v.Args[1]
 15913  		if v_1.Op != OpAMD64MOVQconst {
 15914  			break
 15915  		}
 15916  		c := v_1.AuxInt
 15917  		v.reset(OpAMD64SHRBconst)
 15918  		v.AuxInt = c & 31
 15919  		v.AddArg(x)
 15920  		return true
 15921  	}
 15922  	// match: (SHRB x (MOVLconst [c]))
 15923  	// cond:
 15924  	// result: (SHRBconst [c&31] x)
 15925  	for {
 15926  		x := v.Args[0]
 15927  		v_1 := v.Args[1]
 15928  		if v_1.Op != OpAMD64MOVLconst {
 15929  			break
 15930  		}
 15931  		c := v_1.AuxInt
 15932  		v.reset(OpAMD64SHRBconst)
 15933  		v.AuxInt = c & 31
 15934  		v.AddArg(x)
 15935  		return true
 15936  	}
 15937  	return false
 15938  }
 15939  func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
 15940  	b := v.Block
 15941  	_ = b
 15942  	// match: (SHRL x (MOVQconst [c]))
 15943  	// cond:
 15944  	// result: (SHRLconst [c&31] x)
 15945  	for {
 15946  		x := v.Args[0]
 15947  		v_1 := v.Args[1]
 15948  		if v_1.Op != OpAMD64MOVQconst {
 15949  			break
 15950  		}
 15951  		c := v_1.AuxInt
 15952  		v.reset(OpAMD64SHRLconst)
 15953  		v.AuxInt = c & 31
 15954  		v.AddArg(x)
 15955  		return true
 15956  	}
 15957  	// match: (SHRL x (MOVLconst [c]))
 15958  	// cond:
 15959  	// result: (SHRLconst [c&31] x)
 15960  	for {
 15961  		x := v.Args[0]
 15962  		v_1 := v.Args[1]
 15963  		if v_1.Op != OpAMD64MOVLconst {
 15964  			break
 15965  		}
 15966  		c := v_1.AuxInt
 15967  		v.reset(OpAMD64SHRLconst)
 15968  		v.AuxInt = c & 31
 15969  		v.AddArg(x)
 15970  		return true
 15971  	}
 15972  	// match: (SHRL x (ANDLconst [31] y))
 15973  	// cond:
 15974  	// result: (SHRL x y)
 15975  	for {
 15976  		x := v.Args[0]
 15977  		v_1 := v.Args[1]
 15978  		if v_1.Op != OpAMD64ANDLconst {
 15979  			break
 15980  		}
 15981  		if v_1.AuxInt != 31 {
 15982  			break
 15983  		}
 15984  		y := v_1.Args[0]
 15985  		v.reset(OpAMD64SHRL)
 15986  		v.AddArg(x)
 15987  		v.AddArg(y)
 15988  		return true
 15989  	}
 15990  	return false
 15991  }
 15992  func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
 15993  	b := v.Block
 15994  	_ = b
 15995  	// match: (SHRQ x (MOVQconst [c]))
 15996  	// cond:
 15997  	// result: (SHRQconst [c&63] x)
 15998  	for {
 15999  		x := v.Args[0]
 16000  		v_1 := v.Args[1]
 16001  		if v_1.Op != OpAMD64MOVQconst {
 16002  			break
 16003  		}
 16004  		c := v_1.AuxInt
 16005  		v.reset(OpAMD64SHRQconst)
 16006  		v.AuxInt = c & 63
 16007  		v.AddArg(x)
 16008  		return true
 16009  	}
 16010  	// match: (SHRQ x (MOVLconst [c]))
 16011  	// cond:
 16012  	// result: (SHRQconst [c&63] x)
 16013  	for {
 16014  		x := v.Args[0]
 16015  		v_1 := v.Args[1]
 16016  		if v_1.Op != OpAMD64MOVLconst {
 16017  			break
 16018  		}
 16019  		c := v_1.AuxInt
 16020  		v.reset(OpAMD64SHRQconst)
 16021  		v.AuxInt = c & 63
 16022  		v.AddArg(x)
 16023  		return true
 16024  	}
 16025  	// match: (SHRQ x (ANDQconst [63] y))
 16026  	// cond:
 16027  	// result: (SHRQ x y)
 16028  	for {
 16029  		x := v.Args[0]
 16030  		v_1 := v.Args[1]
 16031  		if v_1.Op != OpAMD64ANDQconst {
 16032  			break
 16033  		}
 16034  		if v_1.AuxInt != 63 {
 16035  			break
 16036  		}
 16037  		y := v_1.Args[0]
 16038  		v.reset(OpAMD64SHRQ)
 16039  		v.AddArg(x)
 16040  		v.AddArg(y)
 16041  		return true
 16042  	}
 16043  	return false
 16044  }
 16045  func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
 16046  	b := v.Block
 16047  	_ = b
 16048  	// match: (SHRW x (MOVQconst [c]))
 16049  	// cond:
 16050  	// result: (SHRWconst [c&31] x)
 16051  	for {
 16052  		x := v.Args[0]
 16053  		v_1 := v.Args[1]
 16054  		if v_1.Op != OpAMD64MOVQconst {
 16055  			break
 16056  		}
 16057  		c := v_1.AuxInt
 16058  		v.reset(OpAMD64SHRWconst)
 16059  		v.AuxInt = c & 31
 16060  		v.AddArg(x)
 16061  		return true
 16062  	}
 16063  	// match: (SHRW x (MOVLconst [c]))
 16064  	// cond:
 16065  	// result: (SHRWconst [c&31] x)
 16066  	for {
 16067  		x := v.Args[0]
 16068  		v_1 := v.Args[1]
 16069  		if v_1.Op != OpAMD64MOVLconst {
 16070  			break
 16071  		}
 16072  		c := v_1.AuxInt
 16073  		v.reset(OpAMD64SHRWconst)
 16074  		v.AuxInt = c & 31
 16075  		v.AddArg(x)
 16076  		return true
 16077  	}
 16078  	return false
 16079  }
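// Editor's note: subtraction is not commutative, so a constant left operand
// cannot simply be swapped into the immediate slot. The rules instead
// rewrite c-x as -(x-c). Illustrative example (not a generated rule):
//
//	(SUBL (MOVLconst [5]) x) -> (NEGL (SUBLconst x [5]))   // 5-x == -(x-5)
//
// Subtracting a value from itself folds to the zero constant.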
 16080  func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
 16081  	b := v.Block
 16082  	_ = b
 16083  	// match: (SUBL x (MOVLconst [c]))
 16084  	// cond:
 16085  	// result: (SUBLconst x [c])
 16086  	for {
 16087  		x := v.Args[0]
 16088  		v_1 := v.Args[1]
 16089  		if v_1.Op != OpAMD64MOVLconst {
 16090  			break
 16091  		}
 16092  		c := v_1.AuxInt
 16093  		v.reset(OpAMD64SUBLconst)
 16094  		v.AddArg(x)
 16095  		v.AuxInt = c
 16096  		return true
 16097  	}
 16098  	// match: (SUBL (MOVLconst [c]) x)
 16099  	// cond:
 16100  	// result: (NEGL (SUBLconst <v.Type> x [c]))
 16101  	for {
 16102  		v_0 := v.Args[0]
 16103  		if v_0.Op != OpAMD64MOVLconst {
 16104  			break
 16105  		}
 16106  		c := v_0.AuxInt
 16107  		x := v.Args[1]
 16108  		v.reset(OpAMD64NEGL)
 16109  		v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
 16110  		v0.AddArg(x)
 16111  		v0.AuxInt = c
 16112  		v.AddArg(v0)
 16113  		return true
 16114  	}
 16115  	// match: (SUBL x x)
 16116  	// cond:
 16117  	// result: (MOVLconst [0])
 16118  	for {
 16119  		x := v.Args[0]
 16120  		if x != v.Args[1] {
 16121  			break
 16122  		}
 16123  		v.reset(OpAMD64MOVLconst)
 16124  		v.AuxInt = 0
 16125  		return true
 16126  	}
 16127  	return false
 16128  }
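// Editor's note: SUBLconst canonicalizes to ADDLconst with the negated
// constant. Unlike the 64-bit case below, no overflow guard is needed:
// 32-bit arithmetic wraps, and even for c == -1<<31 the wrapped negation
// int64(int32(-c)) subtracts the same value modulo 2^32.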
 16129  func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
 16130  	b := v.Block
 16131  	_ = b
 16132  	// match: (SUBLconst [c] x)
 16133  	// cond: int32(c) == 0
 16134  	// result: x
 16135  	for {
 16136  		c := v.AuxInt
 16137  		x := v.Args[0]
 16138  		if !(int32(c) == 0) {
 16139  			break
 16140  		}
 16141  		v.reset(OpCopy)
 16142  		v.Type = x.Type
 16143  		v.AddArg(x)
 16144  		return true
 16145  	}
 16146  	// match: (SUBLconst [c] x)
 16147  	// cond:
 16148  	// result: (ADDLconst [int64(int32(-c))] x)
 16149  	for {
 16150  		c := v.AuxInt
 16151  		x := v.Args[0]
 16152  		v.reset(OpAMD64ADDLconst)
 16153  		v.AuxInt = int64(int32(-c))
 16154  		v.AddArg(x)
 16155  		return true
 16156  	}
 16157  }
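// Editor's note: the 64-bit rules fold a constant operand only under
// is32Bit(c), because AMD64 arithmetic immediates are 32 bits wide and
// sign-extended to 64; larger constants must stay in a register.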
 16158  func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
 16159  	b := v.Block
 16160  	_ = b
 16161  	// match: (SUBQ x (MOVQconst [c]))
 16162  	// cond: is32Bit(c)
 16163  	// result: (SUBQconst x [c])
 16164  	for {
 16165  		x := v.Args[0]
 16166  		v_1 := v.Args[1]
 16167  		if v_1.Op != OpAMD64MOVQconst {
 16168  			break
 16169  		}
 16170  		c := v_1.AuxInt
 16171  		if !(is32Bit(c)) {
 16172  			break
 16173  		}
 16174  		v.reset(OpAMD64SUBQconst)
 16175  		v.AddArg(x)
 16176  		v.AuxInt = c
 16177  		return true
 16178  	}
 16179  	// match: (SUBQ (MOVQconst [c]) x)
 16180  	// cond: is32Bit(c)
 16181  	// result: (NEGQ (SUBQconst <v.Type> x [c]))
 16182  	for {
 16183  		v_0 := v.Args[0]
 16184  		if v_0.Op != OpAMD64MOVQconst {
 16185  			break
 16186  		}
 16187  		c := v_0.AuxInt
 16188  		x := v.Args[1]
 16189  		if !(is32Bit(c)) {
 16190  			break
 16191  		}
 16192  		v.reset(OpAMD64NEGQ)
 16193  		v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
 16194  		v0.AddArg(x)
 16195  		v0.AuxInt = c
 16196  		v.AddArg(v0)
 16197  		return true
 16198  	}
 16199  	// match: (SUBQ x x)
 16200  	// cond:
 16201  	// result: (MOVQconst [0])
 16202  	for {
 16203  		x := v.Args[0]
 16204  		if x != v.Args[1] {
 16205  			break
 16206  		}
 16207  		v.reset(OpAMD64MOVQconst)
 16208  		v.AuxInt = 0
 16209  		return true
 16210  	}
 16211  	return false
 16212  }
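// Editor's note: the c != -(1<<31) guard below matters because the rewrite
// negates the constant: -(-1<<31) == 1<<31 does not fit in a sign-extended
// 32-bit immediate, so that one value must keep its SUBQconst form.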
 16213  func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
 16214  	b := v.Block
 16215  	_ = b
 16216  	// match: (SUBQconst [0] x)
 16217  	// cond:
 16218  	// result: x
 16219  	for {
 16220  		if v.AuxInt != 0 {
 16221  			break
 16222  		}
 16223  		x := v.Args[0]
 16224  		v.reset(OpCopy)
 16225  		v.Type = x.Type
 16226  		v.AddArg(x)
 16227  		return true
 16228  	}
 16229  	// match: (SUBQconst [c] x)
 16230  	// cond: c != -(1<<31)
 16231  	// result: (ADDQconst [-c] x)
 16232  	for {
 16233  		c := v.AuxInt
 16234  		x := v.Args[0]
 16235  		if !(c != -(1 << 31)) {
 16236  			break
 16237  		}
 16238  		v.reset(OpAMD64ADDQconst)
 16239  		v.AuxInt = -c
 16240  		v.AddArg(x)
 16241  		return true
 16242  	}
 16243  	// match: (SUBQconst (MOVQconst [d]) [c])
 16244  	// cond:
 16245  	// result: (MOVQconst [d-c])
 16246  	for {
 16247  		v_0 := v.Args[0]
 16248  		if v_0.Op != OpAMD64MOVQconst {
 16249  			break
 16250  		}
 16251  		d := v_0.AuxInt
 16252  		c := v.AuxInt
 16253  		v.reset(OpAMD64MOVQconst)
 16254  		v.AuxInt = d - c
 16255  		return true
 16256  	}
 16257  	// match: (SUBQconst (SUBQconst x [d]) [c])
 16258  	// cond: is32Bit(-c-d)
 16259  	// result: (ADDQconst [-c-d] x)
 16260  	for {
 16261  		v_0 := v.Args[0]
 16262  		if v_0.Op != OpAMD64SUBQconst {
 16263  			break
 16264  		}
 16265  		x := v_0.Args[0]
 16266  		d := v_0.AuxInt
 16267  		c := v.AuxInt
 16268  		if !(is32Bit(-c - d)) {
 16269  			break
 16270  		}
 16271  		v.reset(OpAMD64ADDQconst)
 16272  		v.AuxInt = -c - d
 16273  		v.AddArg(x)
 16274  		return true
 16275  	}
 16276  	return false
 16277  }
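// Editor's note: generic sign extensions all lower to the MOV*QSX family,
// which extends the source all the way to the 64-bit register regardless
// of the nominal destination width; narrower results simply ignore the
// extra high bits.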
 16278  func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
 16279  	b := v.Block
 16280  	_ = b
 16281  	// match: (SignExt16to32 x)
 16282  	// cond:
 16283  	// result: (MOVWQSX x)
 16284  	for {
 16285  		x := v.Args[0]
 16286  		v.reset(OpAMD64MOVWQSX)
 16287  		v.AddArg(x)
 16288  		return true
 16289  	}
 16290  }
 16291  func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool {
 16292  	b := v.Block
 16293  	_ = b
 16294  	// match: (SignExt16to64 x)
 16295  	// cond:
 16296  	// result: (MOVWQSX x)
 16297  	for {
 16298  		x := v.Args[0]
 16299  		v.reset(OpAMD64MOVWQSX)
 16300  		v.AddArg(x)
 16301  		return true
 16302  	}
 16303  }
 16304  func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool {
 16305  	b := v.Block
 16306  	_ = b
 16307  	// match: (SignExt32to64 x)
 16308  	// cond:
 16309  	// result: (MOVLQSX x)
 16310  	for {
 16311  		x := v.Args[0]
 16312  		v.reset(OpAMD64MOVLQSX)
 16313  		v.AddArg(x)
 16314  		return true
 16315  	}
 16316  }
 16317  func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
 16318  	b := v.Block
 16319  	_ = b
 16320  	// match: (SignExt8to16  x)
 16321  	// cond:
 16322  	// result: (MOVBQSX x)
 16323  	for {
 16324  		x := v.Args[0]
 16325  		v.reset(OpAMD64MOVBQSX)
 16326  		v.AddArg(x)
 16327  		return true
 16328  	}
 16329  }
 16330  func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
 16331  	b := v.Block
 16332  	_ = b
 16333  	// match: (SignExt8to32  x)
 16334  	// cond:
 16335  	// result: (MOVBQSX x)
 16336  	for {
 16337  		x := v.Args[0]
 16338  		v.reset(OpAMD64MOVBQSX)
 16339  		v.AddArg(x)
 16340  		return true
 16341  	}
 16342  }
 16343  func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool {
 16344  	b := v.Block
 16345  	_ = b
 16346  	// match: (SignExt8to64  x)
 16347  	// cond:
 16348  	// result: (MOVBQSX x)
 16349  	for {
 16350  		x := v.Args[0]
 16351  		v.reset(OpAMD64MOVBQSX)
 16352  		v.AddArg(x)
 16353  		return true
 16354  	}
 16355  }
 16356  func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool {
 16357  	b := v.Block
 16358  	_ = b
 16359  	// match: (Sqrt x)
 16360  	// cond:
 16361  	// result: (SQRTSD x)
 16362  	for {
 16363  		x := v.Args[0]
 16364  		v.reset(OpAMD64SQRTSD)
 16365  		v.AddArg(x)
 16366  		return true
 16367  	}
 16368  }
 16369  func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool {
 16370  	b := v.Block
 16371  	_ = b
 16372  	// match: (StaticCall [argwid] {target} mem)
 16373  	// cond:
 16374  	// result: (CALLstatic [argwid] {target} mem)
 16375  	for {
 16376  		argwid := v.AuxInt
 16377  		target := v.Aux
 16378  		mem := v.Args[0]
 16379  		v.reset(OpAMD64CALLstatic)
 16380  		v.AuxInt = argwid
 16381  		v.Aux = target
 16382  		v.AddArg(mem)
 16383  		return true
 16384  	}
 16385  }
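// Editor's note: Store lowering dispatches on the store width (AuxInt, in
// bytes). Rule order matters here: the floating-point checks for widths 8
// and 4 come first, so (Store [8] ptr val mem) only falls through to the
// integer MOVQstore when val is not a 64-bit float.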
 16386  func rewriteValueAMD64_OpStore(v *Value, config *Config) bool {
 16387  	b := v.Block
 16388  	_ = b
 16389  	// match: (Store [8] ptr val mem)
 16390  	// cond: is64BitFloat(val.Type)
 16391  	// result: (MOVSDstore ptr val mem)
 16392  	for {
 16393  		if v.AuxInt != 8 {
 16394  			break
 16395  		}
 16396  		ptr := v.Args[0]
 16397  		val := v.Args[1]
 16398  		mem := v.Args[2]
 16399  		if !(is64BitFloat(val.Type)) {
 16400  			break
 16401  		}
 16402  		v.reset(OpAMD64MOVSDstore)
 16403  		v.AddArg(ptr)
 16404  		v.AddArg(val)
 16405  		v.AddArg(mem)
 16406  		return true
 16407  	}
 16408  	// match: (Store [4] ptr val mem)
 16409  	// cond: is32BitFloat(val.Type)
 16410  	// result: (MOVSSstore ptr val mem)
 16411  	for {
 16412  		if v.AuxInt != 4 {
 16413  			break
 16414  		}
 16415  		ptr := v.Args[0]
 16416  		val := v.Args[1]
 16417  		mem := v.Args[2]
 16418  		if !(is32BitFloat(val.Type)) {
 16419  			break
 16420  		}
 16421  		v.reset(OpAMD64MOVSSstore)
 16422  		v.AddArg(ptr)
 16423  		v.AddArg(val)
 16424  		v.AddArg(mem)
 16425  		return true
 16426  	}
 16427  	// match: (Store [8] ptr val mem)
 16428  	// cond:
 16429  	// result: (MOVQstore ptr val mem)
 16430  	for {
 16431  		if v.AuxInt != 8 {
 16432  			break
 16433  		}
 16434  		ptr := v.Args[0]
 16435  		val := v.Args[1]
 16436  		mem := v.Args[2]
 16437  		v.reset(OpAMD64MOVQstore)
 16438  		v.AddArg(ptr)
 16439  		v.AddArg(val)
 16440  		v.AddArg(mem)
 16441  		return true
 16442  	}
 16443  	// match: (Store [4] ptr val mem)
 16444  	// cond:
 16445  	// result: (MOVLstore ptr val mem)
 16446  	for {
 16447  		if v.AuxInt != 4 {
 16448  			break
 16449  		}
 16450  		ptr := v.Args[0]
 16451  		val := v.Args[1]
 16452  		mem := v.Args[2]
 16453  		v.reset(OpAMD64MOVLstore)
 16454  		v.AddArg(ptr)
 16455  		v.AddArg(val)
 16456  		v.AddArg(mem)
 16457  		return true
 16458  	}
 16459  	// match: (Store [2] ptr val mem)
 16460  	// cond:
 16461  	// result: (MOVWstore ptr val mem)
 16462  	for {
 16463  		if v.AuxInt != 2 {
 16464  			break
 16465  		}
 16466  		ptr := v.Args[0]
 16467  		val := v.Args[1]
 16468  		mem := v.Args[2]
 16469  		v.reset(OpAMD64MOVWstore)
 16470  		v.AddArg(ptr)
 16471  		v.AddArg(val)
 16472  		v.AddArg(mem)
 16473  		return true
 16474  	}
 16475  	// match: (Store [1] ptr val mem)
 16476  	// cond:
 16477  	// result: (MOVBstore ptr val mem)
 16478  	for {
 16479  		if v.AuxInt != 1 {
 16480  			break
 16481  		}
 16482  		ptr := v.Args[0]
 16483  		val := v.Args[1]
 16484  		mem := v.Args[2]
 16485  		v.reset(OpAMD64MOVBstore)
 16486  		v.AddArg(ptr)
 16487  		v.AddArg(val)
 16488  		v.AddArg(mem)
 16489  		return true
 16490  	}
 16491  	return false
 16492  }
 16493  func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
 16494  	b := v.Block
 16495  	_ = b
 16496  	// match: (Sub16  x y)
 16497  	// cond:
 16498  	// result: (SUBL  x y)
 16499  	for {
 16500  		x := v.Args[0]
 16501  		y := v.Args[1]
 16502  		v.reset(OpAMD64SUBL)
 16503  		v.AddArg(x)
 16504  		v.AddArg(y)
 16505  		return true
 16506  	}
 16507  }
 16508  func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool {
 16509  	b := v.Block
 16510  	_ = b
 16511  	// match: (Sub32  x y)
 16512  	// cond:
 16513  	// result: (SUBL  x y)
 16514  	for {
 16515  		x := v.Args[0]
 16516  		y := v.Args[1]
 16517  		v.reset(OpAMD64SUBL)
 16518  		v.AddArg(x)
 16519  		v.AddArg(y)
 16520  		return true
 16521  	}
 16522  }
 16523  func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool {
 16524  	b := v.Block
 16525  	_ = b
 16526  	// match: (Sub32F x y)
 16527  	// cond:
 16528  	// result: (SUBSS x y)
 16529  	for {
 16530  		x := v.Args[0]
 16531  		y := v.Args[1]
 16532  		v.reset(OpAMD64SUBSS)
 16533  		v.AddArg(x)
 16534  		v.AddArg(y)
 16535  		return true
 16536  	}
 16537  }
 16538  func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool {
 16539  	b := v.Block
 16540  	_ = b
 16541  	// match: (Sub64  x y)
 16542  	// cond:
 16543  	// result: (SUBQ  x y)
 16544  	for {
 16545  		x := v.Args[0]
 16546  		y := v.Args[1]
 16547  		v.reset(OpAMD64SUBQ)
 16548  		v.AddArg(x)
 16549  		v.AddArg(y)
 16550  		return true
 16551  	}
 16552  }
 16553  func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool {
 16554  	b := v.Block
 16555  	_ = b
 16556  	// match: (Sub64F x y)
 16557  	// cond:
 16558  	// result: (SUBSD x y)
 16559  	for {
 16560  		x := v.Args[0]
 16561  		y := v.Args[1]
 16562  		v.reset(OpAMD64SUBSD)
 16563  		v.AddArg(x)
 16564  		v.AddArg(y)
 16565  		return true
 16566  	}
 16567  }
 16568  func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
 16569  	b := v.Block
 16570  	_ = b
 16571  	// match: (Sub8   x y)
 16572  	// cond:
 16573  	// result: (SUBL  x y)
 16574  	for {
 16575  		x := v.Args[0]
 16576  		y := v.Args[1]
 16577  		v.reset(OpAMD64SUBL)
 16578  		v.AddArg(x)
 16579  		v.AddArg(y)
 16580  		return true
 16581  	}
 16582  }
 16583  func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
 16584  	b := v.Block
 16585  	_ = b
 16586  	// match: (SubPtr x y)
 16587  	// cond:
 16588  	// result: (SUBQ  x y)
 16589  	for {
 16590  		x := v.Args[0]
 16591  		y := v.Args[1]
 16592  		v.reset(OpAMD64SUBQ)
 16593  		v.AddArg(x)
 16594  		v.AddArg(y)
 16595  		return true
 16596  	}
 16597  }
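// Editor's note: truncations are free on AMD64. Consumers of a narrow value
// read only the low bits of the register, so every Trunc* lowers to an
// OpCopy of its argument.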
 16598  func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
 16599  	b := v.Block
 16600  	_ = b
 16601  	// match: (Trunc16to8  x)
 16602  	// cond:
 16603  	// result: x
 16604  	for {
 16605  		x := v.Args[0]
 16606  		v.reset(OpCopy)
 16607  		v.Type = x.Type
 16608  		v.AddArg(x)
 16609  		return true
 16610  	}
 16611  }
 16612  func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool {
 16613  	b := v.Block
 16614  	_ = b
 16615  	// match: (Trunc32to16 x)
 16616  	// cond:
 16617  	// result: x
 16618  	for {
 16619  		x := v.Args[0]
 16620  		v.reset(OpCopy)
 16621  		v.Type = x.Type
 16622  		v.AddArg(x)
 16623  		return true
 16624  	}
 16625  }
 16626  func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool {
 16627  	b := v.Block
 16628  	_ = b
 16629  	// match: (Trunc32to8  x)
 16630  	// cond:
 16631  	// result: x
 16632  	for {
 16633  		x := v.Args[0]
 16634  		v.reset(OpCopy)
 16635  		v.Type = x.Type
 16636  		v.AddArg(x)
 16637  		return true
 16638  	}
 16639  }
 16640  func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool {
 16641  	b := v.Block
 16642  	_ = b
 16643  	// match: (Trunc64to16 x)
 16644  	// cond:
 16645  	// result: x
 16646  	for {
 16647  		x := v.Args[0]
 16648  		v.reset(OpCopy)
 16649  		v.Type = x.Type
 16650  		v.AddArg(x)
 16651  		return true
 16652  	}
 16653  }
 16654  func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool {
 16655  	b := v.Block
 16656  	_ = b
 16657  	// match: (Trunc64to32 x)
 16658  	// cond:
 16659  	// result: x
 16660  	for {
 16661  		x := v.Args[0]
 16662  		v.reset(OpCopy)
 16663  		v.Type = x.Type
 16664  		v.AddArg(x)
 16665  		return true
 16666  	}
 16667  }
 16668  func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
 16669  	b := v.Block
 16670  	_ = b
 16671  	// match: (Trunc64to8  x)
 16672  	// cond:
 16673  	// result: x
 16674  	for {
 16675  		x := v.Args[0]
 16676  		v.reset(OpCopy)
 16677  		v.Type = x.Type
 16678  		v.AddArg(x)
 16679  		return true
 16680  	}
 16681  }
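// Editor's note: XOR follows the same constant-folding shape as the other
// commutative ops, and (XORL x x) applies the identity x^x == 0 to fold a
// self-XOR into a zero constant.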
 16682  func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
 16683  	b := v.Block
 16684  	_ = b
 16685  	// match: (XORL x (MOVLconst [c]))
 16686  	// cond:
 16687  	// result: (XORLconst [c] x)
 16688  	for {
 16689  		x := v.Args[0]
 16690  		v_1 := v.Args[1]
 16691  		if v_1.Op != OpAMD64MOVLconst {
 16692  			break
 16693  		}
 16694  		c := v_1.AuxInt
 16695  		v.reset(OpAMD64XORLconst)
 16696  		v.AuxInt = c
 16697  		v.AddArg(x)
 16698  		return true
 16699  	}
 16700  	// match: (XORL (MOVLconst [c]) x)
 16701  	// cond:
 16702  	// result: (XORLconst [c] x)
 16703  	for {
 16704  		v_0 := v.Args[0]
 16705  		if v_0.Op != OpAMD64MOVLconst {
 16706  			break
 16707  		}
 16708  		c := v_0.AuxInt
 16709  		x := v.Args[1]
 16710  		v.reset(OpAMD64XORLconst)
 16711  		v.AuxInt = c
 16712  		v.AddArg(x)
 16713  		return true
 16714  	}
 16715  	// match: (XORL x x)
 16716  	// cond:
 16717  	// result: (MOVLconst [0])
 16718  	for {
 16719  		x := v.Args[0]
 16720  		if x != v.Args[1] {
 16721  			break
 16722  		}
 16723  		v.reset(OpAMD64MOVLconst)
 16724  		v.AuxInt = 0
 16725  		return true
 16726  	}
 16727  	return false
 16728  }
 16729  func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
 16730  	b := v.Block
 16731  	_ = b
 16732  	// match: (XORLconst [c] x)
 16733  	// cond: int32(c)==0
 16734  	// result: x
 16735  	for {
 16736  		c := v.AuxInt
 16737  		x := v.Args[0]
 16738  		if !(int32(c) == 0) {
 16739  			break
 16740  		}
 16741  		v.reset(OpCopy)
 16742  		v.Type = x.Type
 16743  		v.AddArg(x)
 16744  		return true
 16745  	}
 16746  	// match: (XORLconst [c] (MOVLconst [d]))
 16747  	// cond:
 16748  	// result: (MOVLconst [c^d])
 16749  	for {
 16750  		c := v.AuxInt
 16751  		v_0 := v.Args[0]
 16752  		if v_0.Op != OpAMD64MOVLconst {
 16753  			break
 16754  		}
 16755  		d := v_0.AuxInt
 16756  		v.reset(OpAMD64MOVLconst)
 16757  		v.AuxInt = c ^ d
 16758  		return true
 16759  	}
 16760  	return false
 16761  }
 16762  func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
 16763  	b := v.Block
 16764  	_ = b
 16765  	// match: (XORQ x (MOVQconst [c]))
 16766  	// cond: is32Bit(c)
 16767  	// result: (XORQconst [c] x)
 16768  	for {
 16769  		x := v.Args[0]
 16770  		v_1 := v.Args[1]
 16771  		if v_1.Op != OpAMD64MOVQconst {
 16772  			break
 16773  		}
 16774  		c := v_1.AuxInt
 16775  		if !(is32Bit(c)) {
 16776  			break
 16777  		}
 16778  		v.reset(OpAMD64XORQconst)
 16779  		v.AuxInt = c
 16780  		v.AddArg(x)
 16781  		return true
 16782  	}
 16783  	// match: (XORQ (MOVQconst [c]) x)
 16784  	// cond: is32Bit(c)
 16785  	// result: (XORQconst [c] x)
 16786  	for {
 16787  		v_0 := v.Args[0]
 16788  		if v_0.Op != OpAMD64MOVQconst {
 16789  			break
 16790  		}
 16791  		c := v_0.AuxInt
 16792  		x := v.Args[1]
 16793  		if !(is32Bit(c)) {
 16794  			break
 16795  		}
 16796  		v.reset(OpAMD64XORQconst)
 16797  		v.AuxInt = c
 16798  		v.AddArg(x)
 16799  		return true
 16800  	}
 16801  	// match: (XORQ x x)
 16802  	// cond:
 16803  	// result: (MOVQconst [0])
 16804  	for {
 16805  		x := v.Args[0]
 16806  		if x != v.Args[1] {
 16807  			break
 16808  		}
 16809  		v.reset(OpAMD64MOVQconst)
 16810  		v.AuxInt = 0
 16811  		return true
 16812  	}
 16813  	return false
 16814  }
 16815  func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
 16816  	b := v.Block
 16817  	_ = b
 16818  	// match: (XORQconst [0] x)
 16819  	// cond:
 16820  	// result: x
 16821  	for {
 16822  		if v.AuxInt != 0 {
 16823  			break
 16824  		}
 16825  		x := v.Args[0]
 16826  		v.reset(OpCopy)
 16827  		v.Type = x.Type
 16828  		v.AddArg(x)
 16829  		return true
 16830  	}
 16831  	// match: (XORQconst [c] (MOVQconst [d]))
 16832  	// cond:
 16833  	// result: (MOVQconst [c^d])
 16834  	for {
 16835  		c := v.AuxInt
 16836  		v_0 := v.Args[0]
 16837  		if v_0.Op != OpAMD64MOVQconst {
 16838  			break
 16839  		}
 16840  		d := v_0.AuxInt
 16841  		v.reset(OpAMD64MOVQconst)
 16842  		v.AuxInt = c ^ d
 16843  		return true
 16844  	}
 16845  	return false
 16846  }
 16847  func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
 16848  	b := v.Block
 16849  	_ = b
 16850  	// match: (Xor16 x y)
 16851  	// cond:
 16852  	// result: (XORL x y)
 16853  	for {
 16854  		x := v.Args[0]
 16855  		y := v.Args[1]
 16856  		v.reset(OpAMD64XORL)
 16857  		v.AddArg(x)
 16858  		v.AddArg(y)
 16859  		return true
 16860  	}
 16861  }
 16862  func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool {
 16863  	b := v.Block
 16864  	_ = b
 16865  	// match: (Xor32 x y)
 16866  	// cond:
 16867  	// result: (XORL x y)
 16868  	for {
 16869  		x := v.Args[0]
 16870  		y := v.Args[1]
 16871  		v.reset(OpAMD64XORL)
 16872  		v.AddArg(x)
 16873  		v.AddArg(y)
 16874  		return true
 16875  	}
 16876  }
 16877  func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool {
 16878  	b := v.Block
 16879  	_ = b
 16880  	// match: (Xor64 x y)
 16881  	// cond:
 16882  	// result: (XORQ x y)
 16883  	for {
 16884  		x := v.Args[0]
 16885  		y := v.Args[1]
 16886  		v.reset(OpAMD64XORQ)
 16887  		v.AddArg(x)
 16888  		v.AddArg(y)
 16889  		return true
 16890  	}
 16891  }
 16892  func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
 16893  	b := v.Block
 16894  	_ = b
 16895  	// match: (Xor8  x y)
 16896  	// cond:
 16897  	// result: (XORL x y)
 16898  	for {
 16899  		x := v.Args[0]
 16900  		y := v.Args[1]
 16901  		v.reset(OpAMD64XORL)
 16902  		v.AddArg(x)
 16903  		v.AddArg(y)
 16904  		return true
 16905  	}
 16906  }
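// Editor's note: Zero [n] clears n bytes at destptr, and the rules pick a
// strategy by size: a single store-constant for 1/2/4/8; a pair of
// store-constants for 3/5/6/7, the upper one addressed through a
// makeValAndOff offset; short MOVQstoreconst chains for 16/24/32; a
// Duff's-device DUFFZERO for moderate multiples of 16 up to 1024; and
// REP STOSQ beyond that.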
 16907  func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
 16908  	b := v.Block
 16909  	_ = b
 16910  	// match: (Zero [0] _ mem)
 16911  	// cond:
 16912  	// result: mem
 16913  	for {
 16914  		if v.AuxInt != 0 {
 16915  			break
 16916  		}
 16917  		mem := v.Args[1]
 16918  		v.reset(OpCopy)
 16919  		v.Type = mem.Type
 16920  		v.AddArg(mem)
 16921  		return true
 16922  	}
 16923  	// match: (Zero [1] destptr mem)
 16924  	// cond:
 16925  	// result: (MOVBstoreconst [0] destptr mem)
 16926  	for {
 16927  		if v.AuxInt != 1 {
 16928  			break
 16929  		}
 16930  		destptr := v.Args[0]
 16931  		mem := v.Args[1]
 16932  		v.reset(OpAMD64MOVBstoreconst)
 16933  		v.AuxInt = 0
 16934  		v.AddArg(destptr)
 16935  		v.AddArg(mem)
 16936  		return true
 16937  	}
 16938  	// match: (Zero [2] destptr mem)
 16939  	// cond:
 16940  	// result: (MOVWstoreconst [0] destptr mem)
 16941  	for {
 16942  		if v.AuxInt != 2 {
 16943  			break
 16944  		}
 16945  		destptr := v.Args[0]
 16946  		mem := v.Args[1]
 16947  		v.reset(OpAMD64MOVWstoreconst)
 16948  		v.AuxInt = 0
 16949  		v.AddArg(destptr)
 16950  		v.AddArg(mem)
 16951  		return true
 16952  	}
 16953  	// match: (Zero [4] destptr mem)
 16954  	// cond:
 16955  	// result: (MOVLstoreconst [0] destptr mem)
 16956  	for {
 16957  		if v.AuxInt != 4 {
 16958  			break
 16959  		}
 16960  		destptr := v.Args[0]
 16961  		mem := v.Args[1]
 16962  		v.reset(OpAMD64MOVLstoreconst)
 16963  		v.AuxInt = 0
 16964  		v.AddArg(destptr)
 16965  		v.AddArg(mem)
 16966  		return true
 16967  	}
 16968  	// match: (Zero [8] destptr mem)
 16969  	// cond:
 16970  	// result: (MOVQstoreconst [0] destptr mem)
 16971  	for {
 16972  		if v.AuxInt != 8 {
 16973  			break
 16974  		}
 16975  		destptr := v.Args[0]
 16976  		mem := v.Args[1]
 16977  		v.reset(OpAMD64MOVQstoreconst)
 16978  		v.AuxInt = 0
 16979  		v.AddArg(destptr)
 16980  		v.AddArg(mem)
 16981  		return true
 16982  	}
 16983  	// match: (Zero [3] destptr mem)
 16984  	// cond:
 16985  	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr 		(MOVWstoreconst [0] destptr mem))
 16986  	for {
 16987  		if v.AuxInt != 3 {
 16988  			break
 16989  		}
 16990  		destptr := v.Args[0]
 16991  		mem := v.Args[1]
 16992  		v.reset(OpAMD64MOVBstoreconst)
 16993  		v.AuxInt = makeValAndOff(0, 2)
 16994  		v.AddArg(destptr)
 16995  		v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem)
 16996  		v0.AuxInt = 0
 16997  		v0.AddArg(destptr)
 16998  		v0.AddArg(mem)
 16999  		v.AddArg(v0)
 17000  		return true
 17001  	}
 17002  	// match: (Zero [5] destptr mem)
 17003  	// cond:
 17004  	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
 17005  	for {
 17006  		if v.AuxInt != 5 {
 17007  			break
 17008  		}
 17009  		destptr := v.Args[0]
 17010  		mem := v.Args[1]
 17011  		v.reset(OpAMD64MOVBstoreconst)
 17012  		v.AuxInt = makeValAndOff(0, 4)
 17013  		v.AddArg(destptr)
 17014  		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
 17015  		v0.AuxInt = 0
 17016  		v0.AddArg(destptr)
 17017  		v0.AddArg(mem)
 17018  		v.AddArg(v0)
 17019  		return true
 17020  	}
 17021  	// match: (Zero [6] destptr mem)
 17022  	// cond:
 17023  	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
 17024  	for {
 17025  		if v.AuxInt != 6 {
 17026  			break
 17027  		}
 17028  		destptr := v.Args[0]
 17029  		mem := v.Args[1]
 17030  		v.reset(OpAMD64MOVWstoreconst)
 17031  		v.AuxInt = makeValAndOff(0, 4)
 17032  		v.AddArg(destptr)
 17033  		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
 17034  		v0.AuxInt = 0
 17035  		v0.AddArg(destptr)
 17036  		v0.AddArg(mem)
 17037  		v.AddArg(v0)
 17038  		return true
 17039  	}
 17040  	// match: (Zero [7] destptr mem)
 17041  	// cond:
 17042  	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr 		(MOVLstoreconst [0] destptr mem))
 17043  	for {
 17044  		if v.AuxInt != 7 {
 17045  			break
 17046  		}
 17047  		destptr := v.Args[0]
 17048  		mem := v.Args[1]
 17049  		v.reset(OpAMD64MOVLstoreconst)
 17050  		v.AuxInt = makeValAndOff(0, 3)
 17051  		v.AddArg(destptr)
 17052  		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
 17053  		v0.AuxInt = 0
 17054  		v0.AddArg(destptr)
 17055  		v0.AddArg(mem)
 17056  		v.AddArg(v0)
 17057  		return true
 17058  	}
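	// Editor's note: for sizes above 8 that are not a multiple of 8, the
	// rule below peels the head with one 8-byte store at destptr and
	// recurses on the remaining size-size%8 bytes starting at
	// destptr+size%8. The two regions overlap by 8-size%8 bytes, which is
	// harmless when storing zeros.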
 17059  	// match: (Zero [size] destptr mem)
 17060  	// cond: size%8 != 0 && size > 8
 17061  	// result: (Zero [size-size%8] (ADDQconst destptr [size%8]) 		(MOVQstoreconst [0] destptr mem))
 17062  	for {
 17063  		size := v.AuxInt
 17064  		destptr := v.Args[0]
 17065  		mem := v.Args[1]
 17066  		if !(size%8 != 0 && size > 8) {
 17067  			break
 17068  		}
 17069  		v.reset(OpZero)
 17070  		v.AuxInt = size - size%8
 17071  		v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
 17072  		v0.AddArg(destptr)
 17073  		v0.AuxInt = size % 8
 17074  		v.AddArg(v0)
 17075  		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17076  		v1.AuxInt = 0
 17077  		v1.AddArg(destptr)
 17078  		v1.AddArg(mem)
 17079  		v.AddArg(v1)
 17080  		return true
 17081  	}
 17082  	// match: (Zero [16] destptr mem)
 17083  	// cond:
 17084  	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr 		(MOVQstoreconst [0] destptr mem))
 17085  	for {
 17086  		if v.AuxInt != 16 {
 17087  			break
 17088  		}
 17089  		destptr := v.Args[0]
 17090  		mem := v.Args[1]
 17091  		v.reset(OpAMD64MOVQstoreconst)
 17092  		v.AuxInt = makeValAndOff(0, 8)
 17093  		v.AddArg(destptr)
 17094  		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17095  		v0.AuxInt = 0
 17096  		v0.AddArg(destptr)
 17097  		v0.AddArg(mem)
 17098  		v.AddArg(v0)
 17099  		return true
 17100  	}
 17101  	// match: (Zero [24] destptr mem)
 17102  	// cond:
 17103  	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr 		(MOVQstoreconst [makeValAndOff(0,8)] destptr 			(MOVQstoreconst [0] destptr mem)))
 17104  	for {
 17105  		if v.AuxInt != 24 {
 17106  			break
 17107  		}
 17108  		destptr := v.Args[0]
 17109  		mem := v.Args[1]
 17110  		v.reset(OpAMD64MOVQstoreconst)
 17111  		v.AuxInt = makeValAndOff(0, 16)
 17112  		v.AddArg(destptr)
 17113  		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17114  		v0.AuxInt = makeValAndOff(0, 8)
 17115  		v0.AddArg(destptr)
 17116  		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17117  		v1.AuxInt = 0
 17118  		v1.AddArg(destptr)
 17119  		v1.AddArg(mem)
 17120  		v0.AddArg(v1)
 17121  		v.AddArg(v0)
 17122  		return true
 17123  	}
 17124  	// match: (Zero [32] destptr mem)
 17125  	// cond:
 17126  	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr 		(MOVQstoreconst [makeValAndOff(0,16)] destptr 			(MOVQstoreconst [makeValAndOff(0,8)] destptr 				(MOVQstoreconst [0] destptr mem))))
 17127  	for {
 17128  		if v.AuxInt != 32 {
 17129  			break
 17130  		}
 17131  		destptr := v.Args[0]
 17132  		mem := v.Args[1]
 17133  		v.reset(OpAMD64MOVQstoreconst)
 17134  		v.AuxInt = makeValAndOff(0, 24)
 17135  		v.AddArg(destptr)
 17136  		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17137  		v0.AuxInt = makeValAndOff(0, 16)
 17138  		v0.AddArg(destptr)
 17139  		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17140  		v1.AuxInt = makeValAndOff(0, 8)
 17141  		v1.AddArg(destptr)
 17142  		v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
 17143  		v2.AuxInt = 0
 17144  		v2.AddArg(destptr)
 17145  		v2.AddArg(mem)
 17146  		v1.AddArg(v2)
 17147  		v0.AddArg(v1)
 17148  		v.AddArg(v0)
 17149  		return true
 17150  	}
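	// Editor's note: the remaining strategies handle large sizes. A
	// multiple of 8 that is not a multiple of 16 first peels one MOVQstore
	// so the rest is a multiple of 16; DUFFZERO then clears in 16-byte
	// units, seeded with a 128-bit zero (MOVOconst [0]). Sizes too big for
	// the Duff device, or any size%8 == 0 case when it is disabled, fall
	// back to REP STOSQ with a count of size/8 quadwords.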
 17151  	// match: (Zero [size] destptr mem)
 17152  	// cond: size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice
 17153  	// result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
 17154  	for {
 17155  		size := v.AuxInt
 17156  		destptr := v.Args[0]
 17157  		mem := v.Args[1]
 17158  		if !(size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice) {
 17159  			break
 17160  		}
 17161  		v.reset(OpZero)
 17162  		v.AuxInt = size - 8
 17163  		v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
 17164  		v0.AuxInt = 8
 17165  		v0.AddArg(destptr)
 17166  		v.AddArg(v0)
 17167  		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
 17168  		v1.AddArg(destptr)
 17169  		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
 17170  		v2.AuxInt = 0
 17171  		v1.AddArg(v2)
 17172  		v1.AddArg(mem)
 17173  		v.AddArg(v1)
 17174  		return true
 17175  	}
 17176  	// match: (Zero [size] destptr mem)
 17177  	// cond: size <= 1024 && size%16 == 0 && !config.noDuffDevice
 17178  	// result: (DUFFZERO [size] destptr (MOVOconst [0]) mem)
 17179  	for {
 17180  		size := v.AuxInt
 17181  		destptr := v.Args[0]
 17182  		mem := v.Args[1]
 17183  		if !(size <= 1024 && size%16 == 0 && !config.noDuffDevice) {
 17184  			break
 17185  		}
 17186  		v.reset(OpAMD64DUFFZERO)
 17187  		v.AuxInt = size
 17188  		v.AddArg(destptr)
 17189  		v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
 17190  		v0.AuxInt = 0
 17191  		v.AddArg(v0)
 17192  		v.AddArg(mem)
 17193  		return true
 17194  	}
 17195  	// match: (Zero [size] destptr mem)
 17196  	// cond: (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0
 17197  	// result: (REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
 17198  	for {
 17199  		size := v.AuxInt
 17200  		destptr := v.Args[0]
 17201  		mem := v.Args[1]
 17202  		if !((size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0) {
 17203  			break
 17204  		}
 17205  		v.reset(OpAMD64REPSTOSQ)
 17206  		v.AddArg(destptr)
 17207  		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
 17208  		v0.AuxInt = size / 8
 17209  		v.AddArg(v0)
 17210  		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
 17211  		v1.AuxInt = 0
 17212  		v.AddArg(v1)
 17213  		v.AddArg(mem)
 17214  		return true
 17215  	}
 17216  	return false
 17217  }
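// Editor's note: zero extensions likewise lower to the MOV*QZX family. On
// AMD64 a 32-bit register write already clears the upper 32 bits, but the
// rules still emit an explicit MOVLQZX for ZeroExt32to64 rather than
// relying on that implicit behavior.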
 17218  func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool {
 17219  	b := v.Block
 17220  	_ = b
 17221  	// match: (ZeroExt16to32 x)
 17222  	// cond:
 17223  	// result: (MOVWQZX x)
 17224  	for {
 17225  		x := v.Args[0]
 17226  		v.reset(OpAMD64MOVWQZX)
 17227  		v.AddArg(x)
 17228  		return true
 17229  	}
 17230  }
 17231  func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool {
 17232  	b := v.Block
 17233  	_ = b
 17234  	// match: (ZeroExt16to64 x)
 17235  	// cond:
 17236  	// result: (MOVWQZX x)
 17237  	for {
 17238  		x := v.Args[0]
 17239  		v.reset(OpAMD64MOVWQZX)
 17240  		v.AddArg(x)
 17241  		return true
 17242  	}
 17243  }
 17244  func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
 17245  	b := v.Block
 17246  	_ = b
 17247  	// match: (ZeroExt32to64 x)
 17248  	// cond:
 17249  	// result: (MOVLQZX x)
 17250  	for {
 17251  		x := v.Args[0]
 17252  		v.reset(OpAMD64MOVLQZX)
 17253  		v.AddArg(x)
 17254  		return true
 17255  	}
 17256  }
 17257  func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
 17258  	b := v.Block
 17259  	_ = b
 17260  	// match: (ZeroExt8to16  x)
 17261  	// cond:
 17262  	// result: (MOVBQZX x)
 17263  	for {
 17264  		x := v.Args[0]
 17265  		v.reset(OpAMD64MOVBQZX)
 17266  		v.AddArg(x)
 17267  		return true
 17268  	}
 17269  }
 17270  func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
 17271  	b := v.Block
 17272  	_ = b
 17273  	// match: (ZeroExt8to32  x)
 17274  	// cond:
 17275  	// result: (MOVBQZX x)
 17276  	for {
 17277  		x := v.Args[0]
 17278  		v.reset(OpAMD64MOVBQZX)
 17279  		v.AddArg(x)
 17280  		return true
 17281  	}
 17282  }
 17283  func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
 17284  	b := v.Block
 17285  	_ = b
 17286  	// match: (ZeroExt8to64  x)
 17287  	// cond:
 17288  	// result: (MOVBQZX x)
 17289  	for {
 17290  		x := v.Args[0]
 17291  		v.reset(OpAMD64MOVBQZX)
 17292  		v.AddArg(x)
 17293  		return true
 17294  	}
 17295  }
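// Editor's note: rewriteBlockAMD64 applies the same ideas at the CFG level.
// A branch on InvertFlags flips the block kind (EQ stays EQ, GE becomes
// LE); a branch on a known Flag* constant becomes an unconditional
// BlockFirst, calling b.swapSuccessors() first when the condition is
// statically false; and a generic If on a SETcc fuses the comparison into
// the corresponding conditional block on the underlying flags.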
 17296  func rewriteBlockAMD64(b *Block) bool {
 17297  	switch b.Kind {
 17298  	case BlockAMD64EQ:
 17299  		// match: (EQ (InvertFlags cmp) yes no)
 17300  		// cond:
 17301  		// result: (EQ cmp yes no)
 17302  		for {
 17303  			v := b.Control
 17304  			if v.Op != OpAMD64InvertFlags {
 17305  				break
 17306  			}
 17307  			cmp := v.Args[0]
 17308  			yes := b.Succs[0]
 17309  			no := b.Succs[1]
 17310  			b.Kind = BlockAMD64EQ
 17311  			b.SetControl(cmp)
 17312  			_ = yes
 17313  			_ = no
 17314  			return true
 17315  		}
 17316  		// match: (EQ (FlagEQ) yes no)
 17317  		// cond:
 17318  		// result: (First nil yes no)
 17319  		for {
 17320  			v := b.Control
 17321  			if v.Op != OpAMD64FlagEQ {
 17322  				break
 17323  			}
 17324  			yes := b.Succs[0]
 17325  			no := b.Succs[1]
 17326  			b.Kind = BlockFirst
 17327  			b.SetControl(nil)
 17328  			_ = yes
 17329  			_ = no
 17330  			return true
 17331  		}
 17332  		// match: (EQ (FlagLT_ULT) yes no)
 17333  		// cond:
 17334  		// result: (First nil no yes)
 17335  		for {
 17336  			v := b.Control
 17337  			if v.Op != OpAMD64FlagLT_ULT {
 17338  				break
 17339  			}
 17340  			yes := b.Succs[0]
 17341  			no := b.Succs[1]
 17342  			b.Kind = BlockFirst
 17343  			b.SetControl(nil)
 17344  			b.swapSuccessors()
 17345  			_ = no
 17346  			_ = yes
 17347  			return true
 17348  		}
 17349  		// match: (EQ (FlagLT_UGT) yes no)
 17350  		// cond:
 17351  		// result: (First nil no yes)
 17352  		for {
 17353  			v := b.Control
 17354  			if v.Op != OpAMD64FlagLT_UGT {
 17355  				break
 17356  			}
 17357  			yes := b.Succs[0]
 17358  			no := b.Succs[1]
 17359  			b.Kind = BlockFirst
 17360  			b.SetControl(nil)
 17361  			b.swapSuccessors()
 17362  			_ = no
 17363  			_ = yes
 17364  			return true
 17365  		}
 17366  		// match: (EQ (FlagGT_ULT) yes no)
 17367  		// cond:
 17368  		// result: (First nil no yes)
 17369  		for {
 17370  			v := b.Control
 17371  			if v.Op != OpAMD64FlagGT_ULT {
 17372  				break
 17373  			}
 17374  			yes := b.Succs[0]
 17375  			no := b.Succs[1]
 17376  			b.Kind = BlockFirst
 17377  			b.SetControl(nil)
 17378  			b.swapSuccessors()
 17379  			_ = no
 17380  			_ = yes
 17381  			return true
 17382  		}
 17383  		// match: (EQ (FlagGT_UGT) yes no)
 17384  		// cond:
 17385  		// result: (First nil no yes)
 17386  		for {
 17387  			v := b.Control
 17388  			if v.Op != OpAMD64FlagGT_UGT {
 17389  				break
 17390  			}
 17391  			yes := b.Succs[0]
 17392  			no := b.Succs[1]
 17393  			b.Kind = BlockFirst
 17394  			b.SetControl(nil)
 17395  			b.swapSuccessors()
 17396  			_ = no
 17397  			_ = yes
 17398  			return true
 17399  		}
 17400  	case BlockAMD64GE:
 17401  		// match: (GE (InvertFlags cmp) yes no)
 17402  		// cond:
 17403  		// result: (LE cmp yes no)
 17404  		for {
 17405  			v := b.Control
 17406  			if v.Op != OpAMD64InvertFlags {
 17407  				break
 17408  			}
 17409  			cmp := v.Args[0]
 17410  			yes := b.Succs[0]
 17411  			no := b.Succs[1]
 17412  			b.Kind = BlockAMD64LE
 17413  			b.SetControl(cmp)
 17414  			_ = yes
 17415  			_ = no
 17416  			return true
 17417  		}
 17418  		// match: (GE (FlagEQ) yes no)
 17419  		// cond:
 17420  		// result: (First nil yes no)
 17421  		for {
 17422  			v := b.Control
 17423  			if v.Op != OpAMD64FlagEQ {
 17424  				break
 17425  			}
 17426  			yes := b.Succs[0]
 17427  			no := b.Succs[1]
 17428  			b.Kind = BlockFirst
 17429  			b.SetControl(nil)
 17430  			_ = yes
 17431  			_ = no
 17432  			return true
 17433  		}
 17434  		// match: (GE (FlagLT_ULT) yes no)
 17435  		// cond:
 17436  		// result: (First nil no yes)
 17437  		for {
 17438  			v := b.Control
 17439  			if v.Op != OpAMD64FlagLT_ULT {
 17440  				break
 17441  			}
 17442  			yes := b.Succs[0]
 17443  			no := b.Succs[1]
 17444  			b.Kind = BlockFirst
 17445  			b.SetControl(nil)
 17446  			b.swapSuccessors()
 17447  			_ = no
 17448  			_ = yes
 17449  			return true
 17450  		}
 17451  		// match: (GE (FlagLT_UGT) yes no)
 17452  		// cond:
 17453  		// result: (First nil no yes)
 17454  		for {
 17455  			v := b.Control
 17456  			if v.Op != OpAMD64FlagLT_UGT {
 17457  				break
 17458  			}
 17459  			yes := b.Succs[0]
 17460  			no := b.Succs[1]
 17461  			b.Kind = BlockFirst
 17462  			b.SetControl(nil)
 17463  			b.swapSuccessors()
 17464  			_ = no
 17465  			_ = yes
 17466  			return true
 17467  		}
 17468  		// match: (GE (FlagGT_ULT) yes no)
 17469  		// cond:
 17470  		// result: (First nil yes no)
 17471  		for {
 17472  			v := b.Control
 17473  			if v.Op != OpAMD64FlagGT_ULT {
 17474  				break
 17475  			}
 17476  			yes := b.Succs[0]
 17477  			no := b.Succs[1]
 17478  			b.Kind = BlockFirst
 17479  			b.SetControl(nil)
 17480  			_ = yes
 17481  			_ = no
 17482  			return true
 17483  		}
 17484  		// match: (GE (FlagGT_UGT) yes no)
 17485  		// cond:
 17486  		// result: (First nil yes no)
 17487  		for {
 17488  			v := b.Control
 17489  			if v.Op != OpAMD64FlagGT_UGT {
 17490  				break
 17491  			}
 17492  			yes := b.Succs[0]
 17493  			no := b.Succs[1]
 17494  			b.Kind = BlockFirst
 17495  			b.SetControl(nil)
 17496  			_ = yes
 17497  			_ = no
 17498  			return true
 17499  		}
 17500  	case BlockAMD64GT:
 17501  		// match: (GT (InvertFlags cmp) yes no)
 17502  		// cond:
 17503  		// result: (LT cmp yes no)
 17504  		for {
 17505  			v := b.Control
 17506  			if v.Op != OpAMD64InvertFlags {
 17507  				break
 17508  			}
 17509  			cmp := v.Args[0]
 17510  			yes := b.Succs[0]
 17511  			no := b.Succs[1]
 17512  			b.Kind = BlockAMD64LT
 17513  			b.SetControl(cmp)
 17514  			_ = yes
 17515  			_ = no
 17516  			return true
 17517  		}
 17518  		// match: (GT (FlagEQ) yes no)
 17519  		// cond:
 17520  		// result: (First nil no yes)
 17521  		for {
 17522  			v := b.Control
 17523  			if v.Op != OpAMD64FlagEQ {
 17524  				break
 17525  			}
 17526  			yes := b.Succs[0]
 17527  			no := b.Succs[1]
 17528  			b.Kind = BlockFirst
 17529  			b.SetControl(nil)
 17530  			b.swapSuccessors()
 17531  			_ = no
 17532  			_ = yes
 17533  			return true
 17534  		}
 17535  		// match: (GT (FlagLT_ULT) yes no)
 17536  		// cond:
 17537  		// result: (First nil no yes)
 17538  		for {
 17539  			v := b.Control
 17540  			if v.Op != OpAMD64FlagLT_ULT {
 17541  				break
 17542  			}
 17543  			yes := b.Succs[0]
 17544  			no := b.Succs[1]
 17545  			b.Kind = BlockFirst
 17546  			b.SetControl(nil)
 17547  			b.swapSuccessors()
 17548  			_ = no
 17549  			_ = yes
 17550  			return true
 17551  		}
 17552  		// match: (GT (FlagLT_UGT) yes no)
 17553  		// cond:
 17554  		// result: (First nil no yes)
 17555  		for {
 17556  			v := b.Control
 17557  			if v.Op != OpAMD64FlagLT_UGT {
 17558  				break
 17559  			}
 17560  			yes := b.Succs[0]
 17561  			no := b.Succs[1]
 17562  			b.Kind = BlockFirst
 17563  			b.SetControl(nil)
 17564  			b.swapSuccessors()
 17565  			_ = no
 17566  			_ = yes
 17567  			return true
 17568  		}
 17569  		// match: (GT (FlagGT_ULT) yes no)
 17570  		// cond:
 17571  		// result: (First nil yes no)
 17572  		for {
 17573  			v := b.Control
 17574  			if v.Op != OpAMD64FlagGT_ULT {
 17575  				break
 17576  			}
 17577  			yes := b.Succs[0]
 17578  			no := b.Succs[1]
 17579  			b.Kind = BlockFirst
 17580  			b.SetControl(nil)
 17581  			_ = yes
 17582  			_ = no
 17583  			return true
 17584  		}
 17585  		// match: (GT (FlagGT_UGT) yes no)
 17586  		// cond:
 17587  		// result: (First nil yes no)
 17588  		for {
 17589  			v := b.Control
 17590  			if v.Op != OpAMD64FlagGT_UGT {
 17591  				break
 17592  			}
 17593  			yes := b.Succs[0]
 17594  			no := b.Succs[1]
 17595  			b.Kind = BlockFirst
 17596  			b.SetControl(nil)
 17597  			_ = yes
 17598  			_ = no
 17599  			return true
 17600  		}
 17601  	case BlockIf:
 17602  		// match: (If (SETL  cmp) yes no)
 17603  		// cond:
 17604  		// result: (LT  cmp yes no)
 17605  		for {
 17606  			v := b.Control
 17607  			if v.Op != OpAMD64SETL {
 17608  				break
 17609  			}
 17610  			cmp := v.Args[0]
 17611  			yes := b.Succs[0]
 17612  			no := b.Succs[1]
 17613  			b.Kind = BlockAMD64LT
 17614  			b.SetControl(cmp)
 17615  			_ = yes
 17616  			_ = no
 17617  			return true
 17618  		}
 17619  		// match: (If (SETLE cmp) yes no)
 17620  		// cond:
 17621  		// result: (LE  cmp yes no)
 17622  		for {
 17623  			v := b.Control
 17624  			if v.Op != OpAMD64SETLE {
 17625  				break
 17626  			}
 17627  			cmp := v.Args[0]
 17628  			yes := b.Succs[0]
 17629  			no := b.Succs[1]
 17630  			b.Kind = BlockAMD64LE
 17631  			b.SetControl(cmp)
 17632  			_ = yes
 17633  			_ = no
 17634  			return true
 17635  		}
 17636  		// match: (If (SETG  cmp) yes no)
 17637  		// cond:
 17638  		// result: (GT  cmp yes no)
 17639  		for {
 17640  			v := b.Control
 17641  			if v.Op != OpAMD64SETG {
 17642  				break
 17643  			}
 17644  			cmp := v.Args[0]
 17645  			yes := b.Succs[0]
 17646  			no := b.Succs[1]
 17647  			b.Kind = BlockAMD64GT
 17648  			b.SetControl(cmp)
 17649  			_ = yes
 17650  			_ = no
 17651  			return true
 17652  		}
 17653  		// match: (If (SETGE cmp) yes no)
 17654  		// cond:
 17655  		// result: (GE  cmp yes no)
 17656  		for {
 17657  			v := b.Control
 17658  			if v.Op != OpAMD64SETGE {
 17659  				break
 17660  			}
 17661  			cmp := v.Args[0]
 17662  			yes := b.Succs[0]
 17663  			no := b.Succs[1]
 17664  			b.Kind = BlockAMD64GE
 17665  			b.SetControl(cmp)
 17666  			_ = yes
 17667  			_ = no
 17668  			return true
 17669  		}
 17670  		// match: (If (SETEQ cmp) yes no)
 17671  		// cond:
 17672  		// result: (EQ  cmp yes no)
 17673  		for {
 17674  			v := b.Control
 17675  			if v.Op != OpAMD64SETEQ {
 17676  				break
 17677  			}
 17678  			cmp := v.Args[0]
 17679  			yes := b.Succs[0]
 17680  			no := b.Succs[1]
 17681  			b.Kind = BlockAMD64EQ
 17682  			b.SetControl(cmp)
 17683  			_ = yes
 17684  			_ = no
 17685  			return true
 17686  		}
 17687  		// match: (If (SETNE cmp) yes no)
 17688  		// cond:
 17689  		// result: (NE  cmp yes no)
 17690  		for {
 17691  			v := b.Control
 17692  			if v.Op != OpAMD64SETNE {
 17693  				break
 17694  			}
 17695  			cmp := v.Args[0]
 17696  			yes := b.Succs[0]
 17697  			no := b.Succs[1]
 17698  			b.Kind = BlockAMD64NE
 17699  			b.SetControl(cmp)
 17700  			_ = yes
 17701  			_ = no
 17702  			return true
 17703  		}
 17704  		// match: (If (SETB  cmp) yes no)
 17705  		// cond:
 17706  		// result: (ULT cmp yes no)
 17707  		for {
 17708  			v := b.Control
 17709  			if v.Op != OpAMD64SETB {
 17710  				break
 17711  			}
 17712  			cmp := v.Args[0]
 17713  			yes := b.Succs[0]
 17714  			no := b.Succs[1]
 17715  			b.Kind = BlockAMD64ULT
 17716  			b.SetControl(cmp)
 17717  			_ = yes
 17718  			_ = no
 17719  			return true
 17720  		}
 17721  		// match: (If (SETBE cmp) yes no)
 17722  		// cond:
 17723  		// result: (ULE cmp yes no)
 17724  		for {
 17725  			v := b.Control
 17726  			if v.Op != OpAMD64SETBE {
 17727  				break
 17728  			}
 17729  			cmp := v.Args[0]
 17730  			yes := b.Succs[0]
 17731  			no := b.Succs[1]
 17732  			b.Kind = BlockAMD64ULE
 17733  			b.SetControl(cmp)
 17734  			_ = yes
 17735  			_ = no
 17736  			return true
 17737  		}
 17738  		// match: (If (SETA  cmp) yes no)
 17739  		// cond:
 17740  		// result: (UGT cmp yes no)
 17741  		for {
 17742  			v := b.Control
 17743  			if v.Op != OpAMD64SETA {
 17744  				break
 17745  			}
 17746  			cmp := v.Args[0]
 17747  			yes := b.Succs[0]
 17748  			no := b.Succs[1]
 17749  			b.Kind = BlockAMD64UGT
 17750  			b.SetControl(cmp)
 17751  			_ = yes
 17752  			_ = no
 17753  			return true
 17754  		}
 17755  		// match: (If (SETAE cmp) yes no)
 17756  		// cond:
 17757  		// result: (UGE cmp yes no)
 17758  		for {
 17759  			v := b.Control
 17760  			if v.Op != OpAMD64SETAE {
 17761  				break
 17762  			}
 17763  			cmp := v.Args[0]
 17764  			yes := b.Succs[0]
 17765  			no := b.Succs[1]
 17766  			b.Kind = BlockAMD64UGE
 17767  			b.SetControl(cmp)
 17768  			_ = yes
 17769  			_ = no
 17770  			return true
 17771  		}
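		// Editor's note: the floating-point variants map to the unsigned
		// branch kinds because UCOMISS/UCOMISD set CF/ZF the way an
		// unsigned integer compare would, so SETGF selects UGT and SETGEF
		// selects UGE.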
 17772  		// match: (If (SETGF  cmp) yes no)
 17773  		// cond:
 17774  		// result: (UGT  cmp yes no)
 17775  		for {
 17776  			v := b.Control
 17777  			if v.Op != OpAMD64SETGF {
 17778  				break
 17779  			}
 17780  			cmp := v.Args[0]
 17781  			yes := b.Succs[0]
 17782  			no := b.Succs[1]
 17783  			b.Kind = BlockAMD64UGT
 17784  			b.SetControl(cmp)
 17785  			_ = yes
 17786  			_ = no
 17787  			return true
 17788  		}
 17789  		// match: (If (SETGEF cmp) yes no)
 17790  		// cond:
 17791  		// result: (UGE  cmp yes no)
 17792  		for {
 17793  			v := b.Control
 17794  			if v.Op != OpAMD64SETGEF {
 17795  				break
 17796  			}
 17797  			cmp := v.Args[0]
 17798  			yes := b.Succs[0]
 17799  			no := b.Succs[1]
 17800  			b.Kind = BlockAMD64UGE
 17801  			b.SetControl(cmp)
 17802  			_ = yes
 17803  			_ = no
 17804  			return true
 17805  		}
 17806  		// match: (If (SETEQF cmp) yes no)
 17807  		// cond:
 17808  		// result: (EQF  cmp yes no)
 17809  		for {
 17810  			v := b.Control
 17811  			if v.Op != OpAMD64SETEQF {
 17812  				break
 17813  			}
 17814  			cmp := v.Args[0]
 17815  			yes := b.Succs[0]
 17816  			no := b.Succs[1]
 17817  			b.Kind = BlockAMD64EQF
 17818  			b.SetControl(cmp)
 17819  			_ = yes
 17820  			_ = no
 17821  			return true
 17822  		}
 17823  		// match: (If (SETNEF cmp) yes no)
 17824  		// cond:
 17825  		// result: (NEF  cmp yes no)
 17826  		for {
 17827  			v := b.Control
 17828  			if v.Op != OpAMD64SETNEF {
 17829  				break
 17830  			}
 17831  			cmp := v.Args[0]
 17832  			yes := b.Succs[0]
 17833  			no := b.Succs[1]
 17834  			b.Kind = BlockAMD64NEF
 17835  			b.SetControl(cmp)
 17836  			_ = yes
 17837  			_ = no
 17838  			return true
 17839  		}
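		// Editor's note: when the control is not a recognized SETcc, the
		// fallback below tests the boolean byte against itself with
		// (TESTB cond cond) and branches on NE, i.e. on cond != 0.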
 17840  		// match: (If cond yes no)
 17841  		// cond:
 17842  		// result: (NE (TESTB cond cond) yes no)
 17843  		for {
 17844  			v := b.Control
 17845  			cond := b.Control
 17846  			yes := b.Succs[0]
 17847  			no := b.Succs[1]
 17848  			b.Kind = BlockAMD64NE
 17849  			v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags)
 17850  			v0.AddArg(cond)
 17851  			v0.AddArg(cond)
 17852  			b.SetControl(v0)
 17853  			_ = yes
 17854  			_ = no
 17855  			return true
 17856  		}
 17857  	case BlockAMD64LE:
 17858  		// match: (LE (InvertFlags cmp) yes no)
 17859  		// cond:
 17860  		// result: (GE cmp yes no)
 17861  		for {
 17862  			v := b.Control
 17863  			if v.Op != OpAMD64InvertFlags {
 17864  				break
 17865  			}
 17866  			cmp := v.Args[0]
 17867  			yes := b.Succs[0]
 17868  			no := b.Succs[1]
 17869  			b.Kind = BlockAMD64GE
 17870  			b.SetControl(cmp)
 17871  			_ = yes
 17872  			_ = no
 17873  			return true
 17874  		}
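		// InvertFlags marks a flags value whose producing comparison had
		// its operands swapped, so consumers flip their sense instead of
		// re-emitting the compare: x <= y on flags computed for (y, x) is
		// y >= x, hence LE becomes GE. It typically appears when rules
		// elsewhere in this file move a constant into the immediate slot,
		// e.g. (CMPQ (MOVQconst [c]) x) being rewritten to
		// (InvertFlags (CMPQconst x [c])).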
 17875  		// match: (LE (FlagEQ) yes no)
 17876  		// cond:
 17877  		// result: (First nil yes no)
 17878  		for {
 17879  			v := b.Control
 17880  			if v.Op != OpAMD64FlagEQ {
 17881  				break
 17882  			}
 17883  			yes := b.Succs[0]
 17884  			no := b.Succs[1]
 17885  			b.Kind = BlockFirst
 17886  			b.SetControl(nil)
 17887  			_ = yes
 17888  			_ = no
 17889  			return true
 17890  		}
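		// FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT and FlagGT_UGT are
		// abstract flag constants produced when a comparison folds at
		// compile time; each records both the signed and the unsigned
		// outcome (FlagLT_UGT, for instance, is what comparing -1 with 1
		// yields: signed less-than, unsigned greater-than). A block whose
		// flags are constant degenerates to BlockFirst, which always takes
		// Succs[0]; when the condition is statically false, the rule calls
		// swapSuccessors first so that the old "no" edge becomes Succs[0].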
 17891  		// match: (LE (FlagLT_ULT) yes no)
 17892  		// cond:
 17893  		// result: (First nil yes no)
 17894  		for {
 17895  			v := b.Control
 17896  			if v.Op != OpAMD64FlagLT_ULT {
 17897  				break
 17898  			}
 17899  			yes := b.Succs[0]
 17900  			no := b.Succs[1]
 17901  			b.Kind = BlockFirst
 17902  			b.SetControl(nil)
 17903  			_ = yes
 17904  			_ = no
 17905  			return true
 17906  		}
 17907  		// match: (LE (FlagLT_UGT) yes no)
 17908  		// cond:
 17909  		// result: (First nil yes no)
 17910  		for {
 17911  			v := b.Control
 17912  			if v.Op != OpAMD64FlagLT_UGT {
 17913  				break
 17914  			}
 17915  			yes := b.Succs[0]
 17916  			no := b.Succs[1]
 17917  			b.Kind = BlockFirst
 17918  			b.SetControl(nil)
 17919  			_ = yes
 17920  			_ = no
 17921  			return true
 17922  		}
 17923  		// match: (LE (FlagGT_ULT) yes no)
 17924  		// cond:
 17925  		// result: (First nil no yes)
 17926  		for {
 17927  			v := b.Control
 17928  			if v.Op != OpAMD64FlagGT_ULT {
 17929  				break
 17930  			}
 17931  			yes := b.Succs[0]
 17932  			no := b.Succs[1]
 17933  			b.Kind = BlockFirst
 17934  			b.SetControl(nil)
 17935  			b.swapSuccessors()
 17936  			_ = no
 17937  			_ = yes
 17938  			return true
 17939  		}
 17940  		// match: (LE (FlagGT_UGT) yes no)
 17941  		// cond:
 17942  		// result: (First nil no yes)
 17943  		for {
 17944  			v := b.Control
 17945  			if v.Op != OpAMD64FlagGT_UGT {
 17946  				break
 17947  			}
 17948  			yes := b.Succs[0]
 17949  			no := b.Succs[1]
 17950  			b.Kind = BlockFirst
 17951  			b.SetControl(nil)
 17952  			b.swapSuccessors()
 17953  			_ = no
 17954  			_ = yes
 17955  			return true
 17956  		}
 17957  	case BlockAMD64LT:
 17958  		// match: (LT (InvertFlags cmp) yes no)
 17959  		// cond:
 17960  		// result: (GT cmp yes no)
 17961  		for {
 17962  			v := b.Control
 17963  			if v.Op != OpAMD64InvertFlags {
 17964  				break
 17965  			}
 17966  			cmp := v.Args[0]
 17967  			yes := b.Succs[0]
 17968  			no := b.Succs[1]
 17969  			b.Kind = BlockAMD64GT
 17970  			b.SetControl(cmp)
 17971  			_ = yes
 17972  			_ = no
 17973  			return true
 17974  		}
 17975  		// match: (LT (FlagEQ) yes no)
 17976  		// cond:
 17977  		// result: (First nil no yes)
 17978  		for {
 17979  			v := b.Control
 17980  			if v.Op != OpAMD64FlagEQ {
 17981  				break
 17982  			}
 17983  			yes := b.Succs[0]
 17984  			no := b.Succs[1]
 17985  			b.Kind = BlockFirst
 17986  			b.SetControl(nil)
 17987  			b.swapSuccessors()
 17988  			_ = no
 17989  			_ = yes
 17990  			return true
 17991  		}
 17992  		// match: (LT (FlagLT_ULT) yes no)
 17993  		// cond:
 17994  		// result: (First nil yes no)
 17995  		for {
 17996  			v := b.Control
 17997  			if v.Op != OpAMD64FlagLT_ULT {
 17998  				break
 17999  			}
 18000  			yes := b.Succs[0]
 18001  			no := b.Succs[1]
 18002  			b.Kind = BlockFirst
 18003  			b.SetControl(nil)
 18004  			_ = yes
 18005  			_ = no
 18006  			return true
 18007  		}
 18008  		// match: (LT (FlagLT_UGT) yes no)
 18009  		// cond:
 18010  		// result: (First nil yes no)
 18011  		for {
 18012  			v := b.Control
 18013  			if v.Op != OpAMD64FlagLT_UGT {
 18014  				break
 18015  			}
 18016  			yes := b.Succs[0]
 18017  			no := b.Succs[1]
 18018  			b.Kind = BlockFirst
 18019  			b.SetControl(nil)
 18020  			_ = yes
 18021  			_ = no
 18022  			return true
 18023  		}
 18024  		// match: (LT (FlagGT_ULT) yes no)
 18025  		// cond:
 18026  		// result: (First nil no yes)
 18027  		for {
 18028  			v := b.Control
 18029  			if v.Op != OpAMD64FlagGT_ULT {
 18030  				break
 18031  			}
 18032  			yes := b.Succs[0]
 18033  			no := b.Succs[1]
 18034  			b.Kind = BlockFirst
 18035  			b.SetControl(nil)
 18036  			b.swapSuccessors()
 18037  			_ = no
 18038  			_ = yes
 18039  			return true
 18040  		}
 18041  		// match: (LT (FlagGT_UGT) yes no)
 18042  		// cond:
 18043  		// result: (First nil no yes)
 18044  		for {
 18045  			v := b.Control
 18046  			if v.Op != OpAMD64FlagGT_UGT {
 18047  				break
 18048  			}
 18049  			yes := b.Succs[0]
 18050  			no := b.Succs[1]
 18051  			b.Kind = BlockFirst
 18052  			b.SetControl(nil)
 18053  			b.swapSuccessors()
 18054  			_ = no
 18055  			_ = yes
 18056  			return true
 18057  		}
 18058  	case BlockAMD64NE:
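		// The TESTB patterns in this case undo the generic If lowering:
		// when the boolean fed to (NE (TESTB cond cond)) turns out to be a
		// SETcc, the materialized byte and the TESTB are dropped and the
		// block consumes the comparison's flags directly, restoring a
		// single CMPx+Jcc sequence.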
 18059  		// match: (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no)
 18060  		// cond:
 18061  		// result: (LT  cmp yes no)
 18062  		for {
 18063  			v := b.Control
 18064  			if v.Op != OpAMD64TESTB {
 18065  				break
 18066  			}
 18067  			v_0 := v.Args[0]
 18068  			if v_0.Op != OpAMD64SETL {
 18069  				break
 18070  			}
 18071  			cmp := v_0.Args[0]
 18072  			v_1 := v.Args[1]
 18073  			if v_1.Op != OpAMD64SETL {
 18074  				break
 18075  			}
 18076  			if cmp != v_1.Args[0] {
 18077  				break
 18078  			}
 18079  			yes := b.Succs[0]
 18080  			no := b.Succs[1]
 18081  			b.Kind = BlockAMD64LT
 18082  			b.SetControl(cmp)
 18083  			_ = yes
 18084  			_ = no
 18085  			return true
 18086  		}
 18087  		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
 18088  		// cond:
 18089  		// result: (LE  cmp yes no)
 18090  		for {
 18091  			v := b.Control
 18092  			if v.Op != OpAMD64TESTB {
 18093  				break
 18094  			}
 18095  			v_0 := v.Args[0]
 18096  			if v_0.Op != OpAMD64SETLE {
 18097  				break
 18098  			}
 18099  			cmp := v_0.Args[0]
 18100  			v_1 := v.Args[1]
 18101  			if v_1.Op != OpAMD64SETLE {
 18102  				break
 18103  			}
 18104  			if cmp != v_1.Args[0] {
 18105  				break
 18106  			}
 18107  			yes := b.Succs[0]
 18108  			no := b.Succs[1]
 18109  			b.Kind = BlockAMD64LE
 18110  			b.SetControl(cmp)
 18111  			_ = yes
 18112  			_ = no
 18113  			return true
 18114  		}
 18115  		// match: (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no)
 18116  		// cond:
 18117  		// result: (GT  cmp yes no)
 18118  		for {
 18119  			v := b.Control
 18120  			if v.Op != OpAMD64TESTB {
 18121  				break
 18122  			}
 18123  			v_0 := v.Args[0]
 18124  			if v_0.Op != OpAMD64SETG {
 18125  				break
 18126  			}
 18127  			cmp := v_0.Args[0]
 18128  			v_1 := v.Args[1]
 18129  			if v_1.Op != OpAMD64SETG {
 18130  				break
 18131  			}
 18132  			if cmp != v_1.Args[0] {
 18133  				break
 18134  			}
 18135  			yes := b.Succs[0]
 18136  			no := b.Succs[1]
 18137  			b.Kind = BlockAMD64GT
 18138  			b.SetControl(cmp)
 18139  			_ = yes
 18140  			_ = no
 18141  			return true
 18142  		}
 18143  		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
 18144  		// cond:
 18145  		// result: (GE  cmp yes no)
 18146  		for {
 18147  			v := b.Control
 18148  			if v.Op != OpAMD64TESTB {
 18149  				break
 18150  			}
 18151  			v_0 := v.Args[0]
 18152  			if v_0.Op != OpAMD64SETGE {
 18153  				break
 18154  			}
 18155  			cmp := v_0.Args[0]
 18156  			v_1 := v.Args[1]
 18157  			if v_1.Op != OpAMD64SETGE {
 18158  				break
 18159  			}
 18160  			if cmp != v_1.Args[0] {
 18161  				break
 18162  			}
 18163  			yes := b.Succs[0]
 18164  			no := b.Succs[1]
 18165  			b.Kind = BlockAMD64GE
 18166  			b.SetControl(cmp)
 18167  			_ = yes
 18168  			_ = no
 18169  			return true
 18170  		}
 18171  		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
 18172  		// cond:
 18173  		// result: (EQ  cmp yes no)
 18174  		for {
 18175  			v := b.Control
 18176  			if v.Op != OpAMD64TESTB {
 18177  				break
 18178  			}
 18179  			v_0 := v.Args[0]
 18180  			if v_0.Op != OpAMD64SETEQ {
 18181  				break
 18182  			}
 18183  			cmp := v_0.Args[0]
 18184  			v_1 := v.Args[1]
 18185  			if v_1.Op != OpAMD64SETEQ {
 18186  				break
 18187  			}
 18188  			if cmp != v_1.Args[0] {
 18189  				break
 18190  			}
 18191  			yes := b.Succs[0]
 18192  			no := b.Succs[1]
 18193  			b.Kind = BlockAMD64EQ
 18194  			b.SetControl(cmp)
 18195  			_ = yes
 18196  			_ = no
 18197  			return true
 18198  		}
 18199  		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
 18200  		// cond:
 18201  		// result: (NE  cmp yes no)
 18202  		for {
 18203  			v := b.Control
 18204  			if v.Op != OpAMD64TESTB {
 18205  				break
 18206  			}
 18207  			v_0 := v.Args[0]
 18208  			if v_0.Op != OpAMD64SETNE {
 18209  				break
 18210  			}
 18211  			cmp := v_0.Args[0]
 18212  			v_1 := v.Args[1]
 18213  			if v_1.Op != OpAMD64SETNE {
 18214  				break
 18215  			}
 18216  			if cmp != v_1.Args[0] {
 18217  				break
 18218  			}
 18219  			yes := b.Succs[0]
 18220  			no := b.Succs[1]
 18221  			b.Kind = BlockAMD64NE
 18222  			b.SetControl(cmp)
 18223  			_ = yes
 18224  			_ = no
 18225  			return true
 18226  		}
 18227  		// match: (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no)
 18228  		// cond:
 18229  		// result: (ULT cmp yes no)
 18230  		for {
 18231  			v := b.Control
 18232  			if v.Op != OpAMD64TESTB {
 18233  				break
 18234  			}
 18235  			v_0 := v.Args[0]
 18236  			if v_0.Op != OpAMD64SETB {
 18237  				break
 18238  			}
 18239  			cmp := v_0.Args[0]
 18240  			v_1 := v.Args[1]
 18241  			if v_1.Op != OpAMD64SETB {
 18242  				break
 18243  			}
 18244  			if cmp != v_1.Args[0] {
 18245  				break
 18246  			}
 18247  			yes := b.Succs[0]
 18248  			no := b.Succs[1]
 18249  			b.Kind = BlockAMD64ULT
 18250  			b.SetControl(cmp)
 18251  			_ = yes
 18252  			_ = no
 18253  			return true
 18254  		}
 18255  		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
 18256  		// cond:
 18257  		// result: (ULE cmp yes no)
 18258  		for {
 18259  			v := b.Control
 18260  			if v.Op != OpAMD64TESTB {
 18261  				break
 18262  			}
 18263  			v_0 := v.Args[0]
 18264  			if v_0.Op != OpAMD64SETBE {
 18265  				break
 18266  			}
 18267  			cmp := v_0.Args[0]
 18268  			v_1 := v.Args[1]
 18269  			if v_1.Op != OpAMD64SETBE {
 18270  				break
 18271  			}
 18272  			if cmp != v_1.Args[0] {
 18273  				break
 18274  			}
 18275  			yes := b.Succs[0]
 18276  			no := b.Succs[1]
 18277  			b.Kind = BlockAMD64ULE
 18278  			b.SetControl(cmp)
 18279  			_ = yes
 18280  			_ = no
 18281  			return true
 18282  		}
 18283  		// match: (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no)
 18284  		// cond:
 18285  		// result: (UGT cmp yes no)
 18286  		for {
 18287  			v := b.Control
 18288  			if v.Op != OpAMD64TESTB {
 18289  				break
 18290  			}
 18291  			v_0 := v.Args[0]
 18292  			if v_0.Op != OpAMD64SETA {
 18293  				break
 18294  			}
 18295  			cmp := v_0.Args[0]
 18296  			v_1 := v.Args[1]
 18297  			if v_1.Op != OpAMD64SETA {
 18298  				break
 18299  			}
 18300  			if cmp != v_1.Args[0] {
 18301  				break
 18302  			}
 18303  			yes := b.Succs[0]
 18304  			no := b.Succs[1]
 18305  			b.Kind = BlockAMD64UGT
 18306  			b.SetControl(cmp)
 18307  			_ = yes
 18308  			_ = no
 18309  			return true
 18310  		}
 18311  		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
 18312  		// cond:
 18313  		// result: (UGE cmp yes no)
 18314  		for {
 18315  			v := b.Control
 18316  			if v.Op != OpAMD64TESTB {
 18317  				break
 18318  			}
 18319  			v_0 := v.Args[0]
 18320  			if v_0.Op != OpAMD64SETAE {
 18321  				break
 18322  			}
 18323  			cmp := v_0.Args[0]
 18324  			v_1 := v.Args[1]
 18325  			if v_1.Op != OpAMD64SETAE {
 18326  				break
 18327  			}
 18328  			if cmp != v_1.Args[0] {
 18329  				break
 18330  			}
 18331  			yes := b.Succs[0]
 18332  			no := b.Succs[1]
 18333  			b.Kind = BlockAMD64UGE
 18334  			b.SetControl(cmp)
 18335  			_ = yes
 18336  			_ = no
 18337  			return true
 18338  		}
 18339  		// match: (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no)
 18340  		// cond:
 18341  		// result: (UGT  cmp yes no)
 18342  		for {
 18343  			v := b.Control
 18344  			if v.Op != OpAMD64TESTB {
 18345  				break
 18346  			}
 18347  			v_0 := v.Args[0]
 18348  			if v_0.Op != OpAMD64SETGF {
 18349  				break
 18350  			}
 18351  			cmp := v_0.Args[0]
 18352  			v_1 := v.Args[1]
 18353  			if v_1.Op != OpAMD64SETGF {
 18354  				break
 18355  			}
 18356  			if cmp != v_1.Args[0] {
 18357  				break
 18358  			}
 18359  			yes := b.Succs[0]
 18360  			no := b.Succs[1]
 18361  			b.Kind = BlockAMD64UGT
 18362  			b.SetControl(cmp)
 18363  			_ = yes
 18364  			_ = no
 18365  			return true
 18366  		}
 18367  		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
 18368  		// cond:
 18369  		// result: (UGE  cmp yes no)
 18370  		for {
 18371  			v := b.Control
 18372  			if v.Op != OpAMD64TESTB {
 18373  				break
 18374  			}
 18375  			v_0 := v.Args[0]
 18376  			if v_0.Op != OpAMD64SETGEF {
 18377  				break
 18378  			}
 18379  			cmp := v_0.Args[0]
 18380  			v_1 := v.Args[1]
 18381  			if v_1.Op != OpAMD64SETGEF {
 18382  				break
 18383  			}
 18384  			if cmp != v_1.Args[0] {
 18385  				break
 18386  			}
 18387  			yes := b.Succs[0]
 18388  			no := b.Succs[1]
 18389  			b.Kind = BlockAMD64UGE
 18390  			b.SetControl(cmp)
 18391  			_ = yes
 18392  			_ = no
 18393  			return true
 18394  		}
 18395  		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
 18396  		// cond:
 18397  		// result: (EQF  cmp yes no)
 18398  		for {
 18399  			v := b.Control
 18400  			if v.Op != OpAMD64TESTB {
 18401  				break
 18402  			}
 18403  			v_0 := v.Args[0]
 18404  			if v_0.Op != OpAMD64SETEQF {
 18405  				break
 18406  			}
 18407  			cmp := v_0.Args[0]
 18408  			v_1 := v.Args[1]
 18409  			if v_1.Op != OpAMD64SETEQF {
 18410  				break
 18411  			}
 18412  			if cmp != v_1.Args[0] {
 18413  				break
 18414  			}
 18415  			yes := b.Succs[0]
 18416  			no := b.Succs[1]
 18417  			b.Kind = BlockAMD64EQF
 18418  			b.SetControl(cmp)
 18419  			_ = yes
 18420  			_ = no
 18421  			return true
 18422  		}
 18423  		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
 18424  		// cond:
 18425  		// result: (NEF  cmp yes no)
 18426  		for {
 18427  			v := b.Control
 18428  			if v.Op != OpAMD64TESTB {
 18429  				break
 18430  			}
 18431  			v_0 := v.Args[0]
 18432  			if v_0.Op != OpAMD64SETNEF {
 18433  				break
 18434  			}
 18435  			cmp := v_0.Args[0]
 18436  			v_1 := v.Args[1]
 18437  			if v_1.Op != OpAMD64SETNEF {
 18438  				break
 18439  			}
 18440  			if cmp != v_1.Args[0] {
 18441  				break
 18442  			}
 18443  			yes := b.Succs[0]
 18444  			no := b.Succs[1]
 18445  			b.Kind = BlockAMD64NEF
 18446  			b.SetControl(cmp)
 18447  			_ = yes
 18448  			_ = no
 18449  			return true
 18450  		}
 18451  		// match: (NE (InvertFlags cmp) yes no)
 18452  		// cond:
 18453  		// result: (NE cmp yes no)
 18454  		for {
 18455  			v := b.Control
 18456  			if v.Op != OpAMD64InvertFlags {
 18457  				break
 18458  			}
 18459  			cmp := v.Args[0]
 18460  			yes := b.Succs[0]
 18461  			no := b.Succs[1]
 18462  			b.Kind = BlockAMD64NE
 18463  			b.SetControl(cmp)
 18464  			_ = yes
 18465  			_ = no
 18466  			return true
 18467  		}
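		// Unlike the ordered kinds, NE keeps its own kind under
		// InvertFlags: x != y and y != x agree, so only the wrapper is
		// removed.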
 18468  		// match: (NE (FlagEQ) yes no)
 18469  		// cond:
 18470  		// result: (First nil no yes)
 18471  		for {
 18472  			v := b.Control
 18473  			if v.Op != OpAMD64FlagEQ {
 18474  				break
 18475  			}
 18476  			yes := b.Succs[0]
 18477  			no := b.Succs[1]
 18478  			b.Kind = BlockFirst
 18479  			b.SetControl(nil)
 18480  			b.swapSuccessors()
 18481  			_ = no
 18482  			_ = yes
 18483  			return true
 18484  		}
 18485  		// match: (NE (FlagLT_ULT) yes no)
 18486  		// cond:
 18487  		// result: (First nil yes no)
 18488  		for {
 18489  			v := b.Control
 18490  			if v.Op != OpAMD64FlagLT_ULT {
 18491  				break
 18492  			}
 18493  			yes := b.Succs[0]
 18494  			no := b.Succs[1]
 18495  			b.Kind = BlockFirst
 18496  			b.SetControl(nil)
 18497  			_ = yes
 18498  			_ = no
 18499  			return true
 18500  		}
 18501  		// match: (NE (FlagLT_UGT) yes no)
 18502  		// cond:
 18503  		// result: (First nil yes no)
 18504  		for {
 18505  			v := b.Control
 18506  			if v.Op != OpAMD64FlagLT_UGT {
 18507  				break
 18508  			}
 18509  			yes := b.Succs[0]
 18510  			no := b.Succs[1]
 18511  			b.Kind = BlockFirst
 18512  			b.SetControl(nil)
 18513  			_ = yes
 18514  			_ = no
 18515  			return true
 18516  		}
 18517  		// match: (NE (FlagGT_ULT) yes no)
 18518  		// cond:
 18519  		// result: (First nil yes no)
 18520  		for {
 18521  			v := b.Control
 18522  			if v.Op != OpAMD64FlagGT_ULT {
 18523  				break
 18524  			}
 18525  			yes := b.Succs[0]
 18526  			no := b.Succs[1]
 18527  			b.Kind = BlockFirst
 18528  			b.SetControl(nil)
 18529  			_ = yes
 18530  			_ = no
 18531  			return true
 18532  		}
 18533  		// match: (NE (FlagGT_UGT) yes no)
 18534  		// cond:
 18535  		// result: (First nil yes no)
 18536  		for {
 18537  			v := b.Control
 18538  			if v.Op != OpAMD64FlagGT_UGT {
 18539  				break
 18540  			}
 18541  			yes := b.Succs[0]
 18542  			no := b.Succs[1]
 18543  			b.Kind = BlockFirst
 18544  			b.SetControl(nil)
 18545  			_ = yes
 18546  			_ = no
 18547  			return true
 18548  		}
 18549  	case BlockAMD64UGE:
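		// The unsigned block kinds read the unsigned half of the flag
		// constants: UGE, for example, is true for FlagLT_UGT (signed
		// less-than, unsigned greater-than) but false for FlagLT_ULT,
		// which is why the two fold in opposite directions below.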
 18550  		// match: (UGE (InvertFlags cmp) yes no)
 18551  		// cond:
 18552  		// result: (ULE cmp yes no)
 18553  		for {
 18554  			v := b.Control
 18555  			if v.Op != OpAMD64InvertFlags {
 18556  				break
 18557  			}
 18558  			cmp := v.Args[0]
 18559  			yes := b.Succs[0]
 18560  			no := b.Succs[1]
 18561  			b.Kind = BlockAMD64ULE
 18562  			b.SetControl(cmp)
 18563  			_ = yes
 18564  			_ = no
 18565  			return true
 18566  		}
 18567  		// match: (UGE (FlagEQ) yes no)
 18568  		// cond:
 18569  		// result: (First nil yes no)
 18570  		for {
 18571  			v := b.Control
 18572  			if v.Op != OpAMD64FlagEQ {
 18573  				break
 18574  			}
 18575  			yes := b.Succs[0]
 18576  			no := b.Succs[1]
 18577  			b.Kind = BlockFirst
 18578  			b.SetControl(nil)
 18579  			_ = yes
 18580  			_ = no
 18581  			return true
 18582  		}
 18583  		// match: (UGE (FlagLT_ULT) yes no)
 18584  		// cond:
 18585  		// result: (First nil no yes)
 18586  		for {
 18587  			v := b.Control
 18588  			if v.Op != OpAMD64FlagLT_ULT {
 18589  				break
 18590  			}
 18591  			yes := b.Succs[0]
 18592  			no := b.Succs[1]
 18593  			b.Kind = BlockFirst
 18594  			b.SetControl(nil)
 18595  			b.swapSuccessors()
 18596  			_ = no
 18597  			_ = yes
 18598  			return true
 18599  		}
 18600  		// match: (UGE (FlagLT_UGT) yes no)
 18601  		// cond:
 18602  		// result: (First nil yes no)
 18603  		for {
 18604  			v := b.Control
 18605  			if v.Op != OpAMD64FlagLT_UGT {
 18606  				break
 18607  			}
 18608  			yes := b.Succs[0]
 18609  			no := b.Succs[1]
 18610  			b.Kind = BlockFirst
 18611  			b.SetControl(nil)
 18612  			_ = yes
 18613  			_ = no
 18614  			return true
 18615  		}
 18616  		// match: (UGE (FlagGT_ULT) yes no)
 18617  		// cond:
 18618  		// result: (First nil no yes)
 18619  		for {
 18620  			v := b.Control
 18621  			if v.Op != OpAMD64FlagGT_ULT {
 18622  				break
 18623  			}
 18624  			yes := b.Succs[0]
 18625  			no := b.Succs[1]
 18626  			b.Kind = BlockFirst
 18627  			b.SetControl(nil)
 18628  			b.swapSuccessors()
 18629  			_ = no
 18630  			_ = yes
 18631  			return true
 18632  		}
 18633  		// match: (UGE (FlagGT_UGT) yes no)
 18634  		// cond:
 18635  		// result: (First nil yes no)
 18636  		for {
 18637  			v := b.Control
 18638  			if v.Op != OpAMD64FlagGT_UGT {
 18639  				break
 18640  			}
 18641  			yes := b.Succs[0]
 18642  			no := b.Succs[1]
 18643  			b.Kind = BlockFirst
 18644  			b.SetControl(nil)
 18645  			_ = yes
 18646  			_ = no
 18647  			return true
 18648  		}
 18649  	case BlockAMD64UGT:
 18650  		// match: (UGT (InvertFlags cmp) yes no)
 18651  		// cond:
 18652  		// result: (ULT cmp yes no)
 18653  		for {
 18654  			v := b.Control
 18655  			if v.Op != OpAMD64InvertFlags {
 18656  				break
 18657  			}
 18658  			cmp := v.Args[0]
 18659  			yes := b.Succs[0]
 18660  			no := b.Succs[1]
 18661  			b.Kind = BlockAMD64ULT
 18662  			b.SetControl(cmp)
 18663  			_ = yes
 18664  			_ = no
 18665  			return true
 18666  		}
 18667  		// match: (UGT (FlagEQ) yes no)
 18668  		// cond:
 18669  		// result: (First nil no yes)
 18670  		for {
 18671  			v := b.Control
 18672  			if v.Op != OpAMD64FlagEQ {
 18673  				break
 18674  			}
 18675  			yes := b.Succs[0]
 18676  			no := b.Succs[1]
 18677  			b.Kind = BlockFirst
 18678  			b.SetControl(nil)
 18679  			b.swapSuccessors()
 18680  			_ = no
 18681  			_ = yes
 18682  			return true
 18683  		}
 18684  		// match: (UGT (FlagLT_ULT) yes no)
 18685  		// cond:
 18686  		// result: (First nil no yes)
 18687  		for {
 18688  			v := b.Control
 18689  			if v.Op != OpAMD64FlagLT_ULT {
 18690  				break
 18691  			}
 18692  			yes := b.Succs[0]
 18693  			no := b.Succs[1]
 18694  			b.Kind = BlockFirst
 18695  			b.SetControl(nil)
 18696  			b.swapSuccessors()
 18697  			_ = no
 18698  			_ = yes
 18699  			return true
 18700  		}
 18701  		// match: (UGT (FlagLT_UGT) yes no)
 18702  		// cond:
 18703  		// result: (First nil yes no)
 18704  		for {
 18705  			v := b.Control
 18706  			if v.Op != OpAMD64FlagLT_UGT {
 18707  				break
 18708  			}
 18709  			yes := b.Succs[0]
 18710  			no := b.Succs[1]
 18711  			b.Kind = BlockFirst
 18712  			b.SetControl(nil)
 18713  			_ = yes
 18714  			_ = no
 18715  			return true
 18716  		}
 18717  		// match: (UGT (FlagGT_ULT) yes no)
 18718  		// cond:
 18719  		// result: (First nil no yes)
 18720  		for {
 18721  			v := b.Control
 18722  			if v.Op != OpAMD64FlagGT_ULT {
 18723  				break
 18724  			}
 18725  			yes := b.Succs[0]
 18726  			no := b.Succs[1]
 18727  			b.Kind = BlockFirst
 18728  			b.SetControl(nil)
 18729  			b.swapSuccessors()
 18730  			_ = no
 18731  			_ = yes
 18732  			return true
 18733  		}
 18734  		// match: (UGT (FlagGT_UGT) yes no)
 18735  		// cond:
 18736  		// result: (First nil yes no)
 18737  		for {
 18738  			v := b.Control
 18739  			if v.Op != OpAMD64FlagGT_UGT {
 18740  				break
 18741  			}
 18742  			yes := b.Succs[0]
 18743  			no := b.Succs[1]
 18744  			b.Kind = BlockFirst
 18745  			b.SetControl(nil)
 18746  			_ = yes
 18747  			_ = no
 18748  			return true
 18749  		}
 18750  	case BlockAMD64ULE:
 18751  		// match: (ULE (InvertFlags cmp) yes no)
 18752  		// cond:
 18753  		// result: (UGE cmp yes no)
 18754  		for {
 18755  			v := b.Control
 18756  			if v.Op != OpAMD64InvertFlags {
 18757  				break
 18758  			}
 18759  			cmp := v.Args[0]
 18760  			yes := b.Succs[0]
 18761  			no := b.Succs[1]
 18762  			b.Kind = BlockAMD64UGE
 18763  			b.SetControl(cmp)
 18764  			_ = yes
 18765  			_ = no
 18766  			return true
 18767  		}
 18768  		// match: (ULE (FlagEQ) yes no)
 18769  		// cond:
 18770  		// result: (First nil yes no)
 18771  		for {
 18772  			v := b.Control
 18773  			if v.Op != OpAMD64FlagEQ {
 18774  				break
 18775  			}
 18776  			yes := b.Succs[0]
 18777  			no := b.Succs[1]
 18778  			b.Kind = BlockFirst
 18779  			b.SetControl(nil)
 18780  			_ = yes
 18781  			_ = no
 18782  			return true
 18783  		}
 18784  		// match: (ULE (FlagLT_ULT) yes no)
 18785  		// cond:
 18786  		// result: (First nil yes no)
 18787  		for {
 18788  			v := b.Control
 18789  			if v.Op != OpAMD64FlagLT_ULT {
 18790  				break
 18791  			}
 18792  			yes := b.Succs[0]
 18793  			no := b.Succs[1]
 18794  			b.Kind = BlockFirst
 18795  			b.SetControl(nil)
 18796  			_ = yes
 18797  			_ = no
 18798  			return true
 18799  		}
 18800  		// match: (ULE (FlagLT_UGT) yes no)
 18801  		// cond:
 18802  		// result: (First nil no yes)
 18803  		for {
 18804  			v := b.Control
 18805  			if v.Op != OpAMD64FlagLT_UGT {
 18806  				break
 18807  			}
 18808  			yes := b.Succs[0]
 18809  			no := b.Succs[1]
 18810  			b.Kind = BlockFirst
 18811  			b.SetControl(nil)
 18812  			b.swapSuccessors()
 18813  			_ = no
 18814  			_ = yes
 18815  			return true
 18816  		}
 18817  		// match: (ULE (FlagGT_ULT) yes no)
 18818  		// cond:
 18819  		// result: (First nil yes no)
 18820  		for {
 18821  			v := b.Control
 18822  			if v.Op != OpAMD64FlagGT_ULT {
 18823  				break
 18824  			}
 18825  			yes := b.Succs[0]
 18826  			no := b.Succs[1]
 18827  			b.Kind = BlockFirst
 18828  			b.SetControl(nil)
 18829  			_ = yes
 18830  			_ = no
 18831  			return true
 18832  		}
 18833  		// match: (ULE (FlagGT_UGT) yes no)
 18834  		// cond:
 18835  		// result: (First nil no yes)
 18836  		for {
 18837  			v := b.Control
 18838  			if v.Op != OpAMD64FlagGT_UGT {
 18839  				break
 18840  			}
 18841  			yes := b.Succs[0]
 18842  			no := b.Succs[1]
 18843  			b.Kind = BlockFirst
 18844  			b.SetControl(nil)
 18845  			b.swapSuccessors()
 18846  			_ = no
 18847  			_ = yes
 18848  			return true
 18849  		}
 18850  	case BlockAMD64ULT:
 18851  		// match: (ULT (InvertFlags cmp) yes no)
 18852  		// cond:
 18853  		// result: (UGT cmp yes no)
 18854  		for {
 18855  			v := b.Control
 18856  			if v.Op != OpAMD64InvertFlags {
 18857  				break
 18858  			}
 18859  			cmp := v.Args[0]
 18860  			yes := b.Succs[0]
 18861  			no := b.Succs[1]
 18862  			b.Kind = BlockAMD64UGT
 18863  			b.SetControl(cmp)
 18864  			_ = yes
 18865  			_ = no
 18866  			return true
 18867  		}
 18868  		// match: (ULT (FlagEQ) yes no)
 18869  		// cond:
 18870  		// result: (First nil no yes)
 18871  		for {
 18872  			v := b.Control
 18873  			if v.Op != OpAMD64FlagEQ {
 18874  				break
 18875  			}
 18876  			yes := b.Succs[0]
 18877  			no := b.Succs[1]
 18878  			b.Kind = BlockFirst
 18879  			b.SetControl(nil)
 18880  			b.swapSuccessors()
 18881  			_ = no
 18882  			_ = yes
 18883  			return true
 18884  		}
 18885  		// match: (ULT (FlagLT_ULT) yes no)
 18886  		// cond:
 18887  		// result: (First nil yes no)
 18888  		for {
 18889  			v := b.Control
 18890  			if v.Op != OpAMD64FlagLT_ULT {
 18891  				break
 18892  			}
 18893  			yes := b.Succs[0]
 18894  			no := b.Succs[1]
 18895  			b.Kind = BlockFirst
 18896  			b.SetControl(nil)
 18897  			_ = yes
 18898  			_ = no
 18899  			return true
 18900  		}
 18901  		// match: (ULT (FlagLT_UGT) yes no)
 18902  		// cond:
 18903  		// result: (First nil no yes)
 18904  		for {
 18905  			v := b.Control
 18906  			if v.Op != OpAMD64FlagLT_UGT {
 18907  				break
 18908  			}
 18909  			yes := b.Succs[0]
 18910  			no := b.Succs[1]
 18911  			b.Kind = BlockFirst
 18912  			b.SetControl(nil)
 18913  			b.swapSuccessors()
 18914  			_ = no
 18915  			_ = yes
 18916  			return true
 18917  		}
 18918  		// match: (ULT (FlagGT_ULT) yes no)
 18919  		// cond:
 18920  		// result: (First nil yes no)
 18921  		for {
 18922  			v := b.Control
 18923  			if v.Op != OpAMD64FlagGT_ULT {
 18924  				break
 18925  			}
 18926  			yes := b.Succs[0]
 18927  			no := b.Succs[1]
 18928  			b.Kind = BlockFirst
 18929  			b.SetControl(nil)
 18930  			_ = yes
 18931  			_ = no
 18932  			return true
 18933  		}
 18934  		// match: (ULT (FlagGT_UGT) yes no)
 18935  		// cond:
 18936  		// result: (First nil no yes)
 18937  		for {
 18938  			v := b.Control
 18939  			if v.Op != OpAMD64FlagGT_UGT {
 18940  				break
 18941  			}
 18942  			yes := b.Succs[0]
 18943  			no := b.Succs[1]
 18944  			b.Kind = BlockFirst
 18945  			b.SetControl(nil)
 18946  			b.swapSuccessors()
 18947  			_ = no
 18948  			_ = yes
 18949  			return true
 18950  		}
 18951  	}
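	// No rule matched this block. The generic rewrite driver in this
	// package appears to reapply rewriteBlockAMD64 (together with
	// rewriteValueAMD64) until a whole pass reports no change, so
	// returning false here is what eventually ends that fixpoint
	// iteration.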
 18952  	return false
 18953  }