// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "fmt"
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = fmt.Println   // in case not otherwise used
var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used
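// rewriteValueAMD64 dispatches on v.Op to the generated per-opcode helpers
// below. Each helper reports whether it rewrote v in place; opcodes with
// many rules are split across _0, _10, _20, ... helpers tried in order.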
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ_0(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst_0(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload_0(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry_0(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload_0(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload_0(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload_0(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
	case OpAMD64BTCLconstmodify:
		return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
	case OpAMD64BTCLmodify:
		return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
	case OpAMD64BTCQconstmodify:
		return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
	case OpAMD64BTCQmodify:
		return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
	case OpAMD64BTRLconstmodify:
		return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
	case OpAMD64BTRLmodify:
		return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
	case OpAMD64BTRQconstmodify:
		return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
	case OpAMD64BTRQmodify:
		return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
	case OpAMD64BTSLconstmodify:
		return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
	case OpAMD64BTSLmodify:
		return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
	case OpAMD64BTSQconstmodify:
		return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
	case OpAMD64BTSQmodify:
		return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload_0(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload_0(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload_0(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload_0(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload_0(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD_0(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload_0(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS_0(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload_0(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL_0(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU_0(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ_0(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1_0(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2_0(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4_0(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v) || rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) || rewriteValueAMD64_OpAMD64MOVLstore_20(v) || rewriteValueAMD64_OpAMD64MOVLstore_30(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v) || rewriteValueAMD64_OpAMD64MULLconst_10(v) || rewriteValueAMD64_OpAMD64MULLconst_20(v) || rewriteValueAMD64_OpAMD64MULLconst_30(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) || rewriteValueAMD64_OpAMD64MULQconst_30(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload_0(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify_0(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload_0(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore_0(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore_0(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore_0(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore_0(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload_0(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload_0(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload_0(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload_0(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16_0(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero_0(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8_0(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v) || rewriteValueAMD64_OpMove_20(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRotateLeft16:
		return rewriteValueAMD64_OpRotateLeft16_0(v)
	case OpRotateLeft32:
		return rewriteValueAMD64_OpRotateLeft32_0(v)
	case OpRotateLeft64:
		return rewriteValueAMD64_OpRotateLeft64_0(v)
	case OpRotateLeft8:
		return rewriteValueAMD64_OpRotateLeft8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
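// Each helper below implements a block of rules from gen/AMD64.rules.
// Every rule is annotated with its source form:
//   // match:  the value pattern to recognize
//   // cond:   an optional predicate that must also hold
//   // result: the value the match is rewritten to
// For example, the first ADCQ rule folds a 32-bit-representable constant
// into the instruction: (ADCQ x (MOVQconst [c]) carry) with is32Bit(c)
// becomes (ADCQconst x [c] carry).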
func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool {
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [c] carry)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		carry := v.Args[2]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADCQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(carry)
		return true
	}
	// match: (ADCQ (MOVQconst [c]) x carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [c] carry)
	for {
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		carry := v.Args[2]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADCQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(carry)
		return true
	}
	// match: (ADCQ x y (FlagEQ))
	// cond:
	// result: (ADDQcarry x y)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool {
	// match: (ADCQconst x [c] (FlagEQ))
	// cond:
	// result: (ADDQconstcarry x [c])
	for {
		c := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (SHLLconst [3] y))
	// cond:
	// result: (LEAL8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [3] y) x)
	// cond:
	// result: (LEAL8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x (SHLLconst [2] y))
	// cond:
	// result: (LEAL4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [2] y) x)
	// cond:
	// result: (LEAL4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (SHLLconst [1] y))
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [1] y) x)
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (ADDL y y))
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (ADDL y y) x)
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (ADDL x y))
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (ADDL y x))
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL (ADDL x y) x)
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL (ADDL y x) x)
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
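// Commutative patterns such as (ADDL x (ADDL x y)) appear in both operand
// orders above: the rule generator expands commutativity into explicit
// mirrored matches rather than matching both orders at runtime.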
func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool {
	// match: (ADDL (ADDLconst [c] x) y)
	// cond:
	// result: (LEAL1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL y (ADDLconst [c] x))
	// cond:
	// result: (LEAL1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAL {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (LEAL [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
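// The ADDLconst helpers fold constants into LEAL forms and merge
// displacements; each merge is guarded by is32Bit(c+d) so the combined
// offset still fits in a signed 32-bit addressing displacement.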
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] (ADDL x y))
	// cond:
	// result: (LEAL1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// cond:
	// result: (LEAL1 [c] x x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool {
	// match: (ADDLconst [off] x:(SP))
	// cond:
	// result: (LEAL [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool {
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool {
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
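// Editorial note (added commentary, not produced by the rule generator): the
// *load and *modify rules in this family all share one shape: an
// (ADDQconst [off2] base) or (LEAQ [off2] {sym2} base) feeding the address
// operand is folded into the instruction's own displacement. The guard
// is32Bit(off1+off2) exists because an x86-64 addressing displacement is a
// signed 32-bit field, so the merged offset must still fit:
//
//	// (base+off2) addressed at off1 == base addressed at off1+off2,
//	// valid only while off1+off2 fits in int32.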
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
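// Editorial note (added commentary, not produced by the rule generator): the
// ADDQ rules above recognize three idioms: shifted adds (x + y<<3 becomes
// LEAQ8 x y, i.e. x + 8*y in one instruction), rotates (matching
// SHLQconst/SHRQconst pairs with shift counts summing to 64 become
// ROLQconst), and x + (-y) becomes SUBQ. For example, hypothetical source
//
//	p := base + 8*i
//
// has the (ADDQ x (SHLQconst [3] y)) shape and is emitted as a single
// scaled-index LEAQ instead of a shift followed by an add.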
func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool {
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQcarry (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// cond:
	// result: (LEAQ1 [c] x x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool {
	// match: (ADDQconst [off] x:(SP))
	// cond:
	// result: (LEAQ [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool {
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
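// Editorial note (added commentary, not produced by the rule generator): in
// the *constmodify rules the AuxInt packs both an immediate value and an
// offset (the ssa package's ValAndOff encoding), so offset folding must go
// through ValAndOff(valoff1).canAdd(off2) rather than a plain is32Bit check;
// canAdd verifies the combined offset still fits the packed field before
// add(off2) rebuilds the pair.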
func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool {
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
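// Editorial note (added commentary, not produced by the rule generator): the
// last ADDSDload rule above is a store-to-load forwarding rewrite: when the
// load's memory argument is a MOVQstore to the same address, the loaded
// double is taken directly from the stored integer bits via MOVQi2f,
// eliminating the memory round trip entirely.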
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// cond: !config.nacl
	// result: (BTRL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NOTL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		y := v_0_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y)))
	// cond: !config.nacl
	// result: (BTRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NOTL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		y := v_1_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDL x (MOVLconst [c]))
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDLconst [c] x)
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (BTRLconst [d] x))
	// cond:
	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c &^ (1 << uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
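// Editorial note (added commentary, not produced by the rule generator):
// ANDLconst masks with a recognizable shape collapse to cheaper forms: 0xFF
// becomes the zero-extension MOVBQZX and 0xFFFF becomes MOVWQZX, while a mask
// that clears exactly one bit (at position >= 7) becomes a single-bit reset.
// A hypothetical example of the latter shape:
//
//	x &^ (1 << 7) // ^c is 1<<7, a power of two >= 128, so BTRLconst [7] x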
func rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v *Value) bool {
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify_0(v *Value) bool {
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
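// Editorial note (added commentary, not produced by the rule generator): the
// ANDQ rules that follow mirror the ANDL ones one word size up:
// (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y))) is the compiler's canonical form
// of x &^ (1 << y), lowered to a single BTRQ x y; the !config.nacl guard
// simply skips the BT-family rewrites on the nacl port.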
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// cond: !config.nacl
	// result: (BTRQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NOTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		y := v_0_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y)))
	// cond: !config.nacl
	// result: (BTRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NOTQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		y := v_1_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRQconst [log2(^c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRQconst [log2(^c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDQconst [c] x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRQconst [log2(^c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (BTRQconst [d] x))
	// cond:
	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c &^ (1 << uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v *Value) bool {
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
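// Editorial note (added commentary, not produced by the rule generator): the
// third rule in each *load function below (as in the L-width ones earlier)
// forwards a pending SSE store into an integer op: when (ANDQload ... ptr
// (MOVSDstore [off] {sym} ptr y _)) sees the same address and offset, the
// bits of y are moved register-to-register with MOVQf2i and the AND happens
// without touching memory at all.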
func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify_0(v *Value) bool {
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool {
	// match: (BTCLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [d ^ 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = d ^ 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (BTCLconst [d] x))
	// cond:
	// result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [d^(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = d ^ (1 << uint32(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
	// match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
	// match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTCLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool {
	// match: (BTCQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [d ^ 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = d ^ 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (BTCQconst [d] x))
	// cond:
	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d ^ (1 << uint32(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool {
	// match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool {
	// match: (BTCQmodify [off1] {sym} (ADDQconst [off2]
base) val mem) 4887 // cond: is32Bit(off1+off2) 4888 // result: (BTCQmodify [off1+off2] {sym} base val mem) 4889 for { 4890 off1 := v.AuxInt 4891 sym := v.Aux 4892 _ = v.Args[2] 4893 v_0 := v.Args[0] 4894 if v_0.Op != OpAMD64ADDQconst { 4895 break 4896 } 4897 off2 := v_0.AuxInt 4898 base := v_0.Args[0] 4899 val := v.Args[1] 4900 mem := v.Args[2] 4901 if !(is32Bit(off1 + off2)) { 4902 break 4903 } 4904 v.reset(OpAMD64BTCQmodify) 4905 v.AuxInt = off1 + off2 4906 v.Aux = sym 4907 v.AddArg(base) 4908 v.AddArg(val) 4909 v.AddArg(mem) 4910 return true 4911 } 4912 // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4913 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4914 // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4915 for { 4916 off1 := v.AuxInt 4917 sym1 := v.Aux 4918 _ = v.Args[2] 4919 v_0 := v.Args[0] 4920 if v_0.Op != OpAMD64LEAQ { 4921 break 4922 } 4923 off2 := v_0.AuxInt 4924 sym2 := v_0.Aux 4925 base := v_0.Args[0] 4926 val := v.Args[1] 4927 mem := v.Args[2] 4928 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4929 break 4930 } 4931 v.reset(OpAMD64BTCQmodify) 4932 v.AuxInt = off1 + off2 4933 v.Aux = mergeSym(sym1, sym2) 4934 v.AddArg(base) 4935 v.AddArg(val) 4936 v.AddArg(mem) 4937 return true 4938 } 4939 return false 4940 } 4941 func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool { 4942 // match: (BTLconst [c] (SHRQconst [d] x)) 4943 // cond: (c+d)<64 4944 // result: (BTQconst [c+d] x) 4945 for { 4946 c := v.AuxInt 4947 v_0 := v.Args[0] 4948 if v_0.Op != OpAMD64SHRQconst { 4949 break 4950 } 4951 d := v_0.AuxInt 4952 x := v_0.Args[0] 4953 if !((c + d) < 64) { 4954 break 4955 } 4956 v.reset(OpAMD64BTQconst) 4957 v.AuxInt = c + d 4958 v.AddArg(x) 4959 return true 4960 } 4961 // match: (BTLconst [c] (SHLQconst [d] x)) 4962 // cond: c>d 4963 // result: (BTLconst [c-d] x) 4964 for { 4965 c := v.AuxInt 4966 v_0 := v.Args[0] 4967 if v_0.Op != OpAMD64SHLQconst { 4968 break 4969 } 4970 d := v_0.AuxInt 4971 x := v_0.Args[0] 4972 if !(c > d) { 4973 break 4974 } 4975 v.reset(OpAMD64BTLconst) 4976 v.AuxInt = c - d 4977 v.AddArg(x) 4978 return true 4979 } 4980 // match: (BTLconst [0] s:(SHRQ x y)) 4981 // cond: 4982 // result: (BTQ y x) 4983 for { 4984 if v.AuxInt != 0 { 4985 break 4986 } 4987 s := v.Args[0] 4988 if s.Op != OpAMD64SHRQ { 4989 break 4990 } 4991 _ = s.Args[1] 4992 x := s.Args[0] 4993 y := s.Args[1] 4994 v.reset(OpAMD64BTQ) 4995 v.AddArg(y) 4996 v.AddArg(x) 4997 return true 4998 } 4999 // match: (BTLconst [c] (SHRLconst [d] x)) 5000 // cond: (c+d)<32 5001 // result: (BTLconst [c+d] x) 5002 for { 5003 c := v.AuxInt 5004 v_0 := v.Args[0] 5005 if v_0.Op != OpAMD64SHRLconst { 5006 break 5007 } 5008 d := v_0.AuxInt 5009 x := v_0.Args[0] 5010 if !((c + d) < 32) { 5011 break 5012 } 5013 v.reset(OpAMD64BTLconst) 5014 v.AuxInt = c + d 5015 v.AddArg(x) 5016 return true 5017 } 5018 // match: (BTLconst [c] (SHLLconst [d] x)) 5019 // cond: c>d 5020 // result: (BTLconst [c-d] x) 5021 for { 5022 c := v.AuxInt 5023 v_0 := v.Args[0] 5024 if v_0.Op != OpAMD64SHLLconst { 5025 break 5026 } 5027 d := v_0.AuxInt 5028 x := v_0.Args[0] 5029 if !(c > d) { 5030 break 5031 } 5032 v.reset(OpAMD64BTLconst) 5033 v.AuxInt = c - d 5034 v.AddArg(x) 5035 return true 5036 } 5037 // match: (BTLconst [0] s:(SHRL x y)) 5038 // cond: 5039 // result: (BTL y x) 5040 for { 5041 if v.AuxInt != 0 { 5042 break 5043 } 5044 s := v.Args[0] 5045 if s.Op != OpAMD64SHRL { 5046 break 5047 } 5048 _ = s.Args[1] 5049 x := s.Args[0] 5050 y := s.Args[1] 5051 v.reset(OpAMD64BTL) 
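// NOTE (editorial, illustrative; not emitted by the rule generator):
// testing bit 0 of a right-shifted value is the same as testing bit y of
// the unshifted value, so a constant bit test of a variable shift
// collapses into a single variable bit test; the operands are emitted
// bit-index first, hence (BTL y x). The identity, for an in-range count:
//
//	x, y := uint32(0b1010), uint32(3)
//	_ = (x >> y) & 1   // 1: bit 0 of x>>y ...
//	_ = x & (1 << y)   // 0b1000 (nonzero): ... is exactly bit y of x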
5052 v.AddArg(y) 5053 v.AddArg(x) 5054 return true 5055 } 5056 return false 5057 } 5058 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { 5059 // match: (BTQconst [c] (SHRQconst [d] x)) 5060 // cond: (c+d)<64 5061 // result: (BTQconst [c+d] x) 5062 for { 5063 c := v.AuxInt 5064 v_0 := v.Args[0] 5065 if v_0.Op != OpAMD64SHRQconst { 5066 break 5067 } 5068 d := v_0.AuxInt 5069 x := v_0.Args[0] 5070 if !((c + d) < 64) { 5071 break 5072 } 5073 v.reset(OpAMD64BTQconst) 5074 v.AuxInt = c + d 5075 v.AddArg(x) 5076 return true 5077 } 5078 // match: (BTQconst [c] (SHLQconst [d] x)) 5079 // cond: c>d 5080 // result: (BTQconst [c-d] x) 5081 for { 5082 c := v.AuxInt 5083 v_0 := v.Args[0] 5084 if v_0.Op != OpAMD64SHLQconst { 5085 break 5086 } 5087 d := v_0.AuxInt 5088 x := v_0.Args[0] 5089 if !(c > d) { 5090 break 5091 } 5092 v.reset(OpAMD64BTQconst) 5093 v.AuxInt = c - d 5094 v.AddArg(x) 5095 return true 5096 } 5097 // match: (BTQconst [0] s:(SHRQ x y)) 5098 // cond: 5099 // result: (BTQ y x) 5100 for { 5101 if v.AuxInt != 0 { 5102 break 5103 } 5104 s := v.Args[0] 5105 if s.Op != OpAMD64SHRQ { 5106 break 5107 } 5108 _ = s.Args[1] 5109 x := s.Args[0] 5110 y := s.Args[1] 5111 v.reset(OpAMD64BTQ) 5112 v.AddArg(y) 5113 v.AddArg(x) 5114 return true 5115 } 5116 return false 5117 } 5118 func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool { 5119 // match: (BTRLconst [c] (BTSLconst [c] x)) 5120 // cond: 5121 // result: (BTRLconst [c] x) 5122 for { 5123 c := v.AuxInt 5124 v_0 := v.Args[0] 5125 if v_0.Op != OpAMD64BTSLconst { 5126 break 5127 } 5128 if v_0.AuxInt != c { 5129 break 5130 } 5131 x := v_0.Args[0] 5132 v.reset(OpAMD64BTRLconst) 5133 v.AuxInt = c 5134 v.AddArg(x) 5135 return true 5136 } 5137 // match: (BTRLconst [c] (BTCLconst [c] x)) 5138 // cond: 5139 // result: (BTRLconst [c] x) 5140 for { 5141 c := v.AuxInt 5142 v_0 := v.Args[0] 5143 if v_0.Op != OpAMD64BTCLconst { 5144 break 5145 } 5146 if v_0.AuxInt != c { 5147 break 5148 } 5149 x := v_0.Args[0] 5150 v.reset(OpAMD64BTRLconst) 5151 v.AuxInt = c 5152 v.AddArg(x) 5153 return true 5154 } 5155 // match: (BTRLconst [c] (ANDLconst [d] x)) 5156 // cond: 5157 // result: (ANDLconst [d &^ (1<<uint32(c))] x) 5158 for { 5159 c := v.AuxInt 5160 v_0 := v.Args[0] 5161 if v_0.Op != OpAMD64ANDLconst { 5162 break 5163 } 5164 d := v_0.AuxInt 5165 x := v_0.Args[0] 5166 v.reset(OpAMD64ANDLconst) 5167 v.AuxInt = d &^ (1 << uint32(c)) 5168 v.AddArg(x) 5169 return true 5170 } 5171 // match: (BTRLconst [c] (BTRLconst [d] x)) 5172 // cond: 5173 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x) 5174 for { 5175 c := v.AuxInt 5176 v_0 := v.Args[0] 5177 if v_0.Op != OpAMD64BTRLconst { 5178 break 5179 } 5180 d := v_0.AuxInt 5181 x := v_0.Args[0] 5182 v.reset(OpAMD64ANDLconst) 5183 v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d)) 5184 v.AddArg(x) 5185 return true 5186 } 5187 // match: (BTRLconst [c] (MOVLconst [d])) 5188 // cond: 5189 // result: (MOVLconst [d&^(1<<uint32(c))]) 5190 for { 5191 c := v.AuxInt 5192 v_0 := v.Args[0] 5193 if v_0.Op != OpAMD64MOVLconst { 5194 break 5195 } 5196 d := v_0.AuxInt 5197 v.reset(OpAMD64MOVLconst) 5198 v.AuxInt = d &^ (1 << uint32(c)) 5199 return true 5200 } 5201 return false 5202 } 5203 func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool { 5204 // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5205 // cond: ValAndOff(valoff1).canAdd(off2) 5206 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5207 for { 5208 valoff1 := v.AuxInt 5209 sym := v.Aux 5210 _ = v.Args[1] 5211 v_0 
:= v.Args[0] 5212 if v_0.Op != OpAMD64ADDQconst { 5213 break 5214 } 5215 off2 := v_0.AuxInt 5216 base := v_0.Args[0] 5217 mem := v.Args[1] 5218 if !(ValAndOff(valoff1).canAdd(off2)) { 5219 break 5220 } 5221 v.reset(OpAMD64BTRLconstmodify) 5222 v.AuxInt = ValAndOff(valoff1).add(off2) 5223 v.Aux = sym 5224 v.AddArg(base) 5225 v.AddArg(mem) 5226 return true 5227 } 5228 // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5229 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5230 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5231 for { 5232 valoff1 := v.AuxInt 5233 sym1 := v.Aux 5234 _ = v.Args[1] 5235 v_0 := v.Args[0] 5236 if v_0.Op != OpAMD64LEAQ { 5237 break 5238 } 5239 off2 := v_0.AuxInt 5240 sym2 := v_0.Aux 5241 base := v_0.Args[0] 5242 mem := v.Args[1] 5243 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5244 break 5245 } 5246 v.reset(OpAMD64BTRLconstmodify) 5247 v.AuxInt = ValAndOff(valoff1).add(off2) 5248 v.Aux = mergeSym(sym1, sym2) 5249 v.AddArg(base) 5250 v.AddArg(mem) 5251 return true 5252 } 5253 return false 5254 } 5255 func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool { 5256 // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5257 // cond: is32Bit(off1+off2) 5258 // result: (BTRLmodify [off1+off2] {sym} base val mem) 5259 for { 5260 off1 := v.AuxInt 5261 sym := v.Aux 5262 _ = v.Args[2] 5263 v_0 := v.Args[0] 5264 if v_0.Op != OpAMD64ADDQconst { 5265 break 5266 } 5267 off2 := v_0.AuxInt 5268 base := v_0.Args[0] 5269 val := v.Args[1] 5270 mem := v.Args[2] 5271 if !(is32Bit(off1 + off2)) { 5272 break 5273 } 5274 v.reset(OpAMD64BTRLmodify) 5275 v.AuxInt = off1 + off2 5276 v.Aux = sym 5277 v.AddArg(base) 5278 v.AddArg(val) 5279 v.AddArg(mem) 5280 return true 5281 } 5282 // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5283 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5284 // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5285 for { 5286 off1 := v.AuxInt 5287 sym1 := v.Aux 5288 _ = v.Args[2] 5289 v_0 := v.Args[0] 5290 if v_0.Op != OpAMD64LEAQ { 5291 break 5292 } 5293 off2 := v_0.AuxInt 5294 sym2 := v_0.Aux 5295 base := v_0.Args[0] 5296 val := v.Args[1] 5297 mem := v.Args[2] 5298 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5299 break 5300 } 5301 v.reset(OpAMD64BTRLmodify) 5302 v.AuxInt = off1 + off2 5303 v.Aux = mergeSym(sym1, sym2) 5304 v.AddArg(base) 5305 v.AddArg(val) 5306 v.AddArg(mem) 5307 return true 5308 } 5309 return false 5310 } 5311 func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool { 5312 // match: (BTRQconst [c] (BTSQconst [c] x)) 5313 // cond: 5314 // result: (BTRQconst [c] x) 5315 for { 5316 c := v.AuxInt 5317 v_0 := v.Args[0] 5318 if v_0.Op != OpAMD64BTSQconst { 5319 break 5320 } 5321 if v_0.AuxInt != c { 5322 break 5323 } 5324 x := v_0.Args[0] 5325 v.reset(OpAMD64BTRQconst) 5326 v.AuxInt = c 5327 v.AddArg(x) 5328 return true 5329 } 5330 // match: (BTRQconst [c] (BTCQconst [c] x)) 5331 // cond: 5332 // result: (BTRQconst [c] x) 5333 for { 5334 c := v.AuxInt 5335 v_0 := v.Args[0] 5336 if v_0.Op != OpAMD64BTCQconst { 5337 break 5338 } 5339 if v_0.AuxInt != c { 5340 break 5341 } 5342 x := v_0.Args[0] 5343 v.reset(OpAMD64BTRQconst) 5344 v.AuxInt = c 5345 v.AddArg(x) 5346 return true 5347 } 5348 // match: (BTRQconst [c] (ANDQconst [d] x)) 5349 // cond: 5350 // result: (ANDQconst [d &^ (1<<uint32(c))] x) 5351 for { 5352 c := v.AuxInt 5353 v_0 := v.Args[0] 5354 if v_0.Op != 
OpAMD64ANDQconst { 5355 break 5356 } 5357 d := v_0.AuxInt 5358 x := v_0.Args[0] 5359 v.reset(OpAMD64ANDQconst) 5360 v.AuxInt = d &^ (1 << uint32(c)) 5361 v.AddArg(x) 5362 return true 5363 } 5364 // match: (BTRQconst [c] (BTRQconst [d] x)) 5365 // cond: 5366 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x) 5367 for { 5368 c := v.AuxInt 5369 v_0 := v.Args[0] 5370 if v_0.Op != OpAMD64BTRQconst { 5371 break 5372 } 5373 d := v_0.AuxInt 5374 x := v_0.Args[0] 5375 v.reset(OpAMD64ANDQconst) 5376 v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d)) 5377 v.AddArg(x) 5378 return true 5379 } 5380 // match: (BTRQconst [c] (MOVQconst [d])) 5381 // cond: 5382 // result: (MOVQconst [d&^(1<<uint32(c))]) 5383 for { 5384 c := v.AuxInt 5385 v_0 := v.Args[0] 5386 if v_0.Op != OpAMD64MOVQconst { 5387 break 5388 } 5389 d := v_0.AuxInt 5390 v.reset(OpAMD64MOVQconst) 5391 v.AuxInt = d &^ (1 << uint32(c)) 5392 return true 5393 } 5394 return false 5395 } 5396 func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool { 5397 // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5398 // cond: ValAndOff(valoff1).canAdd(off2) 5399 // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5400 for { 5401 valoff1 := v.AuxInt 5402 sym := v.Aux 5403 _ = v.Args[1] 5404 v_0 := v.Args[0] 5405 if v_0.Op != OpAMD64ADDQconst { 5406 break 5407 } 5408 off2 := v_0.AuxInt 5409 base := v_0.Args[0] 5410 mem := v.Args[1] 5411 if !(ValAndOff(valoff1).canAdd(off2)) { 5412 break 5413 } 5414 v.reset(OpAMD64BTRQconstmodify) 5415 v.AuxInt = ValAndOff(valoff1).add(off2) 5416 v.Aux = sym 5417 v.AddArg(base) 5418 v.AddArg(mem) 5419 return true 5420 } 5421 // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5422 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5423 // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5424 for { 5425 valoff1 := v.AuxInt 5426 sym1 := v.Aux 5427 _ = v.Args[1] 5428 v_0 := v.Args[0] 5429 if v_0.Op != OpAMD64LEAQ { 5430 break 5431 } 5432 off2 := v_0.AuxInt 5433 sym2 := v_0.Aux 5434 base := v_0.Args[0] 5435 mem := v.Args[1] 5436 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5437 break 5438 } 5439 v.reset(OpAMD64BTRQconstmodify) 5440 v.AuxInt = ValAndOff(valoff1).add(off2) 5441 v.Aux = mergeSym(sym1, sym2) 5442 v.AddArg(base) 5443 v.AddArg(mem) 5444 return true 5445 } 5446 return false 5447 } 5448 func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool { 5449 // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5450 // cond: is32Bit(off1+off2) 5451 // result: (BTRQmodify [off1+off2] {sym} base val mem) 5452 for { 5453 off1 := v.AuxInt 5454 sym := v.Aux 5455 _ = v.Args[2] 5456 v_0 := v.Args[0] 5457 if v_0.Op != OpAMD64ADDQconst { 5458 break 5459 } 5460 off2 := v_0.AuxInt 5461 base := v_0.Args[0] 5462 val := v.Args[1] 5463 mem := v.Args[2] 5464 if !(is32Bit(off1 + off2)) { 5465 break 5466 } 5467 v.reset(OpAMD64BTRQmodify) 5468 v.AuxInt = off1 + off2 5469 v.Aux = sym 5470 v.AddArg(base) 5471 v.AddArg(val) 5472 v.AddArg(mem) 5473 return true 5474 } 5475 // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5476 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5477 // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5478 for { 5479 off1 := v.AuxInt 5480 sym1 := v.Aux 5481 _ = v.Args[2] 5482 v_0 := v.Args[0] 5483 if v_0.Op != OpAMD64LEAQ { 5484 break 5485 } 5486 off2 := v_0.AuxInt 5487 sym2 := v_0.Aux 5488 base := v_0.Args[0] 5489 
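// NOTE (editorial, illustrative; not emitted by the rule generator): every
// *load/*modify/*constmodify rewrite in this family folds an ADDQconst or
// LEAQ base into the instruction's displacement. The is32Bit guard exists
// because an AMD64 addressing-mode displacement is a signed 32-bit
// immediate, and canMergeSym keeps at most one symbol in the merged
// address. A boundary sketch using the package's own helper:
//
//	off1, off2 := int64(1)<<31-1, int64(1)
//	_ = is32Bit(off1)        // true: still representable
//	_ = is32Bit(off1 + off2) // false: 1<<31 overflows the displacement, so no fold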
val := v.Args[1] 5490 mem := v.Args[2] 5491 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5492 break 5493 } 5494 v.reset(OpAMD64BTRQmodify) 5495 v.AuxInt = off1 + off2 5496 v.Aux = mergeSym(sym1, sym2) 5497 v.AddArg(base) 5498 v.AddArg(val) 5499 v.AddArg(mem) 5500 return true 5501 } 5502 return false 5503 } 5504 func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool { 5505 // match: (BTSLconst [c] (BTRLconst [c] x)) 5506 // cond: 5507 // result: (BTSLconst [c] x) 5508 for { 5509 c := v.AuxInt 5510 v_0 := v.Args[0] 5511 if v_0.Op != OpAMD64BTRLconst { 5512 break 5513 } 5514 if v_0.AuxInt != c { 5515 break 5516 } 5517 x := v_0.Args[0] 5518 v.reset(OpAMD64BTSLconst) 5519 v.AuxInt = c 5520 v.AddArg(x) 5521 return true 5522 } 5523 // match: (BTSLconst [c] (BTCLconst [c] x)) 5524 // cond: 5525 // result: (BTSLconst [c] x) 5526 for { 5527 c := v.AuxInt 5528 v_0 := v.Args[0] 5529 if v_0.Op != OpAMD64BTCLconst { 5530 break 5531 } 5532 if v_0.AuxInt != c { 5533 break 5534 } 5535 x := v_0.Args[0] 5536 v.reset(OpAMD64BTSLconst) 5537 v.AuxInt = c 5538 v.AddArg(x) 5539 return true 5540 } 5541 // match: (BTSLconst [c] (ORLconst [d] x)) 5542 // cond: 5543 // result: (ORLconst [d | 1<<uint32(c)] x) 5544 for { 5545 c := v.AuxInt 5546 v_0 := v.Args[0] 5547 if v_0.Op != OpAMD64ORLconst { 5548 break 5549 } 5550 d := v_0.AuxInt 5551 x := v_0.Args[0] 5552 v.reset(OpAMD64ORLconst) 5553 v.AuxInt = d | 1<<uint32(c) 5554 v.AddArg(x) 5555 return true 5556 } 5557 // match: (BTSLconst [c] (BTSLconst [d] x)) 5558 // cond: 5559 // result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x) 5560 for { 5561 c := v.AuxInt 5562 v_0 := v.Args[0] 5563 if v_0.Op != OpAMD64BTSLconst { 5564 break 5565 } 5566 d := v_0.AuxInt 5567 x := v_0.Args[0] 5568 v.reset(OpAMD64ORLconst) 5569 v.AuxInt = 1<<uint32(d) | 1<<uint32(c) 5570 v.AddArg(x) 5571 return true 5572 } 5573 // match: (BTSLconst [c] (MOVLconst [d])) 5574 // cond: 5575 // result: (MOVLconst [d|(1<<uint32(c))]) 5576 for { 5577 c := v.AuxInt 5578 v_0 := v.Args[0] 5579 if v_0.Op != OpAMD64MOVLconst { 5580 break 5581 } 5582 d := v_0.AuxInt 5583 v.reset(OpAMD64MOVLconst) 5584 v.AuxInt = d | (1 << uint32(c)) 5585 return true 5586 } 5587 return false 5588 } 5589 func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool { 5590 // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5591 // cond: ValAndOff(valoff1).canAdd(off2) 5592 // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5593 for { 5594 valoff1 := v.AuxInt 5595 sym := v.Aux 5596 _ = v.Args[1] 5597 v_0 := v.Args[0] 5598 if v_0.Op != OpAMD64ADDQconst { 5599 break 5600 } 5601 off2 := v_0.AuxInt 5602 base := v_0.Args[0] 5603 mem := v.Args[1] 5604 if !(ValAndOff(valoff1).canAdd(off2)) { 5605 break 5606 } 5607 v.reset(OpAMD64BTSLconstmodify) 5608 v.AuxInt = ValAndOff(valoff1).add(off2) 5609 v.Aux = sym 5610 v.AddArg(base) 5611 v.AddArg(mem) 5612 return true 5613 } 5614 // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5615 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5616 // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5617 for { 5618 valoff1 := v.AuxInt 5619 sym1 := v.Aux 5620 _ = v.Args[1] 5621 v_0 := v.Args[0] 5622 if v_0.Op != OpAMD64LEAQ { 5623 break 5624 } 5625 off2 := v_0.AuxInt 5626 sym2 := v_0.Aux 5627 base := v_0.Args[0] 5628 mem := v.Args[1] 5629 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5630 break 5631 } 5632 v.reset(OpAMD64BTSLconstmodify) 5633 v.AuxInt = 
ValAndOff(valoff1).add(off2) 5634 v.Aux = mergeSym(sym1, sym2) 5635 v.AddArg(base) 5636 v.AddArg(mem) 5637 return true 5638 } 5639 return false 5640 } 5641 func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool { 5642 // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5643 // cond: is32Bit(off1+off2) 5644 // result: (BTSLmodify [off1+off2] {sym} base val mem) 5645 for { 5646 off1 := v.AuxInt 5647 sym := v.Aux 5648 _ = v.Args[2] 5649 v_0 := v.Args[0] 5650 if v_0.Op != OpAMD64ADDQconst { 5651 break 5652 } 5653 off2 := v_0.AuxInt 5654 base := v_0.Args[0] 5655 val := v.Args[1] 5656 mem := v.Args[2] 5657 if !(is32Bit(off1 + off2)) { 5658 break 5659 } 5660 v.reset(OpAMD64BTSLmodify) 5661 v.AuxInt = off1 + off2 5662 v.Aux = sym 5663 v.AddArg(base) 5664 v.AddArg(val) 5665 v.AddArg(mem) 5666 return true 5667 } 5668 // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5669 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5670 // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5671 for { 5672 off1 := v.AuxInt 5673 sym1 := v.Aux 5674 _ = v.Args[2] 5675 v_0 := v.Args[0] 5676 if v_0.Op != OpAMD64LEAQ { 5677 break 5678 } 5679 off2 := v_0.AuxInt 5680 sym2 := v_0.Aux 5681 base := v_0.Args[0] 5682 val := v.Args[1] 5683 mem := v.Args[2] 5684 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5685 break 5686 } 5687 v.reset(OpAMD64BTSLmodify) 5688 v.AuxInt = off1 + off2 5689 v.Aux = mergeSym(sym1, sym2) 5690 v.AddArg(base) 5691 v.AddArg(val) 5692 v.AddArg(mem) 5693 return true 5694 } 5695 return false 5696 } 5697 func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool { 5698 // match: (BTSQconst [c] (BTRQconst [c] x)) 5699 // cond: 5700 // result: (BTSQconst [c] x) 5701 for { 5702 c := v.AuxInt 5703 v_0 := v.Args[0] 5704 if v_0.Op != OpAMD64BTRQconst { 5705 break 5706 } 5707 if v_0.AuxInt != c { 5708 break 5709 } 5710 x := v_0.Args[0] 5711 v.reset(OpAMD64BTSQconst) 5712 v.AuxInt = c 5713 v.AddArg(x) 5714 return true 5715 } 5716 // match: (BTSQconst [c] (BTCQconst [c] x)) 5717 // cond: 5718 // result: (BTSQconst [c] x) 5719 for { 5720 c := v.AuxInt 5721 v_0 := v.Args[0] 5722 if v_0.Op != OpAMD64BTCQconst { 5723 break 5724 } 5725 if v_0.AuxInt != c { 5726 break 5727 } 5728 x := v_0.Args[0] 5729 v.reset(OpAMD64BTSQconst) 5730 v.AuxInt = c 5731 v.AddArg(x) 5732 return true 5733 } 5734 // match: (BTSQconst [c] (ORQconst [d] x)) 5735 // cond: 5736 // result: (ORQconst [d | 1<<uint32(c)] x) 5737 for { 5738 c := v.AuxInt 5739 v_0 := v.Args[0] 5740 if v_0.Op != OpAMD64ORQconst { 5741 break 5742 } 5743 d := v_0.AuxInt 5744 x := v_0.Args[0] 5745 v.reset(OpAMD64ORQconst) 5746 v.AuxInt = d | 1<<uint32(c) 5747 v.AddArg(x) 5748 return true 5749 } 5750 // match: (BTSQconst [c] (BTSQconst [d] x)) 5751 // cond: 5752 // result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x) 5753 for { 5754 c := v.AuxInt 5755 v_0 := v.Args[0] 5756 if v_0.Op != OpAMD64BTSQconst { 5757 break 5758 } 5759 d := v_0.AuxInt 5760 x := v_0.Args[0] 5761 v.reset(OpAMD64ORQconst) 5762 v.AuxInt = 1<<uint32(d) | 1<<uint32(c) 5763 v.AddArg(x) 5764 return true 5765 } 5766 // match: (BTSQconst [c] (MOVQconst [d])) 5767 // cond: 5768 // result: (MOVQconst [d|(1<<uint32(c))]) 5769 for { 5770 c := v.AuxInt 5771 v_0 := v.Args[0] 5772 if v_0.Op != OpAMD64MOVQconst { 5773 break 5774 } 5775 d := v_0.AuxInt 5776 v.reset(OpAMD64MOVQconst) 5777 v.AuxInt = d | (1 << uint32(c)) 5778 return true 5779 } 5780 return false 5781 } 5782 func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool { 5783 // match: 
(BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5784 // cond: ValAndOff(valoff1).canAdd(off2) 5785 // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5786 for { 5787 valoff1 := v.AuxInt 5788 sym := v.Aux 5789 _ = v.Args[1] 5790 v_0 := v.Args[0] 5791 if v_0.Op != OpAMD64ADDQconst { 5792 break 5793 } 5794 off2 := v_0.AuxInt 5795 base := v_0.Args[0] 5796 mem := v.Args[1] 5797 if !(ValAndOff(valoff1).canAdd(off2)) { 5798 break 5799 } 5800 v.reset(OpAMD64BTSQconstmodify) 5801 v.AuxInt = ValAndOff(valoff1).add(off2) 5802 v.Aux = sym 5803 v.AddArg(base) 5804 v.AddArg(mem) 5805 return true 5806 } 5807 // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5808 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5809 // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5810 for { 5811 valoff1 := v.AuxInt 5812 sym1 := v.Aux 5813 _ = v.Args[1] 5814 v_0 := v.Args[0] 5815 if v_0.Op != OpAMD64LEAQ { 5816 break 5817 } 5818 off2 := v_0.AuxInt 5819 sym2 := v_0.Aux 5820 base := v_0.Args[0] 5821 mem := v.Args[1] 5822 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5823 break 5824 } 5825 v.reset(OpAMD64BTSQconstmodify) 5826 v.AuxInt = ValAndOff(valoff1).add(off2) 5827 v.Aux = mergeSym(sym1, sym2) 5828 v.AddArg(base) 5829 v.AddArg(mem) 5830 return true 5831 } 5832 return false 5833 } 5834 func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool { 5835 // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5836 // cond: is32Bit(off1+off2) 5837 // result: (BTSQmodify [off1+off2] {sym} base val mem) 5838 for { 5839 off1 := v.AuxInt 5840 sym := v.Aux 5841 _ = v.Args[2] 5842 v_0 := v.Args[0] 5843 if v_0.Op != OpAMD64ADDQconst { 5844 break 5845 } 5846 off2 := v_0.AuxInt 5847 base := v_0.Args[0] 5848 val := v.Args[1] 5849 mem := v.Args[2] 5850 if !(is32Bit(off1 + off2)) { 5851 break 5852 } 5853 v.reset(OpAMD64BTSQmodify) 5854 v.AuxInt = off1 + off2 5855 v.Aux = sym 5856 v.AddArg(base) 5857 v.AddArg(val) 5858 v.AddArg(mem) 5859 return true 5860 } 5861 // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5862 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5863 // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5864 for { 5865 off1 := v.AuxInt 5866 sym1 := v.Aux 5867 _ = v.Args[2] 5868 v_0 := v.Args[0] 5869 if v_0.Op != OpAMD64LEAQ { 5870 break 5871 } 5872 off2 := v_0.AuxInt 5873 sym2 := v_0.Aux 5874 base := v_0.Args[0] 5875 val := v.Args[1] 5876 mem := v.Args[2] 5877 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5878 break 5879 } 5880 v.reset(OpAMD64BTSQmodify) 5881 v.AuxInt = off1 + off2 5882 v.Aux = mergeSym(sym1, sym2) 5883 v.AddArg(base) 5884 v.AddArg(val) 5885 v.AddArg(mem) 5886 return true 5887 } 5888 return false 5889 } 5890 func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool { 5891 // match: (CMOVLCC x y (InvertFlags cond)) 5892 // cond: 5893 // result: (CMOVLLS x y cond) 5894 for { 5895 _ = v.Args[2] 5896 x := v.Args[0] 5897 y := v.Args[1] 5898 v_2 := v.Args[2] 5899 if v_2.Op != OpAMD64InvertFlags { 5900 break 5901 } 5902 cond := v_2.Args[0] 5903 v.reset(OpAMD64CMOVLLS) 5904 v.AddArg(x) 5905 v.AddArg(y) 5906 v.AddArg(cond) 5907 return true 5908 } 5909 // match: (CMOVLCC _ x (FlagEQ)) 5910 // cond: 5911 // result: x 5912 for { 5913 _ = v.Args[2] 5914 x := v.Args[1] 5915 v_2 := v.Args[2] 5916 if v_2.Op != OpAMD64FlagEQ { 5917 break 5918 } 5919 v.reset(OpCopy) 5920 v.Type = x.Type 5921 v.AddArg(x) 5922 return true 
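// NOTE (editorial, illustrative; not emitted by the rule generator): when
// the flags argument is one of the Flag* constants, the select is decided
// at compile time and the CMOV degenerates to a Copy of one operand.
// Reading this function's flag-constant rules together, (CMOVLCC a b
// flags) picks b when carry is clear (unsigned >=) and a otherwise,
// roughly:
//
//	if carryClear(flags) { result = b } else { result = a } // carryClear is a hypothetical predicate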
5923 } 5924 // match: (CMOVLCC _ x (FlagGT_UGT)) 5925 // cond: 5926 // result: x 5927 for { 5928 _ = v.Args[2] 5929 x := v.Args[1] 5930 v_2 := v.Args[2] 5931 if v_2.Op != OpAMD64FlagGT_UGT { 5932 break 5933 } 5934 v.reset(OpCopy) 5935 v.Type = x.Type 5936 v.AddArg(x) 5937 return true 5938 } 5939 // match: (CMOVLCC y _ (FlagGT_ULT)) 5940 // cond: 5941 // result: y 5942 for { 5943 _ = v.Args[2] 5944 y := v.Args[0] 5945 v_2 := v.Args[2] 5946 if v_2.Op != OpAMD64FlagGT_ULT { 5947 break 5948 } 5949 v.reset(OpCopy) 5950 v.Type = y.Type 5951 v.AddArg(y) 5952 return true 5953 } 5954 // match: (CMOVLCC y _ (FlagLT_ULT)) 5955 // cond: 5956 // result: y 5957 for { 5958 _ = v.Args[2] 5959 y := v.Args[0] 5960 v_2 := v.Args[2] 5961 if v_2.Op != OpAMD64FlagLT_ULT { 5962 break 5963 } 5964 v.reset(OpCopy) 5965 v.Type = y.Type 5966 v.AddArg(y) 5967 return true 5968 } 5969 // match: (CMOVLCC _ x (FlagLT_UGT)) 5970 // cond: 5971 // result: x 5972 for { 5973 _ = v.Args[2] 5974 x := v.Args[1] 5975 v_2 := v.Args[2] 5976 if v_2.Op != OpAMD64FlagLT_UGT { 5977 break 5978 } 5979 v.reset(OpCopy) 5980 v.Type = x.Type 5981 v.AddArg(x) 5982 return true 5983 } 5984 return false 5985 } 5986 func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool { 5987 // match: (CMOVLCS x y (InvertFlags cond)) 5988 // cond: 5989 // result: (CMOVLHI x y cond) 5990 for { 5991 _ = v.Args[2] 5992 x := v.Args[0] 5993 y := v.Args[1] 5994 v_2 := v.Args[2] 5995 if v_2.Op != OpAMD64InvertFlags { 5996 break 5997 } 5998 cond := v_2.Args[0] 5999 v.reset(OpAMD64CMOVLHI) 6000 v.AddArg(x) 6001 v.AddArg(y) 6002 v.AddArg(cond) 6003 return true 6004 } 6005 // match: (CMOVLCS y _ (FlagEQ)) 6006 // cond: 6007 // result: y 6008 for { 6009 _ = v.Args[2] 6010 y := v.Args[0] 6011 v_2 := v.Args[2] 6012 if v_2.Op != OpAMD64FlagEQ { 6013 break 6014 } 6015 v.reset(OpCopy) 6016 v.Type = y.Type 6017 v.AddArg(y) 6018 return true 6019 } 6020 // match: (CMOVLCS y _ (FlagGT_UGT)) 6021 // cond: 6022 // result: y 6023 for { 6024 _ = v.Args[2] 6025 y := v.Args[0] 6026 v_2 := v.Args[2] 6027 if v_2.Op != OpAMD64FlagGT_UGT { 6028 break 6029 } 6030 v.reset(OpCopy) 6031 v.Type = y.Type 6032 v.AddArg(y) 6033 return true 6034 } 6035 // match: (CMOVLCS _ x (FlagGT_ULT)) 6036 // cond: 6037 // result: x 6038 for { 6039 _ = v.Args[2] 6040 x := v.Args[1] 6041 v_2 := v.Args[2] 6042 if v_2.Op != OpAMD64FlagGT_ULT { 6043 break 6044 } 6045 v.reset(OpCopy) 6046 v.Type = x.Type 6047 v.AddArg(x) 6048 return true 6049 } 6050 // match: (CMOVLCS _ x (FlagLT_ULT)) 6051 // cond: 6052 // result: x 6053 for { 6054 _ = v.Args[2] 6055 x := v.Args[1] 6056 v_2 := v.Args[2] 6057 if v_2.Op != OpAMD64FlagLT_ULT { 6058 break 6059 } 6060 v.reset(OpCopy) 6061 v.Type = x.Type 6062 v.AddArg(x) 6063 return true 6064 } 6065 // match: (CMOVLCS y _ (FlagLT_UGT)) 6066 // cond: 6067 // result: y 6068 for { 6069 _ = v.Args[2] 6070 y := v.Args[0] 6071 v_2 := v.Args[2] 6072 if v_2.Op != OpAMD64FlagLT_UGT { 6073 break 6074 } 6075 v.reset(OpCopy) 6076 v.Type = y.Type 6077 v.AddArg(y) 6078 return true 6079 } 6080 return false 6081 } 6082 func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool { 6083 // match: (CMOVLEQ x y (InvertFlags cond)) 6084 // cond: 6085 // result: (CMOVLEQ x y cond) 6086 for { 6087 _ = v.Args[2] 6088 x := v.Args[0] 6089 y := v.Args[1] 6090 v_2 := v.Args[2] 6091 if v_2.Op != OpAMD64InvertFlags { 6092 break 6093 } 6094 cond := v_2.Args[0] 6095 v.reset(OpAMD64CMOVLEQ) 6096 v.AddArg(x) 6097 v.AddArg(y) 6098 v.AddArg(cond) 6099 return true 6100 } 6101 // match: (CMOVLEQ _ x (FlagEQ)) 6102 // cond: 6103 // 
result: x 6104 for { 6105 _ = v.Args[2] 6106 x := v.Args[1] 6107 v_2 := v.Args[2] 6108 if v_2.Op != OpAMD64FlagEQ { 6109 break 6110 } 6111 v.reset(OpCopy) 6112 v.Type = x.Type 6113 v.AddArg(x) 6114 return true 6115 } 6116 // match: (CMOVLEQ y _ (FlagGT_UGT)) 6117 // cond: 6118 // result: y 6119 for { 6120 _ = v.Args[2] 6121 y := v.Args[0] 6122 v_2 := v.Args[2] 6123 if v_2.Op != OpAMD64FlagGT_UGT { 6124 break 6125 } 6126 v.reset(OpCopy) 6127 v.Type = y.Type 6128 v.AddArg(y) 6129 return true 6130 } 6131 // match: (CMOVLEQ y _ (FlagGT_ULT)) 6132 // cond: 6133 // result: y 6134 for { 6135 _ = v.Args[2] 6136 y := v.Args[0] 6137 v_2 := v.Args[2] 6138 if v_2.Op != OpAMD64FlagGT_ULT { 6139 break 6140 } 6141 v.reset(OpCopy) 6142 v.Type = y.Type 6143 v.AddArg(y) 6144 return true 6145 } 6146 // match: (CMOVLEQ y _ (FlagLT_ULT)) 6147 // cond: 6148 // result: y 6149 for { 6150 _ = v.Args[2] 6151 y := v.Args[0] 6152 v_2 := v.Args[2] 6153 if v_2.Op != OpAMD64FlagLT_ULT { 6154 break 6155 } 6156 v.reset(OpCopy) 6157 v.Type = y.Type 6158 v.AddArg(y) 6159 return true 6160 } 6161 // match: (CMOVLEQ y _ (FlagLT_UGT)) 6162 // cond: 6163 // result: y 6164 for { 6165 _ = v.Args[2] 6166 y := v.Args[0] 6167 v_2 := v.Args[2] 6168 if v_2.Op != OpAMD64FlagLT_UGT { 6169 break 6170 } 6171 v.reset(OpCopy) 6172 v.Type = y.Type 6173 v.AddArg(y) 6174 return true 6175 } 6176 return false 6177 } 6178 func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool { 6179 // match: (CMOVLGE x y (InvertFlags cond)) 6180 // cond: 6181 // result: (CMOVLLE x y cond) 6182 for { 6183 _ = v.Args[2] 6184 x := v.Args[0] 6185 y := v.Args[1] 6186 v_2 := v.Args[2] 6187 if v_2.Op != OpAMD64InvertFlags { 6188 break 6189 } 6190 cond := v_2.Args[0] 6191 v.reset(OpAMD64CMOVLLE) 6192 v.AddArg(x) 6193 v.AddArg(y) 6194 v.AddArg(cond) 6195 return true 6196 } 6197 // match: (CMOVLGE _ x (FlagEQ)) 6198 // cond: 6199 // result: x 6200 for { 6201 _ = v.Args[2] 6202 x := v.Args[1] 6203 v_2 := v.Args[2] 6204 if v_2.Op != OpAMD64FlagEQ { 6205 break 6206 } 6207 v.reset(OpCopy) 6208 v.Type = x.Type 6209 v.AddArg(x) 6210 return true 6211 } 6212 // match: (CMOVLGE _ x (FlagGT_UGT)) 6213 // cond: 6214 // result: x 6215 for { 6216 _ = v.Args[2] 6217 x := v.Args[1] 6218 v_2 := v.Args[2] 6219 if v_2.Op != OpAMD64FlagGT_UGT { 6220 break 6221 } 6222 v.reset(OpCopy) 6223 v.Type = x.Type 6224 v.AddArg(x) 6225 return true 6226 } 6227 // match: (CMOVLGE _ x (FlagGT_ULT)) 6228 // cond: 6229 // result: x 6230 for { 6231 _ = v.Args[2] 6232 x := v.Args[1] 6233 v_2 := v.Args[2] 6234 if v_2.Op != OpAMD64FlagGT_ULT { 6235 break 6236 } 6237 v.reset(OpCopy) 6238 v.Type = x.Type 6239 v.AddArg(x) 6240 return true 6241 } 6242 // match: (CMOVLGE y _ (FlagLT_ULT)) 6243 // cond: 6244 // result: y 6245 for { 6246 _ = v.Args[2] 6247 y := v.Args[0] 6248 v_2 := v.Args[2] 6249 if v_2.Op != OpAMD64FlagLT_ULT { 6250 break 6251 } 6252 v.reset(OpCopy) 6253 v.Type = y.Type 6254 v.AddArg(y) 6255 return true 6256 } 6257 // match: (CMOVLGE y _ (FlagLT_UGT)) 6258 // cond: 6259 // result: y 6260 for { 6261 _ = v.Args[2] 6262 y := v.Args[0] 6263 v_2 := v.Args[2] 6264 if v_2.Op != OpAMD64FlagLT_UGT { 6265 break 6266 } 6267 v.reset(OpCopy) 6268 v.Type = y.Type 6269 v.AddArg(y) 6270 return true 6271 } 6272 return false 6273 } 6274 func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool { 6275 // match: (CMOVLGT x y (InvertFlags cond)) 6276 // cond: 6277 // result: (CMOVLLT x y cond) 6278 for { 6279 _ = v.Args[2] 6280 x := v.Args[0] 6281 y := v.Args[1] 6282 v_2 := v.Args[2] 6283 if v_2.Op != OpAMD64InvertFlags { 
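// NOTE (editorial, illustrative; not emitted by the rule generator):
// InvertFlags stands for the flags of the same comparison with its
// operands swapped, not for negated flags. Each CMOV condition therefore
// maps to its operand-swap dual, and EQ/NE map to themselves:
//
//	EQ<->EQ  NE<->NE  LT<->GT  LE<->GE  CS(ult)<->HI(ugt)  LS(ule)<->CC(uge)
//
// e.g. x < y and y > x are the same predicate, which is why CMOVLGT over
// inverted flags is rewritten to CMOVLLT here.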
6284 break 6285 } 6286 cond := v_2.Args[0] 6287 v.reset(OpAMD64CMOVLLT) 6288 v.AddArg(x) 6289 v.AddArg(y) 6290 v.AddArg(cond) 6291 return true 6292 } 6293 // match: (CMOVLGT y _ (FlagEQ)) 6294 // cond: 6295 // result: y 6296 for { 6297 _ = v.Args[2] 6298 y := v.Args[0] 6299 v_2 := v.Args[2] 6300 if v_2.Op != OpAMD64FlagEQ { 6301 break 6302 } 6303 v.reset(OpCopy) 6304 v.Type = y.Type 6305 v.AddArg(y) 6306 return true 6307 } 6308 // match: (CMOVLGT _ x (FlagGT_UGT)) 6309 // cond: 6310 // result: x 6311 for { 6312 _ = v.Args[2] 6313 x := v.Args[1] 6314 v_2 := v.Args[2] 6315 if v_2.Op != OpAMD64FlagGT_UGT { 6316 break 6317 } 6318 v.reset(OpCopy) 6319 v.Type = x.Type 6320 v.AddArg(x) 6321 return true 6322 } 6323 // match: (CMOVLGT _ x (FlagGT_ULT)) 6324 // cond: 6325 // result: x 6326 for { 6327 _ = v.Args[2] 6328 x := v.Args[1] 6329 v_2 := v.Args[2] 6330 if v_2.Op != OpAMD64FlagGT_ULT { 6331 break 6332 } 6333 v.reset(OpCopy) 6334 v.Type = x.Type 6335 v.AddArg(x) 6336 return true 6337 } 6338 // match: (CMOVLGT y _ (FlagLT_ULT)) 6339 // cond: 6340 // result: y 6341 for { 6342 _ = v.Args[2] 6343 y := v.Args[0] 6344 v_2 := v.Args[2] 6345 if v_2.Op != OpAMD64FlagLT_ULT { 6346 break 6347 } 6348 v.reset(OpCopy) 6349 v.Type = y.Type 6350 v.AddArg(y) 6351 return true 6352 } 6353 // match: (CMOVLGT y _ (FlagLT_UGT)) 6354 // cond: 6355 // result: y 6356 for { 6357 _ = v.Args[2] 6358 y := v.Args[0] 6359 v_2 := v.Args[2] 6360 if v_2.Op != OpAMD64FlagLT_UGT { 6361 break 6362 } 6363 v.reset(OpCopy) 6364 v.Type = y.Type 6365 v.AddArg(y) 6366 return true 6367 } 6368 return false 6369 } 6370 func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool { 6371 // match: (CMOVLHI x y (InvertFlags cond)) 6372 // cond: 6373 // result: (CMOVLCS x y cond) 6374 for { 6375 _ = v.Args[2] 6376 x := v.Args[0] 6377 y := v.Args[1] 6378 v_2 := v.Args[2] 6379 if v_2.Op != OpAMD64InvertFlags { 6380 break 6381 } 6382 cond := v_2.Args[0] 6383 v.reset(OpAMD64CMOVLCS) 6384 v.AddArg(x) 6385 v.AddArg(y) 6386 v.AddArg(cond) 6387 return true 6388 } 6389 // match: (CMOVLHI y _ (FlagEQ)) 6390 // cond: 6391 // result: y 6392 for { 6393 _ = v.Args[2] 6394 y := v.Args[0] 6395 v_2 := v.Args[2] 6396 if v_2.Op != OpAMD64FlagEQ { 6397 break 6398 } 6399 v.reset(OpCopy) 6400 v.Type = y.Type 6401 v.AddArg(y) 6402 return true 6403 } 6404 // match: (CMOVLHI _ x (FlagGT_UGT)) 6405 // cond: 6406 // result: x 6407 for { 6408 _ = v.Args[2] 6409 x := v.Args[1] 6410 v_2 := v.Args[2] 6411 if v_2.Op != OpAMD64FlagGT_UGT { 6412 break 6413 } 6414 v.reset(OpCopy) 6415 v.Type = x.Type 6416 v.AddArg(x) 6417 return true 6418 } 6419 // match: (CMOVLHI y _ (FlagGT_ULT)) 6420 // cond: 6421 // result: y 6422 for { 6423 _ = v.Args[2] 6424 y := v.Args[0] 6425 v_2 := v.Args[2] 6426 if v_2.Op != OpAMD64FlagGT_ULT { 6427 break 6428 } 6429 v.reset(OpCopy) 6430 v.Type = y.Type 6431 v.AddArg(y) 6432 return true 6433 } 6434 // match: (CMOVLHI y _ (FlagLT_ULT)) 6435 // cond: 6436 // result: y 6437 for { 6438 _ = v.Args[2] 6439 y := v.Args[0] 6440 v_2 := v.Args[2] 6441 if v_2.Op != OpAMD64FlagLT_ULT { 6442 break 6443 } 6444 v.reset(OpCopy) 6445 v.Type = y.Type 6446 v.AddArg(y) 6447 return true 6448 } 6449 // match: (CMOVLHI _ x (FlagLT_UGT)) 6450 // cond: 6451 // result: x 6452 for { 6453 _ = v.Args[2] 6454 x := v.Args[1] 6455 v_2 := v.Args[2] 6456 if v_2.Op != OpAMD64FlagLT_UGT { 6457 break 6458 } 6459 v.reset(OpCopy) 6460 v.Type = x.Type 6461 v.AddArg(x) 6462 return true 6463 } 6464 return false 6465 } 6466 func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool { 6467 // match: 
(CMOVLLE x y (InvertFlags cond)) 6468 // cond: 6469 // result: (CMOVLGE x y cond) 6470 for { 6471 _ = v.Args[2] 6472 x := v.Args[0] 6473 y := v.Args[1] 6474 v_2 := v.Args[2] 6475 if v_2.Op != OpAMD64InvertFlags { 6476 break 6477 } 6478 cond := v_2.Args[0] 6479 v.reset(OpAMD64CMOVLGE) 6480 v.AddArg(x) 6481 v.AddArg(y) 6482 v.AddArg(cond) 6483 return true 6484 } 6485 // match: (CMOVLLE _ x (FlagEQ)) 6486 // cond: 6487 // result: x 6488 for { 6489 _ = v.Args[2] 6490 x := v.Args[1] 6491 v_2 := v.Args[2] 6492 if v_2.Op != OpAMD64FlagEQ { 6493 break 6494 } 6495 v.reset(OpCopy) 6496 v.Type = x.Type 6497 v.AddArg(x) 6498 return true 6499 } 6500 // match: (CMOVLLE y _ (FlagGT_UGT)) 6501 // cond: 6502 // result: y 6503 for { 6504 _ = v.Args[2] 6505 y := v.Args[0] 6506 v_2 := v.Args[2] 6507 if v_2.Op != OpAMD64FlagGT_UGT { 6508 break 6509 } 6510 v.reset(OpCopy) 6511 v.Type = y.Type 6512 v.AddArg(y) 6513 return true 6514 } 6515 // match: (CMOVLLE y _ (FlagGT_ULT)) 6516 // cond: 6517 // result: y 6518 for { 6519 _ = v.Args[2] 6520 y := v.Args[0] 6521 v_2 := v.Args[2] 6522 if v_2.Op != OpAMD64FlagGT_ULT { 6523 break 6524 } 6525 v.reset(OpCopy) 6526 v.Type = y.Type 6527 v.AddArg(y) 6528 return true 6529 } 6530 // match: (CMOVLLE _ x (FlagLT_ULT)) 6531 // cond: 6532 // result: x 6533 for { 6534 _ = v.Args[2] 6535 x := v.Args[1] 6536 v_2 := v.Args[2] 6537 if v_2.Op != OpAMD64FlagLT_ULT { 6538 break 6539 } 6540 v.reset(OpCopy) 6541 v.Type = x.Type 6542 v.AddArg(x) 6543 return true 6544 } 6545 // match: (CMOVLLE _ x (FlagLT_UGT)) 6546 // cond: 6547 // result: x 6548 for { 6549 _ = v.Args[2] 6550 x := v.Args[1] 6551 v_2 := v.Args[2] 6552 if v_2.Op != OpAMD64FlagLT_UGT { 6553 break 6554 } 6555 v.reset(OpCopy) 6556 v.Type = x.Type 6557 v.AddArg(x) 6558 return true 6559 } 6560 return false 6561 } 6562 func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool { 6563 // match: (CMOVLLS x y (InvertFlags cond)) 6564 // cond: 6565 // result: (CMOVLCC x y cond) 6566 for { 6567 _ = v.Args[2] 6568 x := v.Args[0] 6569 y := v.Args[1] 6570 v_2 := v.Args[2] 6571 if v_2.Op != OpAMD64InvertFlags { 6572 break 6573 } 6574 cond := v_2.Args[0] 6575 v.reset(OpAMD64CMOVLCC) 6576 v.AddArg(x) 6577 v.AddArg(y) 6578 v.AddArg(cond) 6579 return true 6580 } 6581 // match: (CMOVLLS _ x (FlagEQ)) 6582 // cond: 6583 // result: x 6584 for { 6585 _ = v.Args[2] 6586 x := v.Args[1] 6587 v_2 := v.Args[2] 6588 if v_2.Op != OpAMD64FlagEQ { 6589 break 6590 } 6591 v.reset(OpCopy) 6592 v.Type = x.Type 6593 v.AddArg(x) 6594 return true 6595 } 6596 // match: (CMOVLLS y _ (FlagGT_UGT)) 6597 // cond: 6598 // result: y 6599 for { 6600 _ = v.Args[2] 6601 y := v.Args[0] 6602 v_2 := v.Args[2] 6603 if v_2.Op != OpAMD64FlagGT_UGT { 6604 break 6605 } 6606 v.reset(OpCopy) 6607 v.Type = y.Type 6608 v.AddArg(y) 6609 return true 6610 } 6611 // match: (CMOVLLS _ x (FlagGT_ULT)) 6612 // cond: 6613 // result: x 6614 for { 6615 _ = v.Args[2] 6616 x := v.Args[1] 6617 v_2 := v.Args[2] 6618 if v_2.Op != OpAMD64FlagGT_ULT { 6619 break 6620 } 6621 v.reset(OpCopy) 6622 v.Type = x.Type 6623 v.AddArg(x) 6624 return true 6625 } 6626 // match: (CMOVLLS _ x (FlagLT_ULT)) 6627 // cond: 6628 // result: x 6629 for { 6630 _ = v.Args[2] 6631 x := v.Args[1] 6632 v_2 := v.Args[2] 6633 if v_2.Op != OpAMD64FlagLT_ULT { 6634 break 6635 } 6636 v.reset(OpCopy) 6637 v.Type = x.Type 6638 v.AddArg(x) 6639 return true 6640 } 6641 // match: (CMOVLLS y _ (FlagLT_UGT)) 6642 // cond: 6643 // result: y 6644 for { 6645 _ = v.Args[2] 6646 y := v.Args[0] 6647 v_2 := v.Args[2] 6648 if v_2.Op != 
OpAMD64FlagLT_UGT { 6649 break 6650 } 6651 v.reset(OpCopy) 6652 v.Type = y.Type 6653 v.AddArg(y) 6654 return true 6655 } 6656 return false 6657 } 6658 func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool { 6659 // match: (CMOVLLT x y (InvertFlags cond)) 6660 // cond: 6661 // result: (CMOVLGT x y cond) 6662 for { 6663 _ = v.Args[2] 6664 x := v.Args[0] 6665 y := v.Args[1] 6666 v_2 := v.Args[2] 6667 if v_2.Op != OpAMD64InvertFlags { 6668 break 6669 } 6670 cond := v_2.Args[0] 6671 v.reset(OpAMD64CMOVLGT) 6672 v.AddArg(x) 6673 v.AddArg(y) 6674 v.AddArg(cond) 6675 return true 6676 } 6677 // match: (CMOVLLT y _ (FlagEQ)) 6678 // cond: 6679 // result: y 6680 for { 6681 _ = v.Args[2] 6682 y := v.Args[0] 6683 v_2 := v.Args[2] 6684 if v_2.Op != OpAMD64FlagEQ { 6685 break 6686 } 6687 v.reset(OpCopy) 6688 v.Type = y.Type 6689 v.AddArg(y) 6690 return true 6691 } 6692 // match: (CMOVLLT y _ (FlagGT_UGT)) 6693 // cond: 6694 // result: y 6695 for { 6696 _ = v.Args[2] 6697 y := v.Args[0] 6698 v_2 := v.Args[2] 6699 if v_2.Op != OpAMD64FlagGT_UGT { 6700 break 6701 } 6702 v.reset(OpCopy) 6703 v.Type = y.Type 6704 v.AddArg(y) 6705 return true 6706 } 6707 // match: (CMOVLLT y _ (FlagGT_ULT)) 6708 // cond: 6709 // result: y 6710 for { 6711 _ = v.Args[2] 6712 y := v.Args[0] 6713 v_2 := v.Args[2] 6714 if v_2.Op != OpAMD64FlagGT_ULT { 6715 break 6716 } 6717 v.reset(OpCopy) 6718 v.Type = y.Type 6719 v.AddArg(y) 6720 return true 6721 } 6722 // match: (CMOVLLT _ x (FlagLT_ULT)) 6723 // cond: 6724 // result: x 6725 for { 6726 _ = v.Args[2] 6727 x := v.Args[1] 6728 v_2 := v.Args[2] 6729 if v_2.Op != OpAMD64FlagLT_ULT { 6730 break 6731 } 6732 v.reset(OpCopy) 6733 v.Type = x.Type 6734 v.AddArg(x) 6735 return true 6736 } 6737 // match: (CMOVLLT _ x (FlagLT_UGT)) 6738 // cond: 6739 // result: x 6740 for { 6741 _ = v.Args[2] 6742 x := v.Args[1] 6743 v_2 := v.Args[2] 6744 if v_2.Op != OpAMD64FlagLT_UGT { 6745 break 6746 } 6747 v.reset(OpCopy) 6748 v.Type = x.Type 6749 v.AddArg(x) 6750 return true 6751 } 6752 return false 6753 } 6754 func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool { 6755 // match: (CMOVLNE x y (InvertFlags cond)) 6756 // cond: 6757 // result: (CMOVLNE x y cond) 6758 for { 6759 _ = v.Args[2] 6760 x := v.Args[0] 6761 y := v.Args[1] 6762 v_2 := v.Args[2] 6763 if v_2.Op != OpAMD64InvertFlags { 6764 break 6765 } 6766 cond := v_2.Args[0] 6767 v.reset(OpAMD64CMOVLNE) 6768 v.AddArg(x) 6769 v.AddArg(y) 6770 v.AddArg(cond) 6771 return true 6772 } 6773 // match: (CMOVLNE y _ (FlagEQ)) 6774 // cond: 6775 // result: y 6776 for { 6777 _ = v.Args[2] 6778 y := v.Args[0] 6779 v_2 := v.Args[2] 6780 if v_2.Op != OpAMD64FlagEQ { 6781 break 6782 } 6783 v.reset(OpCopy) 6784 v.Type = y.Type 6785 v.AddArg(y) 6786 return true 6787 } 6788 // match: (CMOVLNE _ x (FlagGT_UGT)) 6789 // cond: 6790 // result: x 6791 for { 6792 _ = v.Args[2] 6793 x := v.Args[1] 6794 v_2 := v.Args[2] 6795 if v_2.Op != OpAMD64FlagGT_UGT { 6796 break 6797 } 6798 v.reset(OpCopy) 6799 v.Type = x.Type 6800 v.AddArg(x) 6801 return true 6802 } 6803 // match: (CMOVLNE _ x (FlagGT_ULT)) 6804 // cond: 6805 // result: x 6806 for { 6807 _ = v.Args[2] 6808 x := v.Args[1] 6809 v_2 := v.Args[2] 6810 if v_2.Op != OpAMD64FlagGT_ULT { 6811 break 6812 } 6813 v.reset(OpCopy) 6814 v.Type = x.Type 6815 v.AddArg(x) 6816 return true 6817 } 6818 // match: (CMOVLNE _ x (FlagLT_ULT)) 6819 // cond: 6820 // result: x 6821 for { 6822 _ = v.Args[2] 6823 x := v.Args[1] 6824 v_2 := v.Args[2] 6825 if v_2.Op != OpAMD64FlagLT_ULT { 6826 break 6827 } 6828 v.reset(OpCopy) 6829 v.Type = 
x.Type 6830 v.AddArg(x) 6831 return true 6832 } 6833 // match: (CMOVLNE _ x (FlagLT_UGT)) 6834 // cond: 6835 // result: x 6836 for { 6837 _ = v.Args[2] 6838 x := v.Args[1] 6839 v_2 := v.Args[2] 6840 if v_2.Op != OpAMD64FlagLT_UGT { 6841 break 6842 } 6843 v.reset(OpCopy) 6844 v.Type = x.Type 6845 v.AddArg(x) 6846 return true 6847 } 6848 return false 6849 } 6850 func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool { 6851 // match: (CMOVQCC x y (InvertFlags cond)) 6852 // cond: 6853 // result: (CMOVQLS x y cond) 6854 for { 6855 _ = v.Args[2] 6856 x := v.Args[0] 6857 y := v.Args[1] 6858 v_2 := v.Args[2] 6859 if v_2.Op != OpAMD64InvertFlags { 6860 break 6861 } 6862 cond := v_2.Args[0] 6863 v.reset(OpAMD64CMOVQLS) 6864 v.AddArg(x) 6865 v.AddArg(y) 6866 v.AddArg(cond) 6867 return true 6868 } 6869 // match: (CMOVQCC _ x (FlagEQ)) 6870 // cond: 6871 // result: x 6872 for { 6873 _ = v.Args[2] 6874 x := v.Args[1] 6875 v_2 := v.Args[2] 6876 if v_2.Op != OpAMD64FlagEQ { 6877 break 6878 } 6879 v.reset(OpCopy) 6880 v.Type = x.Type 6881 v.AddArg(x) 6882 return true 6883 } 6884 // match: (CMOVQCC _ x (FlagGT_UGT)) 6885 // cond: 6886 // result: x 6887 for { 6888 _ = v.Args[2] 6889 x := v.Args[1] 6890 v_2 := v.Args[2] 6891 if v_2.Op != OpAMD64FlagGT_UGT { 6892 break 6893 } 6894 v.reset(OpCopy) 6895 v.Type = x.Type 6896 v.AddArg(x) 6897 return true 6898 } 6899 // match: (CMOVQCC y _ (FlagGT_ULT)) 6900 // cond: 6901 // result: y 6902 for { 6903 _ = v.Args[2] 6904 y := v.Args[0] 6905 v_2 := v.Args[2] 6906 if v_2.Op != OpAMD64FlagGT_ULT { 6907 break 6908 } 6909 v.reset(OpCopy) 6910 v.Type = y.Type 6911 v.AddArg(y) 6912 return true 6913 } 6914 // match: (CMOVQCC y _ (FlagLT_ULT)) 6915 // cond: 6916 // result: y 6917 for { 6918 _ = v.Args[2] 6919 y := v.Args[0] 6920 v_2 := v.Args[2] 6921 if v_2.Op != OpAMD64FlagLT_ULT { 6922 break 6923 } 6924 v.reset(OpCopy) 6925 v.Type = y.Type 6926 v.AddArg(y) 6927 return true 6928 } 6929 // match: (CMOVQCC _ x (FlagLT_UGT)) 6930 // cond: 6931 // result: x 6932 for { 6933 _ = v.Args[2] 6934 x := v.Args[1] 6935 v_2 := v.Args[2] 6936 if v_2.Op != OpAMD64FlagLT_UGT { 6937 break 6938 } 6939 v.reset(OpCopy) 6940 v.Type = x.Type 6941 v.AddArg(x) 6942 return true 6943 } 6944 return false 6945 } 6946 func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool { 6947 // match: (CMOVQCS x y (InvertFlags cond)) 6948 // cond: 6949 // result: (CMOVQHI x y cond) 6950 for { 6951 _ = v.Args[2] 6952 x := v.Args[0] 6953 y := v.Args[1] 6954 v_2 := v.Args[2] 6955 if v_2.Op != OpAMD64InvertFlags { 6956 break 6957 } 6958 cond := v_2.Args[0] 6959 v.reset(OpAMD64CMOVQHI) 6960 v.AddArg(x) 6961 v.AddArg(y) 6962 v.AddArg(cond) 6963 return true 6964 } 6965 // match: (CMOVQCS y _ (FlagEQ)) 6966 // cond: 6967 // result: y 6968 for { 6969 _ = v.Args[2] 6970 y := v.Args[0] 6971 v_2 := v.Args[2] 6972 if v_2.Op != OpAMD64FlagEQ { 6973 break 6974 } 6975 v.reset(OpCopy) 6976 v.Type = y.Type 6977 v.AddArg(y) 6978 return true 6979 } 6980 // match: (CMOVQCS y _ (FlagGT_UGT)) 6981 // cond: 6982 // result: y 6983 for { 6984 _ = v.Args[2] 6985 y := v.Args[0] 6986 v_2 := v.Args[2] 6987 if v_2.Op != OpAMD64FlagGT_UGT { 6988 break 6989 } 6990 v.reset(OpCopy) 6991 v.Type = y.Type 6992 v.AddArg(y) 6993 return true 6994 } 6995 // match: (CMOVQCS _ x (FlagGT_ULT)) 6996 // cond: 6997 // result: x 6998 for { 6999 _ = v.Args[2] 7000 x := v.Args[1] 7001 v_2 := v.Args[2] 7002 if v_2.Op != OpAMD64FlagGT_ULT { 7003 break 7004 } 7005 v.reset(OpCopy) 7006 v.Type = x.Type 7007 v.AddArg(x) 7008 return true 7009 } 7010 // match: (CMOVQCS _ x 
(FlagLT_ULT)) 7011 // cond: 7012 // result: x 7013 for { 7014 _ = v.Args[2] 7015 x := v.Args[1] 7016 v_2 := v.Args[2] 7017 if v_2.Op != OpAMD64FlagLT_ULT { 7018 break 7019 } 7020 v.reset(OpCopy) 7021 v.Type = x.Type 7022 v.AddArg(x) 7023 return true 7024 } 7025 // match: (CMOVQCS y _ (FlagLT_UGT)) 7026 // cond: 7027 // result: y 7028 for { 7029 _ = v.Args[2] 7030 y := v.Args[0] 7031 v_2 := v.Args[2] 7032 if v_2.Op != OpAMD64FlagLT_UGT { 7033 break 7034 } 7035 v.reset(OpCopy) 7036 v.Type = y.Type 7037 v.AddArg(y) 7038 return true 7039 } 7040 return false 7041 } 7042 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { 7043 // match: (CMOVQEQ x y (InvertFlags cond)) 7044 // cond: 7045 // result: (CMOVQEQ x y cond) 7046 for { 7047 _ = v.Args[2] 7048 x := v.Args[0] 7049 y := v.Args[1] 7050 v_2 := v.Args[2] 7051 if v_2.Op != OpAMD64InvertFlags { 7052 break 7053 } 7054 cond := v_2.Args[0] 7055 v.reset(OpAMD64CMOVQEQ) 7056 v.AddArg(x) 7057 v.AddArg(y) 7058 v.AddArg(cond) 7059 return true 7060 } 7061 // match: (CMOVQEQ _ x (FlagEQ)) 7062 // cond: 7063 // result: x 7064 for { 7065 _ = v.Args[2] 7066 x := v.Args[1] 7067 v_2 := v.Args[2] 7068 if v_2.Op != OpAMD64FlagEQ { 7069 break 7070 } 7071 v.reset(OpCopy) 7072 v.Type = x.Type 7073 v.AddArg(x) 7074 return true 7075 } 7076 // match: (CMOVQEQ y _ (FlagGT_UGT)) 7077 // cond: 7078 // result: y 7079 for { 7080 _ = v.Args[2] 7081 y := v.Args[0] 7082 v_2 := v.Args[2] 7083 if v_2.Op != OpAMD64FlagGT_UGT { 7084 break 7085 } 7086 v.reset(OpCopy) 7087 v.Type = y.Type 7088 v.AddArg(y) 7089 return true 7090 } 7091 // match: (CMOVQEQ y _ (FlagGT_ULT)) 7092 // cond: 7093 // result: y 7094 for { 7095 _ = v.Args[2] 7096 y := v.Args[0] 7097 v_2 := v.Args[2] 7098 if v_2.Op != OpAMD64FlagGT_ULT { 7099 break 7100 } 7101 v.reset(OpCopy) 7102 v.Type = y.Type 7103 v.AddArg(y) 7104 return true 7105 } 7106 // match: (CMOVQEQ y _ (FlagLT_ULT)) 7107 // cond: 7108 // result: y 7109 for { 7110 _ = v.Args[2] 7111 y := v.Args[0] 7112 v_2 := v.Args[2] 7113 if v_2.Op != OpAMD64FlagLT_ULT { 7114 break 7115 } 7116 v.reset(OpCopy) 7117 v.Type = y.Type 7118 v.AddArg(y) 7119 return true 7120 } 7121 // match: (CMOVQEQ y _ (FlagLT_UGT)) 7122 // cond: 7123 // result: y 7124 for { 7125 _ = v.Args[2] 7126 y := v.Args[0] 7127 v_2 := v.Args[2] 7128 if v_2.Op != OpAMD64FlagLT_UGT { 7129 break 7130 } 7131 v.reset(OpCopy) 7132 v.Type = y.Type 7133 v.AddArg(y) 7134 return true 7135 } 7136 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 7137 // cond: c != 0 7138 // result: x 7139 for { 7140 _ = v.Args[2] 7141 x := v.Args[0] 7142 v_2 := v.Args[2] 7143 if v_2.Op != OpSelect1 { 7144 break 7145 } 7146 v_2_0 := v_2.Args[0] 7147 if v_2_0.Op != OpAMD64BSFQ { 7148 break 7149 } 7150 v_2_0_0 := v_2_0.Args[0] 7151 if v_2_0_0.Op != OpAMD64ORQconst { 7152 break 7153 } 7154 c := v_2_0_0.AuxInt 7155 if !(c != 0) { 7156 break 7157 } 7158 v.reset(OpCopy) 7159 v.Type = x.Type 7160 v.AddArg(x) 7161 return true 7162 } 7163 return false 7164 } 7165 func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool { 7166 // match: (CMOVQGE x y (InvertFlags cond)) 7167 // cond: 7168 // result: (CMOVQLE x y cond) 7169 for { 7170 _ = v.Args[2] 7171 x := v.Args[0] 7172 y := v.Args[1] 7173 v_2 := v.Args[2] 7174 if v_2.Op != OpAMD64InvertFlags { 7175 break 7176 } 7177 cond := v_2.Args[0] 7178 v.reset(OpAMD64CMOVQLE) 7179 v.AddArg(x) 7180 v.AddArg(y) 7181 v.AddArg(cond) 7182 return true 7183 } 7184 // match: (CMOVQGE _ x (FlagEQ)) 7185 // cond: 7186 // result: x 7187 for { 7188 _ = v.Args[2] 7189 x := v.Args[1] 7190 v_2 := 
v.Args[2] 7191 if v_2.Op != OpAMD64FlagEQ { 7192 break 7193 } 7194 v.reset(OpCopy) 7195 v.Type = x.Type 7196 v.AddArg(x) 7197 return true 7198 } 7199 // match: (CMOVQGE _ x (FlagGT_UGT)) 7200 // cond: 7201 // result: x 7202 for { 7203 _ = v.Args[2] 7204 x := v.Args[1] 7205 v_2 := v.Args[2] 7206 if v_2.Op != OpAMD64FlagGT_UGT { 7207 break 7208 } 7209 v.reset(OpCopy) 7210 v.Type = x.Type 7211 v.AddArg(x) 7212 return true 7213 } 7214 // match: (CMOVQGE _ x (FlagGT_ULT)) 7215 // cond: 7216 // result: x 7217 for { 7218 _ = v.Args[2] 7219 x := v.Args[1] 7220 v_2 := v.Args[2] 7221 if v_2.Op != OpAMD64FlagGT_ULT { 7222 break 7223 } 7224 v.reset(OpCopy) 7225 v.Type = x.Type 7226 v.AddArg(x) 7227 return true 7228 } 7229 // match: (CMOVQGE y _ (FlagLT_ULT)) 7230 // cond: 7231 // result: y 7232 for { 7233 _ = v.Args[2] 7234 y := v.Args[0] 7235 v_2 := v.Args[2] 7236 if v_2.Op != OpAMD64FlagLT_ULT { 7237 break 7238 } 7239 v.reset(OpCopy) 7240 v.Type = y.Type 7241 v.AddArg(y) 7242 return true 7243 } 7244 // match: (CMOVQGE y _ (FlagLT_UGT)) 7245 // cond: 7246 // result: y 7247 for { 7248 _ = v.Args[2] 7249 y := v.Args[0] 7250 v_2 := v.Args[2] 7251 if v_2.Op != OpAMD64FlagLT_UGT { 7252 break 7253 } 7254 v.reset(OpCopy) 7255 v.Type = y.Type 7256 v.AddArg(y) 7257 return true 7258 } 7259 return false 7260 } 7261 func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool { 7262 // match: (CMOVQGT x y (InvertFlags cond)) 7263 // cond: 7264 // result: (CMOVQLT x y cond) 7265 for { 7266 _ = v.Args[2] 7267 x := v.Args[0] 7268 y := v.Args[1] 7269 v_2 := v.Args[2] 7270 if v_2.Op != OpAMD64InvertFlags { 7271 break 7272 } 7273 cond := v_2.Args[0] 7274 v.reset(OpAMD64CMOVQLT) 7275 v.AddArg(x) 7276 v.AddArg(y) 7277 v.AddArg(cond) 7278 return true 7279 } 7280 // match: (CMOVQGT y _ (FlagEQ)) 7281 // cond: 7282 // result: y 7283 for { 7284 _ = v.Args[2] 7285 y := v.Args[0] 7286 v_2 := v.Args[2] 7287 if v_2.Op != OpAMD64FlagEQ { 7288 break 7289 } 7290 v.reset(OpCopy) 7291 v.Type = y.Type 7292 v.AddArg(y) 7293 return true 7294 } 7295 // match: (CMOVQGT _ x (FlagGT_UGT)) 7296 // cond: 7297 // result: x 7298 for { 7299 _ = v.Args[2] 7300 x := v.Args[1] 7301 v_2 := v.Args[2] 7302 if v_2.Op != OpAMD64FlagGT_UGT { 7303 break 7304 } 7305 v.reset(OpCopy) 7306 v.Type = x.Type 7307 v.AddArg(x) 7308 return true 7309 } 7310 // match: (CMOVQGT _ x (FlagGT_ULT)) 7311 // cond: 7312 // result: x 7313 for { 7314 _ = v.Args[2] 7315 x := v.Args[1] 7316 v_2 := v.Args[2] 7317 if v_2.Op != OpAMD64FlagGT_ULT { 7318 break 7319 } 7320 v.reset(OpCopy) 7321 v.Type = x.Type 7322 v.AddArg(x) 7323 return true 7324 } 7325 // match: (CMOVQGT y _ (FlagLT_ULT)) 7326 // cond: 7327 // result: y 7328 for { 7329 _ = v.Args[2] 7330 y := v.Args[0] 7331 v_2 := v.Args[2] 7332 if v_2.Op != OpAMD64FlagLT_ULT { 7333 break 7334 } 7335 v.reset(OpCopy) 7336 v.Type = y.Type 7337 v.AddArg(y) 7338 return true 7339 } 7340 // match: (CMOVQGT y _ (FlagLT_UGT)) 7341 // cond: 7342 // result: y 7343 for { 7344 _ = v.Args[2] 7345 y := v.Args[0] 7346 v_2 := v.Args[2] 7347 if v_2.Op != OpAMD64FlagLT_UGT { 7348 break 7349 } 7350 v.reset(OpCopy) 7351 v.Type = y.Type 7352 v.AddArg(y) 7353 return true 7354 } 7355 return false 7356 } 7357 func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool { 7358 // match: (CMOVQHI x y (InvertFlags cond)) 7359 // cond: 7360 // result: (CMOVQCS x y cond) 7361 for { 7362 _ = v.Args[2] 7363 x := v.Args[0] 7364 y := v.Args[1] 7365 v_2 := v.Args[2] 7366 if v_2.Op != OpAMD64InvertFlags { 7367 break 7368 } 7369 cond := v_2.Args[0] 7370 v.reset(OpAMD64CMOVQCS) 
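// NOTE (editorial, illustrative; not emitted by the rule generator): the
// CMOVQ* rules from here on mirror the CMOVL* set above at 64 bits. HI
// (unsigned >) and CS (unsigned <) are operand-swap duals:
//
//	a, b := uint64(3), uint64(7)
//	_ = (a > b) == (b < a) // true for any a, b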
func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool {
	// match: (CMOVQHI x y (InvertFlags cond))
	// cond:
	// result: (CMOVQCS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool {
	// match: (CMOVQLE x y (InvertFlags cond))
	// cond:
	// result: (CMOVQGE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
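// The five flag constants matched in these functions encode the outcome of a
// comparison under both signedness interpretations at once: FlagEQ (equal),
// FlagLT_ULT (less signed and unsigned), FlagLT_UGT (less signed, greater
// unsigned), FlagGT_ULT (greater signed, less unsigned), and FlagGT_UGT
// (greater both ways). A signed CMOV variant (LT, LE, GT, GE) reads only the
// LT/GT half of the name, while an unsigned variant (CS, CC, HI, LS) reads
// only the ULT/UGT half, which is why, e.g., CMOVQHI selects its moved
// argument on both FlagGT_UGT and FlagLT_UGT.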
func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool {
	// match: (CMOVQLS x y (InvertFlags cond))
	// cond:
	// result: (CMOVQCC x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool {
	// match: (CMOVQLT x y (InvertFlags cond))
	// cond:
	// result: (CMOVQGT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
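// When the flags argument is one of the constants above, the conditional
// move is decided at compile time: the rewrite resets v to a plain OpCopy of
// v.Args[1] when the constant satisfies the op's condition and of v.Args[0]
// otherwise, preserving the copied value's type. For CMOVQLT, for example,
// FlagLT_ULT and FlagLT_UGT (signed less) yield Args[1], while FlagEQ,
// FlagGT_ULT and FlagGT_UGT yield Args[0].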
func rewriteValueAMD64_OpAMD64CMOVQNE_0(v *Value) bool {
	// match: (CMOVQNE x y (InvertFlags cond))
	// cond:
	// result: (CMOVQNE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
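// CMOVQNE (like CMOVQEQ and the L/W-width counterparts) is its own dual
// under InvertFlags: swapping the operands of a comparison changes which
// ordered outcome holds, but not whether the operands are equal, so only the
// flags argument is unwrapped and the op itself is kept.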
func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool {
	// match: (CMOVWCC x y (InvertFlags cond))
	// cond:
	// result: (CMOVWLS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool {
	// match: (CMOVWCS x y (InvertFlags cond))
	// cond:
	// result: (CMOVWHI x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool {
	// match: (CMOVWEQ x y (InvertFlags cond))
	// cond:
	// result: (CMOVWEQ x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool {
	// match: (CMOVWGE x y (InvertFlags cond))
	// cond:
	// result: (CMOVWLE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool {
	// match: (CMOVWGT x y (InvertFlags cond))
	// cond:
	// result: (CMOVWLT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool {
	// match: (CMOVWHI x y (InvertFlags cond))
	// cond:
	// result: (CMOVWCS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool {
	// match: (CMOVWLE x y (InvertFlags cond))
	// cond:
	// result: (CMOVWGE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool {
	// match: (CMOVWLS x y (InvertFlags cond))
	// cond:
	// result: (CMOVWCC x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool {
	// match: (CMOVWLT x y (InvertFlags cond))
	// cond:
	// result: (CMOVWGT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool {
	// match: (CMOVWNE x y (InvertFlags cond))
	// cond:
	// result: (CMOVWNE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// cond:
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// cond:
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
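// The CMPx rewrites that follow canonicalize a constant operand to the
// right-hand side: (CMPB x (MOVLconst [c])) folds directly into a CMPBconst,
// while (CMPB (MOVLconst [c]) x) becomes an InvertFlags-wrapped CMPBconst so
// that later users see flags with the intended operand order. For the
// narrower widths the constant is first truncated to the compared width,
// e.g. int64(int8(c)) for byte compares.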
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
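// Comparing two known constants folds the CMPxconst away entirely: the pair
// of signed and unsigned comparisons of the width-truncated values picks
// exactly one of FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, or FlagGT_UGT,
// the same five-way encoding consumed by the CMOV rules above.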
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
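// The *constload forms carry both the comparison constant and the memory
// offset in a single AuxInt via ValAndOff; makeValAndOff and validValAndOff
// build and check that packed representation (each half must fit in 32
// bits), and ValAndOff(...).canAdd/add fold an extra ADDQconst or LEAQ
// displacement into the offset half only when doing so cannot overflow it.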
func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool {
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool {
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(int64(int8(c)),off)
	// result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(int64(int8(c)), off)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
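// The "@l.Block" result form used by the CMPxconst-of-load rules places the
// new CMPxconstload in the block of the load l rather than in v's own block:
// b is rebound to l.Block, the flags value is built there, and v is reset to
// an OpCopy of it. The guard l.Uses == 1 together with clobber(l) ensures
// the load is consumed only by this compare and can be absorbed into it.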
func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool {
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool {
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
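// Unlike the 8/16/32-bit compares, CMPQ can only fold a constant operand
// that satisfies is32Bit(c): the x86-64 compare instruction sign-extends a
// 32-bit immediate, so a 64-bit constant outside that range has to stay in
// a register.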
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
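// The MOVBQZX/MOVWQZX/MOVLQZX cases above exploit zero extension: a value
// zero-extended from 8, 16, or 32 bits is bounded by 0xFF, 0xFFFF, or
// 0xFFFFFFFF, so comparing it against a larger constant is statically
// FlagLT_ULT (less in both senses, since both sides are non-negative). The
// SHRQconst and ANDQconst cases in the next function derive similar upper
// bounds from shift amounts and masks.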
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
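// (CMPQconst x [0]) -> (TESTQ x x) is the usual compare-against-zero
// strength reduction: TESTQ x x sets ZF and SF from x exactly as a compare
// with zero does, while avoiding an immediate in the encoding; the
// (ANDQ x y) and (ANDQconst [c] x) variants additionally fold the mask
// into the TEST instead of materializing the AND result.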
v_1.AuxInt 10154 mem := v.Args[2] 10155 if !(validValAndOff(c, off)) { 10156 break 10157 } 10158 v.reset(OpAMD64CMPQconstload) 10159 v.AuxInt = makeValAndOff(c, off) 10160 v.Aux = sym 10161 v.AddArg(ptr) 10162 v.AddArg(mem) 10163 return true 10164 } 10165 return false 10166 } 10167 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { 10168 b := v.Block 10169 _ = b 10170 // match: (CMPW x (MOVLconst [c])) 10171 // cond: 10172 // result: (CMPWconst x [int64(int16(c))]) 10173 for { 10174 _ = v.Args[1] 10175 x := v.Args[0] 10176 v_1 := v.Args[1] 10177 if v_1.Op != OpAMD64MOVLconst { 10178 break 10179 } 10180 c := v_1.AuxInt 10181 v.reset(OpAMD64CMPWconst) 10182 v.AuxInt = int64(int16(c)) 10183 v.AddArg(x) 10184 return true 10185 } 10186 // match: (CMPW (MOVLconst [c]) x) 10187 // cond: 10188 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 10189 for { 10190 _ = v.Args[1] 10191 v_0 := v.Args[0] 10192 if v_0.Op != OpAMD64MOVLconst { 10193 break 10194 } 10195 c := v_0.AuxInt 10196 x := v.Args[1] 10197 v.reset(OpAMD64InvertFlags) 10198 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 10199 v0.AuxInt = int64(int16(c)) 10200 v0.AddArg(x) 10201 v.AddArg(v0) 10202 return true 10203 } 10204 // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x) 10205 // cond: canMergeLoad(v, l) && clobber(l) 10206 // result: (CMPWload {sym} [off] ptr x mem) 10207 for { 10208 _ = v.Args[1] 10209 l := v.Args[0] 10210 if l.Op != OpAMD64MOVWload { 10211 break 10212 } 10213 off := l.AuxInt 10214 sym := l.Aux 10215 _ = l.Args[1] 10216 ptr := l.Args[0] 10217 mem := l.Args[1] 10218 x := v.Args[1] 10219 if !(canMergeLoad(v, l) && clobber(l)) { 10220 break 10221 } 10222 v.reset(OpAMD64CMPWload) 10223 v.AuxInt = off 10224 v.Aux = sym 10225 v.AddArg(ptr) 10226 v.AddArg(x) 10227 v.AddArg(mem) 10228 return true 10229 } 10230 // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) 10231 // cond: canMergeLoad(v, l) && clobber(l) 10232 // result: (InvertFlags (CMPWload {sym} [off] ptr x mem)) 10233 for { 10234 _ = v.Args[1] 10235 x := v.Args[0] 10236 l := v.Args[1] 10237 if l.Op != OpAMD64MOVWload { 10238 break 10239 } 10240 off := l.AuxInt 10241 sym := l.Aux 10242 _ = l.Args[1] 10243 ptr := l.Args[0] 10244 mem := l.Args[1] 10245 if !(canMergeLoad(v, l) && clobber(l)) { 10246 break 10247 } 10248 v.reset(OpAMD64InvertFlags) 10249 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags) 10250 v0.AuxInt = off 10251 v0.Aux = sym 10252 v0.AddArg(ptr) 10253 v0.AddArg(x) 10254 v0.AddArg(mem) 10255 v.AddArg(v0) 10256 return true 10257 } 10258 return false 10259 } 10260 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { 10261 b := v.Block 10262 _ = b 10263 // match: (CMPWconst (MOVLconst [x]) [y]) 10264 // cond: int16(x)==int16(y) 10265 // result: (FlagEQ) 10266 for { 10267 y := v.AuxInt 10268 v_0 := v.Args[0] 10269 if v_0.Op != OpAMD64MOVLconst { 10270 break 10271 } 10272 x := v_0.AuxInt 10273 if !(int16(x) == int16(y)) { 10274 break 10275 } 10276 v.reset(OpAMD64FlagEQ) 10277 return true 10278 } 10279 // match: (CMPWconst (MOVLconst [x]) [y]) 10280 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 10281 // result: (FlagLT_ULT) 10282 for { 10283 y := v.AuxInt 10284 v_0 := v.Args[0] 10285 if v_0.Op != OpAMD64MOVLconst { 10286 break 10287 } 10288 x := v_0.AuxInt 10289 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 10290 break 10291 } 10292 v.reset(OpAMD64FlagLT_ULT) 10293 return true 10294 } 10295 // match: (CMPWconst (MOVLconst [x]) [y]) 10296 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 10297 // result: 
(FlagLT_UGT) 10298 for { 10299 y := v.AuxInt 10300 v_0 := v.Args[0] 10301 if v_0.Op != OpAMD64MOVLconst { 10302 break 10303 } 10304 x := v_0.AuxInt 10305 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 10306 break 10307 } 10308 v.reset(OpAMD64FlagLT_UGT) 10309 return true 10310 } 10311 // match: (CMPWconst (MOVLconst [x]) [y]) 10312 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 10313 // result: (FlagGT_ULT) 10314 for { 10315 y := v.AuxInt 10316 v_0 := v.Args[0] 10317 if v_0.Op != OpAMD64MOVLconst { 10318 break 10319 } 10320 x := v_0.AuxInt 10321 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 10322 break 10323 } 10324 v.reset(OpAMD64FlagGT_ULT) 10325 return true 10326 } 10327 // match: (CMPWconst (MOVLconst [x]) [y]) 10328 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 10329 // result: (FlagGT_UGT) 10330 for { 10331 y := v.AuxInt 10332 v_0 := v.Args[0] 10333 if v_0.Op != OpAMD64MOVLconst { 10334 break 10335 } 10336 x := v_0.AuxInt 10337 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 10338 break 10339 } 10340 v.reset(OpAMD64FlagGT_UGT) 10341 return true 10342 } 10343 // match: (CMPWconst (ANDLconst _ [m]) [n]) 10344 // cond: 0 <= int16(m) && int16(m) < int16(n) 10345 // result: (FlagLT_ULT) 10346 for { 10347 n := v.AuxInt 10348 v_0 := v.Args[0] 10349 if v_0.Op != OpAMD64ANDLconst { 10350 break 10351 } 10352 m := v_0.AuxInt 10353 if !(0 <= int16(m) && int16(m) < int16(n)) { 10354 break 10355 } 10356 v.reset(OpAMD64FlagLT_ULT) 10357 return true 10358 } 10359 // match: (CMPWconst (ANDL x y) [0]) 10360 // cond: 10361 // result: (TESTW x y) 10362 for { 10363 if v.AuxInt != 0 { 10364 break 10365 } 10366 v_0 := v.Args[0] 10367 if v_0.Op != OpAMD64ANDL { 10368 break 10369 } 10370 _ = v_0.Args[1] 10371 x := v_0.Args[0] 10372 y := v_0.Args[1] 10373 v.reset(OpAMD64TESTW) 10374 v.AddArg(x) 10375 v.AddArg(y) 10376 return true 10377 } 10378 // match: (CMPWconst (ANDLconst [c] x) [0]) 10379 // cond: 10380 // result: (TESTWconst [int64(int16(c))] x) 10381 for { 10382 if v.AuxInt != 0 { 10383 break 10384 } 10385 v_0 := v.Args[0] 10386 if v_0.Op != OpAMD64ANDLconst { 10387 break 10388 } 10389 c := v_0.AuxInt 10390 x := v_0.Args[0] 10391 v.reset(OpAMD64TESTWconst) 10392 v.AuxInt = int64(int16(c)) 10393 v.AddArg(x) 10394 return true 10395 } 10396 // match: (CMPWconst x [0]) 10397 // cond: 10398 // result: (TESTW x x) 10399 for { 10400 if v.AuxInt != 0 { 10401 break 10402 } 10403 x := v.Args[0] 10404 v.reset(OpAMD64TESTW) 10405 v.AddArg(x) 10406 v.AddArg(x) 10407 return true 10408 } 10409 // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) 10410 // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l) 10411 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem) 10412 for { 10413 c := v.AuxInt 10414 l := v.Args[0] 10415 if l.Op != OpAMD64MOVWload { 10416 break 10417 } 10418 off := l.AuxInt 10419 sym := l.Aux 10420 _ = l.Args[1] 10421 ptr := l.Args[0] 10422 mem := l.Args[1] 10423 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) { 10424 break 10425 } 10426 b = l.Block 10427 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) 10428 v.reset(OpCopy) 10429 v.AddArg(v0) 10430 v0.AuxInt = makeValAndOff(c, off) 10431 v0.Aux = sym 10432 v0.AddArg(ptr) 10433 v0.AddArg(mem) 10434 return true 10435 } 10436 return false 10437 } 10438 func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool { 10439 // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 10440 // cond: ValAndOff(valoff1).canAdd(off2) 10441 // result: (CMPWconstload 
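// Editorial note (not generated code): when both operands of a 16-bit compare
// are constants, the CMPWconst rules above decide the comparison at compile
// time and replace it with a flag constant that records the signed and the
// unsigned ordering independently. A minimal sketch of that selection logic,
// using a hypothetical helper name rather than anything defined in this file:
//
//	func flagForConstCMPW(x, y int64) string {
//		sx, sy := int16(x), int16(y)   // signed 16-bit views
//		ux, uy := uint16(x), uint16(y) // unsigned 16-bit views
//		switch {
//		case sx == sy:
//			return "FlagEQ"
//		case sx < sy && ux < uy:
//			return "FlagLT_ULT"
//		case sx < sy && ux > uy:
//			return "FlagLT_UGT"
//		case sx > sy && ux < uy:
//			return "FlagGT_ULT"
//		default: // sx > sy && ux > uy
//			return "FlagGT_UGT"
//		}
//	}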
func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(int64(int16(c)),off)
	// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(int64(int16(c)), off)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool {
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSDload_0(v *Value) bool {
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool {
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSSload_0(v *Value) bool {
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULL_0(v *Value) bool {
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULLU_0(v *Value) bool {
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
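// Editorial note (not generated code): the HMUL* rules above and below exploit
// commutativity. If only the second operand is rematerializable, swapping the
// operands lets the register allocator recompute that value directly in the
// fixed operand register instead of keeping it live across the multiply.
// Sketch of the guard, with x and y as in the rules:
//
//	if !x.rematerializeable() && y.rematerializeable() {
//		x, y = y, x // put the cheap-to-recompute value first
//	}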
func rewriteValueAMD64_OpAMD64HMULQ_0(v *Value) bool {
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULQU_0(v *Value) bool {
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool {
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} y (ADDLconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
	// cond:
	// result: (LEAL2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [1] y) x)
	// cond:
	// result: (LEAL2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// cond:
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [2] y) x)
	// cond:
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [3] y) x)
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL2_0(v *Value) bool {
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// cond:
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL4_0(v *Value) bool {
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL8_0(v *Value) bool {
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
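// Editorial note (not generated code): the LEAQ rules that follow mirror the
// LEAL rules above at 64 bits: an ADD feeding an LEA becomes a two-register
// LEA, and a constant left shift of the index upgrades the scale, since
// SHLQconst [k] y computes y*(1<<k). A sketch of the scale mapping, with a
// hypothetical helper name:
//
//	func leaScale(shift int64) int64 {
//		return 1 << uint(shift) // [1]→x2, [2]→x4, [3]→x8 variants
//	}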
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
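// Editorial note (not generated code): in the scaled-index folds above, a
// constant added to the index is multiplied by the scale before it joins the
// displacement (c+2*d, c+4*d, c+8*d), and the fold is only taken while the
// combined displacement still fits the signed 32-bit field, e.g.:
//
//	ok := is32Bit(c + 8*d) // guard used by the LEAQ8 rule below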
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
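// Editorial note (not generated code): MOVBQZX, handled next, narrows any
// wider load to a plain byte load (MOVBload already zero-extends into the
// full register), drops the extension entirely when the upper 56 bits are
// known zero, and is idempotent:
//
//	(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)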
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
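// Editorial note (not generated code): the first MOVBload rule below performs
// store-to-load forwarding: a byte load that observes an immediately
// preceding byte store to the same address is replaced by a zero-extension
// of the stored value. The match requires the same symbol, offset, and base
// pointer:
//
//	sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)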
mergeSym(sym1, sym2) 12504 v.AddArg(ptr) 12505 v.AddArg(idx) 12506 v.AddArg(mem) 12507 return true 12508 } 12509 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 12510 // cond: ptr.Op != OpSB 12511 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 12512 for { 12513 off := v.AuxInt 12514 sym := v.Aux 12515 _ = v.Args[1] 12516 v_0 := v.Args[0] 12517 if v_0.Op != OpAMD64ADDQ { 12518 break 12519 } 12520 _ = v_0.Args[1] 12521 ptr := v_0.Args[0] 12522 idx := v_0.Args[1] 12523 mem := v.Args[1] 12524 if !(ptr.Op != OpSB) { 12525 break 12526 } 12527 v.reset(OpAMD64MOVBloadidx1) 12528 v.AuxInt = off 12529 v.Aux = sym 12530 v.AddArg(ptr) 12531 v.AddArg(idx) 12532 v.AddArg(mem) 12533 return true 12534 } 12535 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 12536 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 12537 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12538 for { 12539 off1 := v.AuxInt 12540 sym1 := v.Aux 12541 _ = v.Args[1] 12542 v_0 := v.Args[0] 12543 if v_0.Op != OpAMD64LEAL { 12544 break 12545 } 12546 off2 := v_0.AuxInt 12547 sym2 := v_0.Aux 12548 base := v_0.Args[0] 12549 mem := v.Args[1] 12550 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 12551 break 12552 } 12553 v.reset(OpAMD64MOVBload) 12554 v.AuxInt = off1 + off2 12555 v.Aux = mergeSym(sym1, sym2) 12556 v.AddArg(base) 12557 v.AddArg(mem) 12558 return true 12559 } 12560 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 12561 // cond: is32Bit(off1+off2) 12562 // result: (MOVBload [off1+off2] {sym} ptr mem) 12563 for { 12564 off1 := v.AuxInt 12565 sym := v.Aux 12566 _ = v.Args[1] 12567 v_0 := v.Args[0] 12568 if v_0.Op != OpAMD64ADDLconst { 12569 break 12570 } 12571 off2 := v_0.AuxInt 12572 ptr := v_0.Args[0] 12573 mem := v.Args[1] 12574 if !(is32Bit(off1 + off2)) { 12575 break 12576 } 12577 v.reset(OpAMD64MOVBload) 12578 v.AuxInt = off1 + off2 12579 v.Aux = sym 12580 v.AddArg(ptr) 12581 v.AddArg(mem) 12582 return true 12583 } 12584 // match: (MOVBload [off] {sym} (SB) _) 12585 // cond: symIsRO(sym) 12586 // result: (MOVLconst [int64(read8(sym, off))]) 12587 for { 12588 off := v.AuxInt 12589 sym := v.Aux 12590 _ = v.Args[1] 12591 v_0 := v.Args[0] 12592 if v_0.Op != OpSB { 12593 break 12594 } 12595 if !(symIsRO(sym)) { 12596 break 12597 } 12598 v.reset(OpAMD64MOVLconst) 12599 v.AuxInt = int64(read8(sym, off)) 12600 return true 12601 } 12602 return false 12603 } 12604 func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { 12605 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 12606 // cond: is32Bit(c+d) 12607 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 12608 for { 12609 c := v.AuxInt 12610 sym := v.Aux 12611 _ = v.Args[2] 12612 v_0 := v.Args[0] 12613 if v_0.Op != OpAMD64ADDQconst { 12614 break 12615 } 12616 d := v_0.AuxInt 12617 ptr := v_0.Args[0] 12618 idx := v.Args[1] 12619 mem := v.Args[2] 12620 if !(is32Bit(c + d)) { 12621 break 12622 } 12623 v.reset(OpAMD64MOVBloadidx1) 12624 v.AuxInt = c + d 12625 v.Aux = sym 12626 v.AddArg(ptr) 12627 v.AddArg(idx) 12628 v.AddArg(mem) 12629 return true 12630 } 12631 // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 12632 // cond: is32Bit(c+d) 12633 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 12634 for { 12635 c := v.AuxInt 12636 sym := v.Aux 12637 _ = v.Args[2] 12638 idx := v.Args[0] 12639 v_1 := v.Args[1] 12640 if v_1.Op != OpAMD64ADDQconst { 12641 break 12642 } 12643 d := v_1.AuxInt 12644 ptr := v_1.Args[0] 12645 mem := v.Args[2] 12646 if !(is32Bit(c + d)) { 12647 break 12648 } 12649 
v.reset(OpAMD64MOVBloadidx1) 12650 v.AuxInt = c + d 12651 v.Aux = sym 12652 v.AddArg(ptr) 12653 v.AddArg(idx) 12654 v.AddArg(mem) 12655 return true 12656 } 12657 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 12658 // cond: is32Bit(c+d) 12659 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 12660 for { 12661 c := v.AuxInt 12662 sym := v.Aux 12663 _ = v.Args[2] 12664 ptr := v.Args[0] 12665 v_1 := v.Args[1] 12666 if v_1.Op != OpAMD64ADDQconst { 12667 break 12668 } 12669 d := v_1.AuxInt 12670 idx := v_1.Args[0] 12671 mem := v.Args[2] 12672 if !(is32Bit(c + d)) { 12673 break 12674 } 12675 v.reset(OpAMD64MOVBloadidx1) 12676 v.AuxInt = c + d 12677 v.Aux = sym 12678 v.AddArg(ptr) 12679 v.AddArg(idx) 12680 v.AddArg(mem) 12681 return true 12682 } 12683 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 12684 // cond: is32Bit(c+d) 12685 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 12686 for { 12687 c := v.AuxInt 12688 sym := v.Aux 12689 _ = v.Args[2] 12690 v_0 := v.Args[0] 12691 if v_0.Op != OpAMD64ADDQconst { 12692 break 12693 } 12694 d := v_0.AuxInt 12695 idx := v_0.Args[0] 12696 ptr := v.Args[1] 12697 mem := v.Args[2] 12698 if !(is32Bit(c + d)) { 12699 break 12700 } 12701 v.reset(OpAMD64MOVBloadidx1) 12702 v.AuxInt = c + d 12703 v.Aux = sym 12704 v.AddArg(ptr) 12705 v.AddArg(idx) 12706 v.AddArg(mem) 12707 return true 12708 } 12709 // match: (MOVBloadidx1 [i] {s} p (MOVQconst [c]) mem) 12710 // cond: is32Bit(i+c) 12711 // result: (MOVBload [i+c] {s} p mem) 12712 for { 12713 i := v.AuxInt 12714 s := v.Aux 12715 _ = v.Args[2] 12716 p := v.Args[0] 12717 v_1 := v.Args[1] 12718 if v_1.Op != OpAMD64MOVQconst { 12719 break 12720 } 12721 c := v_1.AuxInt 12722 mem := v.Args[2] 12723 if !(is32Bit(i + c)) { 12724 break 12725 } 12726 v.reset(OpAMD64MOVBload) 12727 v.AuxInt = i + c 12728 v.Aux = s 12729 v.AddArg(p) 12730 v.AddArg(mem) 12731 return true 12732 } 12733 // match: (MOVBloadidx1 [i] {s} (MOVQconst [c]) p mem) 12734 // cond: is32Bit(i+c) 12735 // result: (MOVBload [i+c] {s} p mem) 12736 for { 12737 i := v.AuxInt 12738 s := v.Aux 12739 _ = v.Args[2] 12740 v_0 := v.Args[0] 12741 if v_0.Op != OpAMD64MOVQconst { 12742 break 12743 } 12744 c := v_0.AuxInt 12745 p := v.Args[1] 12746 mem := v.Args[2] 12747 if !(is32Bit(i + c)) { 12748 break 12749 } 12750 v.reset(OpAMD64MOVBload) 12751 v.AuxInt = i + c 12752 v.Aux = s 12753 v.AddArg(p) 12754 v.AddArg(mem) 12755 return true 12756 } 12757 return false 12758 } 12759 func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { 12760 // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) 12761 // cond: y.Uses == 1 12762 // result: (SETLstore [off] {sym} ptr x mem) 12763 for { 12764 off := v.AuxInt 12765 sym := v.Aux 12766 _ = v.Args[2] 12767 ptr := v.Args[0] 12768 y := v.Args[1] 12769 if y.Op != OpAMD64SETL { 12770 break 12771 } 12772 x := y.Args[0] 12773 mem := v.Args[2] 12774 if !(y.Uses == 1) { 12775 break 12776 } 12777 v.reset(OpAMD64SETLstore) 12778 v.AuxInt = off 12779 v.Aux = sym 12780 v.AddArg(ptr) 12781 v.AddArg(x) 12782 v.AddArg(mem) 12783 return true 12784 } 12785 // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) 12786 // cond: y.Uses == 1 12787 // result: (SETLEstore [off] {sym} ptr x mem) 12788 for { 12789 off := v.AuxInt 12790 sym := v.Aux 12791 _ = v.Args[2] 12792 ptr := v.Args[0] 12793 y := v.Args[1] 12794 if y.Op != OpAMD64SETLE { 12795 break 12796 } 12797 x := y.Args[0] 12798 mem := v.Args[2] 12799 if !(y.Uses == 1) { 12800 break 12801 } 12802 v.reset(OpAMD64SETLEstore) 12803 v.AuxInt = off 12804 v.Aux = sym 
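// The rebuilt SETLEstore keeps the original offset and symbol; ptr, the
// flags value x, and mem are re-attached below. All ten SETcc fusion
// rules in this function share this shape: a once-used SETcc byte that
// is only ever stored never needs a register, so the SETcc/MOVBstore
// pair collapses into a single SETccstore that writes the condition
// byte straight to memory. A source-level sketch (illustrative only):
//
//	*p = a <= b // SETLE + MOVBstore -> SETLEstore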
12805 v.AddArg(ptr) 12806 v.AddArg(x) 12807 v.AddArg(mem) 12808 return true 12809 } 12810 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) 12811 // cond: y.Uses == 1 12812 // result: (SETGstore [off] {sym} ptr x mem) 12813 for { 12814 off := v.AuxInt 12815 sym := v.Aux 12816 _ = v.Args[2] 12817 ptr := v.Args[0] 12818 y := v.Args[1] 12819 if y.Op != OpAMD64SETG { 12820 break 12821 } 12822 x := y.Args[0] 12823 mem := v.Args[2] 12824 if !(y.Uses == 1) { 12825 break 12826 } 12827 v.reset(OpAMD64SETGstore) 12828 v.AuxInt = off 12829 v.Aux = sym 12830 v.AddArg(ptr) 12831 v.AddArg(x) 12832 v.AddArg(mem) 12833 return true 12834 } 12835 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) 12836 // cond: y.Uses == 1 12837 // result: (SETGEstore [off] {sym} ptr x mem) 12838 for { 12839 off := v.AuxInt 12840 sym := v.Aux 12841 _ = v.Args[2] 12842 ptr := v.Args[0] 12843 y := v.Args[1] 12844 if y.Op != OpAMD64SETGE { 12845 break 12846 } 12847 x := y.Args[0] 12848 mem := v.Args[2] 12849 if !(y.Uses == 1) { 12850 break 12851 } 12852 v.reset(OpAMD64SETGEstore) 12853 v.AuxInt = off 12854 v.Aux = sym 12855 v.AddArg(ptr) 12856 v.AddArg(x) 12857 v.AddArg(mem) 12858 return true 12859 } 12860 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) 12861 // cond: y.Uses == 1 12862 // result: (SETEQstore [off] {sym} ptr x mem) 12863 for { 12864 off := v.AuxInt 12865 sym := v.Aux 12866 _ = v.Args[2] 12867 ptr := v.Args[0] 12868 y := v.Args[1] 12869 if y.Op != OpAMD64SETEQ { 12870 break 12871 } 12872 x := y.Args[0] 12873 mem := v.Args[2] 12874 if !(y.Uses == 1) { 12875 break 12876 } 12877 v.reset(OpAMD64SETEQstore) 12878 v.AuxInt = off 12879 v.Aux = sym 12880 v.AddArg(ptr) 12881 v.AddArg(x) 12882 v.AddArg(mem) 12883 return true 12884 } 12885 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) 12886 // cond: y.Uses == 1 12887 // result: (SETNEstore [off] {sym} ptr x mem) 12888 for { 12889 off := v.AuxInt 12890 sym := v.Aux 12891 _ = v.Args[2] 12892 ptr := v.Args[0] 12893 y := v.Args[1] 12894 if y.Op != OpAMD64SETNE { 12895 break 12896 } 12897 x := y.Args[0] 12898 mem := v.Args[2] 12899 if !(y.Uses == 1) { 12900 break 12901 } 12902 v.reset(OpAMD64SETNEstore) 12903 v.AuxInt = off 12904 v.Aux = sym 12905 v.AddArg(ptr) 12906 v.AddArg(x) 12907 v.AddArg(mem) 12908 return true 12909 } 12910 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) 12911 // cond: y.Uses == 1 12912 // result: (SETBstore [off] {sym} ptr x mem) 12913 for { 12914 off := v.AuxInt 12915 sym := v.Aux 12916 _ = v.Args[2] 12917 ptr := v.Args[0] 12918 y := v.Args[1] 12919 if y.Op != OpAMD64SETB { 12920 break 12921 } 12922 x := y.Args[0] 12923 mem := v.Args[2] 12924 if !(y.Uses == 1) { 12925 break 12926 } 12927 v.reset(OpAMD64SETBstore) 12928 v.AuxInt = off 12929 v.Aux = sym 12930 v.AddArg(ptr) 12931 v.AddArg(x) 12932 v.AddArg(mem) 12933 return true 12934 } 12935 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) 12936 // cond: y.Uses == 1 12937 // result: (SETBEstore [off] {sym} ptr x mem) 12938 for { 12939 off := v.AuxInt 12940 sym := v.Aux 12941 _ = v.Args[2] 12942 ptr := v.Args[0] 12943 y := v.Args[1] 12944 if y.Op != OpAMD64SETBE { 12945 break 12946 } 12947 x := y.Args[0] 12948 mem := v.Args[2] 12949 if !(y.Uses == 1) { 12950 break 12951 } 12952 v.reset(OpAMD64SETBEstore) 12953 v.AuxInt = off 12954 v.Aux = sym 12955 v.AddArg(ptr) 12956 v.AddArg(x) 12957 v.AddArg(mem) 12958 return true 12959 } 12960 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) 12961 // cond: y.Uses == 1 12962 // result: (SETAstore [off] {sym} ptr x mem) 12963 for { 12964 
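// SETA/SETAE ("above"/"above or equal") are the unsigned counterparts
// of SETG/SETGE; the fusion below is otherwise identical to the signed
// cases handled earlier in this function.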
off := v.AuxInt 12965 sym := v.Aux 12966 _ = v.Args[2] 12967 ptr := v.Args[0] 12968 y := v.Args[1] 12969 if y.Op != OpAMD64SETA { 12970 break 12971 } 12972 x := y.Args[0] 12973 mem := v.Args[2] 12974 if !(y.Uses == 1) { 12975 break 12976 } 12977 v.reset(OpAMD64SETAstore) 12978 v.AuxInt = off 12979 v.Aux = sym 12980 v.AddArg(ptr) 12981 v.AddArg(x) 12982 v.AddArg(mem) 12983 return true 12984 } 12985 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) 12986 // cond: y.Uses == 1 12987 // result: (SETAEstore [off] {sym} ptr x mem) 12988 for { 12989 off := v.AuxInt 12990 sym := v.Aux 12991 _ = v.Args[2] 12992 ptr := v.Args[0] 12993 y := v.Args[1] 12994 if y.Op != OpAMD64SETAE { 12995 break 12996 } 12997 x := y.Args[0] 12998 mem := v.Args[2] 12999 if !(y.Uses == 1) { 13000 break 13001 } 13002 v.reset(OpAMD64SETAEstore) 13003 v.AuxInt = off 13004 v.Aux = sym 13005 v.AddArg(ptr) 13006 v.AddArg(x) 13007 v.AddArg(mem) 13008 return true 13009 } 13010 return false 13011 } 13012 func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { 13013 b := v.Block 13014 _ = b 13015 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 13016 // cond: 13017 // result: (MOVBstore [off] {sym} ptr x mem) 13018 for { 13019 off := v.AuxInt 13020 sym := v.Aux 13021 _ = v.Args[2] 13022 ptr := v.Args[0] 13023 v_1 := v.Args[1] 13024 if v_1.Op != OpAMD64MOVBQSX { 13025 break 13026 } 13027 x := v_1.Args[0] 13028 mem := v.Args[2] 13029 v.reset(OpAMD64MOVBstore) 13030 v.AuxInt = off 13031 v.Aux = sym 13032 v.AddArg(ptr) 13033 v.AddArg(x) 13034 v.AddArg(mem) 13035 return true 13036 } 13037 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 13038 // cond: 13039 // result: (MOVBstore [off] {sym} ptr x mem) 13040 for { 13041 off := v.AuxInt 13042 sym := v.Aux 13043 _ = v.Args[2] 13044 ptr := v.Args[0] 13045 v_1 := v.Args[1] 13046 if v_1.Op != OpAMD64MOVBQZX { 13047 break 13048 } 13049 x := v_1.Args[0] 13050 mem := v.Args[2] 13051 v.reset(OpAMD64MOVBstore) 13052 v.AuxInt = off 13053 v.Aux = sym 13054 v.AddArg(ptr) 13055 v.AddArg(x) 13056 v.AddArg(mem) 13057 return true 13058 } 13059 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 13060 // cond: is32Bit(off1+off2) 13061 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 13062 for { 13063 off1 := v.AuxInt 13064 sym := v.Aux 13065 _ = v.Args[2] 13066 v_0 := v.Args[0] 13067 if v_0.Op != OpAMD64ADDQconst { 13068 break 13069 } 13070 off2 := v_0.AuxInt 13071 ptr := v_0.Args[0] 13072 val := v.Args[1] 13073 mem := v.Args[2] 13074 if !(is32Bit(off1 + off2)) { 13075 break 13076 } 13077 v.reset(OpAMD64MOVBstore) 13078 v.AuxInt = off1 + off2 13079 v.Aux = sym 13080 v.AddArg(ptr) 13081 v.AddArg(val) 13082 v.AddArg(mem) 13083 return true 13084 } 13085 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 13086 // cond: validOff(off) 13087 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 13088 for { 13089 off := v.AuxInt 13090 sym := v.Aux 13091 _ = v.Args[2] 13092 ptr := v.Args[0] 13093 v_1 := v.Args[1] 13094 if v_1.Op != OpAMD64MOVLconst { 13095 break 13096 } 13097 c := v_1.AuxInt 13098 mem := v.Args[2] 13099 if !(validOff(off)) { 13100 break 13101 } 13102 v.reset(OpAMD64MOVBstoreconst) 13103 v.AuxInt = makeValAndOff(int64(int8(c)), off) 13104 v.Aux = sym 13105 v.AddArg(ptr) 13106 v.AddArg(mem) 13107 return true 13108 } 13109 // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) 13110 // cond: validOff(off) 13111 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 13112 for { 13113 off := v.AuxInt 13114 sym 
:= v.Aux 13115 _ = v.Args[2] 13116 ptr := v.Args[0] 13117 v_1 := v.Args[1] 13118 if v_1.Op != OpAMD64MOVQconst { 13119 break 13120 } 13121 c := v_1.AuxInt 13122 mem := v.Args[2] 13123 if !(validOff(off)) { 13124 break 13125 } 13126 v.reset(OpAMD64MOVBstoreconst) 13127 v.AuxInt = makeValAndOff(int64(int8(c)), off) 13128 v.Aux = sym 13129 v.AddArg(ptr) 13130 v.AddArg(mem) 13131 return true 13132 } 13133 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 13134 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13135 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13136 for { 13137 off1 := v.AuxInt 13138 sym1 := v.Aux 13139 _ = v.Args[2] 13140 v_0 := v.Args[0] 13141 if v_0.Op != OpAMD64LEAQ { 13142 break 13143 } 13144 off2 := v_0.AuxInt 13145 sym2 := v_0.Aux 13146 base := v_0.Args[0] 13147 val := v.Args[1] 13148 mem := v.Args[2] 13149 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13150 break 13151 } 13152 v.reset(OpAMD64MOVBstore) 13153 v.AuxInt = off1 + off2 13154 v.Aux = mergeSym(sym1, sym2) 13155 v.AddArg(base) 13156 v.AddArg(val) 13157 v.AddArg(mem) 13158 return true 13159 } 13160 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 13161 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13162 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13163 for { 13164 off1 := v.AuxInt 13165 sym1 := v.Aux 13166 _ = v.Args[2] 13167 v_0 := v.Args[0] 13168 if v_0.Op != OpAMD64LEAQ1 { 13169 break 13170 } 13171 off2 := v_0.AuxInt 13172 sym2 := v_0.Aux 13173 _ = v_0.Args[1] 13174 ptr := v_0.Args[0] 13175 idx := v_0.Args[1] 13176 val := v.Args[1] 13177 mem := v.Args[2] 13178 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13179 break 13180 } 13181 v.reset(OpAMD64MOVBstoreidx1) 13182 v.AuxInt = off1 + off2 13183 v.Aux = mergeSym(sym1, sym2) 13184 v.AddArg(ptr) 13185 v.AddArg(idx) 13186 v.AddArg(val) 13187 v.AddArg(mem) 13188 return true 13189 } 13190 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 13191 // cond: ptr.Op != OpSB 13192 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 13193 for { 13194 off := v.AuxInt 13195 sym := v.Aux 13196 _ = v.Args[2] 13197 v_0 := v.Args[0] 13198 if v_0.Op != OpAMD64ADDQ { 13199 break 13200 } 13201 _ = v_0.Args[1] 13202 ptr := v_0.Args[0] 13203 idx := v_0.Args[1] 13204 val := v.Args[1] 13205 mem := v.Args[2] 13206 if !(ptr.Op != OpSB) { 13207 break 13208 } 13209 v.reset(OpAMD64MOVBstoreidx1) 13210 v.AuxInt = off 13211 v.Aux = sym 13212 v.AddArg(ptr) 13213 v.AddArg(idx) 13214 v.AddArg(val) 13215 v.AddArg(mem) 13216 return true 13217 } 13218 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 13219 // cond: x0.Uses == 1 && clobber(x0) 13220 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 13221 for { 13222 i := v.AuxInt 13223 s := v.Aux 13224 _ = v.Args[2] 13225 p := v.Args[0] 13226 w := v.Args[1] 13227 x0 := v.Args[2] 13228 if x0.Op != OpAMD64MOVBstore { 13229 break 13230 } 13231 if x0.AuxInt != i-1 { 13232 break 13233 } 13234 if x0.Aux != s { 13235 break 13236 } 13237 _ = x0.Args[2] 13238 if p != x0.Args[0] { 13239 break 13240 } 13241 x0_1 := x0.Args[1] 13242 if x0_1.Op != OpAMD64SHRWconst { 13243 break 13244 } 13245 if x0_1.AuxInt != 8 { 13246 break 13247 } 13248 if w != x0_1.Args[0] { 13249 break 13250 } 13251 mem := x0.Args[2] 13252 if !(x0.Uses == 1 && clobber(x0)) { 13253 break 13254 } 13255 v.reset(OpAMD64MOVWstore) 13256 v.AuxInt = i - 1 13257 v.Aux = s 13258 v.AddArg(p) 13259 v0 := 
b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) 13260 v0.AuxInt = 8 13261 v0.AddArg(w) 13262 v.AddArg(v0) 13263 v.AddArg(mem) 13264 return true 13265 } 13266 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 13267 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 13268 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 13269 for { 13270 i := v.AuxInt 13271 s := v.Aux 13272 _ = v.Args[2] 13273 p := v.Args[0] 13274 w := v.Args[1] 13275 x2 := v.Args[2] 13276 if x2.Op != OpAMD64MOVBstore { 13277 break 13278 } 13279 if x2.AuxInt != i-1 { 13280 break 13281 } 13282 if x2.Aux != s { 13283 break 13284 } 13285 _ = x2.Args[2] 13286 if p != x2.Args[0] { 13287 break 13288 } 13289 x2_1 := x2.Args[1] 13290 if x2_1.Op != OpAMD64SHRLconst { 13291 break 13292 } 13293 if x2_1.AuxInt != 8 { 13294 break 13295 } 13296 if w != x2_1.Args[0] { 13297 break 13298 } 13299 x1 := x2.Args[2] 13300 if x1.Op != OpAMD64MOVBstore { 13301 break 13302 } 13303 if x1.AuxInt != i-2 { 13304 break 13305 } 13306 if x1.Aux != s { 13307 break 13308 } 13309 _ = x1.Args[2] 13310 if p != x1.Args[0] { 13311 break 13312 } 13313 x1_1 := x1.Args[1] 13314 if x1_1.Op != OpAMD64SHRLconst { 13315 break 13316 } 13317 if x1_1.AuxInt != 16 { 13318 break 13319 } 13320 if w != x1_1.Args[0] { 13321 break 13322 } 13323 x0 := x1.Args[2] 13324 if x0.Op != OpAMD64MOVBstore { 13325 break 13326 } 13327 if x0.AuxInt != i-3 { 13328 break 13329 } 13330 if x0.Aux != s { 13331 break 13332 } 13333 _ = x0.Args[2] 13334 if p != x0.Args[0] { 13335 break 13336 } 13337 x0_1 := x0.Args[1] 13338 if x0_1.Op != OpAMD64SHRLconst { 13339 break 13340 } 13341 if x0_1.AuxInt != 24 { 13342 break 13343 } 13344 if w != x0_1.Args[0] { 13345 break 13346 } 13347 mem := x0.Args[2] 13348 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 13349 break 13350 } 13351 v.reset(OpAMD64MOVLstore) 13352 v.AuxInt = i - 3 13353 v.Aux = s 13354 v.AddArg(p) 13355 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) 13356 v0.AddArg(w) 13357 v.AddArg(v0) 13358 v.AddArg(mem) 13359 return true 13360 } 13361 return false 13362 } 13363 func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { 13364 b := v.Block 13365 _ = b 13366 typ := &b.Func.Config.Types 13367 _ = typ 13368 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 13369 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 13370 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 13371 for { 13372 i := v.AuxInt 13373 s := v.Aux 13374 _ = v.Args[2] 13375 p := v.Args[0] 13376 w := v.Args[1] 13377 x6 := v.Args[2] 13378 if x6.Op != OpAMD64MOVBstore { 13379 break 13380 } 13381 if x6.AuxInt != i-1 { 13382 break 13383 } 13384 if x6.Aux != s { 13385 break 13386 } 13387 _ = x6.Args[2] 13388 if p != x6.Args[0] { 13389 break 13390 } 13391 x6_1 := x6.Args[1] 13392 if x6_1.Op != OpAMD64SHRQconst { 13393 break 13394 } 13395 if x6_1.AuxInt != 8 { 13396 break 
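// x6 is not fed by w>>8, so the 8-byte big-endian store pattern
// cannot match here; fall through to the remaining rules.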
13397 } 13398 if w != x6_1.Args[0] { 13399 break 13400 } 13401 x5 := x6.Args[2] 13402 if x5.Op != OpAMD64MOVBstore { 13403 break 13404 } 13405 if x5.AuxInt != i-2 { 13406 break 13407 } 13408 if x5.Aux != s { 13409 break 13410 } 13411 _ = x5.Args[2] 13412 if p != x5.Args[0] { 13413 break 13414 } 13415 x5_1 := x5.Args[1] 13416 if x5_1.Op != OpAMD64SHRQconst { 13417 break 13418 } 13419 if x5_1.AuxInt != 16 { 13420 break 13421 } 13422 if w != x5_1.Args[0] { 13423 break 13424 } 13425 x4 := x5.Args[2] 13426 if x4.Op != OpAMD64MOVBstore { 13427 break 13428 } 13429 if x4.AuxInt != i-3 { 13430 break 13431 } 13432 if x4.Aux != s { 13433 break 13434 } 13435 _ = x4.Args[2] 13436 if p != x4.Args[0] { 13437 break 13438 } 13439 x4_1 := x4.Args[1] 13440 if x4_1.Op != OpAMD64SHRQconst { 13441 break 13442 } 13443 if x4_1.AuxInt != 24 { 13444 break 13445 } 13446 if w != x4_1.Args[0] { 13447 break 13448 } 13449 x3 := x4.Args[2] 13450 if x3.Op != OpAMD64MOVBstore { 13451 break 13452 } 13453 if x3.AuxInt != i-4 { 13454 break 13455 } 13456 if x3.Aux != s { 13457 break 13458 } 13459 _ = x3.Args[2] 13460 if p != x3.Args[0] { 13461 break 13462 } 13463 x3_1 := x3.Args[1] 13464 if x3_1.Op != OpAMD64SHRQconst { 13465 break 13466 } 13467 if x3_1.AuxInt != 32 { 13468 break 13469 } 13470 if w != x3_1.Args[0] { 13471 break 13472 } 13473 x2 := x3.Args[2] 13474 if x2.Op != OpAMD64MOVBstore { 13475 break 13476 } 13477 if x2.AuxInt != i-5 { 13478 break 13479 } 13480 if x2.Aux != s { 13481 break 13482 } 13483 _ = x2.Args[2] 13484 if p != x2.Args[0] { 13485 break 13486 } 13487 x2_1 := x2.Args[1] 13488 if x2_1.Op != OpAMD64SHRQconst { 13489 break 13490 } 13491 if x2_1.AuxInt != 40 { 13492 break 13493 } 13494 if w != x2_1.Args[0] { 13495 break 13496 } 13497 x1 := x2.Args[2] 13498 if x1.Op != OpAMD64MOVBstore { 13499 break 13500 } 13501 if x1.AuxInt != i-6 { 13502 break 13503 } 13504 if x1.Aux != s { 13505 break 13506 } 13507 _ = x1.Args[2] 13508 if p != x1.Args[0] { 13509 break 13510 } 13511 x1_1 := x1.Args[1] 13512 if x1_1.Op != OpAMD64SHRQconst { 13513 break 13514 } 13515 if x1_1.AuxInt != 48 { 13516 break 13517 } 13518 if w != x1_1.Args[0] { 13519 break 13520 } 13521 x0 := x1.Args[2] 13522 if x0.Op != OpAMD64MOVBstore { 13523 break 13524 } 13525 if x0.AuxInt != i-7 { 13526 break 13527 } 13528 if x0.Aux != s { 13529 break 13530 } 13531 _ = x0.Args[2] 13532 if p != x0.Args[0] { 13533 break 13534 } 13535 x0_1 := x0.Args[1] 13536 if x0_1.Op != OpAMD64SHRQconst { 13537 break 13538 } 13539 if x0_1.AuxInt != 56 { 13540 break 13541 } 13542 if w != x0_1.Args[0] { 13543 break 13544 } 13545 mem := x0.Args[2] 13546 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 13547 break 13548 } 13549 v.reset(OpAMD64MOVQstore) 13550 v.AuxInt = i - 7 13551 v.Aux = s 13552 v.AddArg(p) 13553 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) 13554 v0.AddArg(w) 13555 v.AddArg(v0) 13556 v.AddArg(mem) 13557 return true 13558 } 13559 // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 13560 // cond: x.Uses == 1 && clobber(x) 13561 // result: (MOVWstore [i-1] {s} p w mem) 13562 for { 13563 i := v.AuxInt 13564 s := v.Aux 13565 _ = v.Args[2] 13566 p := v.Args[0] 13567 v_1 := v.Args[1] 13568 if v_1.Op != OpAMD64SHRWconst { 13569 break 13570 } 13571 if v_1.AuxInt != 8 { 13572 break 13573 } 13574 w := v_1.Args[0] 13575 x := v.Args[2] 13576 if x.Op != OpAMD64MOVBstore 
{ 13577 break 13578 } 13579 if x.AuxInt != i-1 { 13580 break 13581 } 13582 if x.Aux != s { 13583 break 13584 } 13585 _ = x.Args[2] 13586 if p != x.Args[0] { 13587 break 13588 } 13589 if w != x.Args[1] { 13590 break 13591 } 13592 mem := x.Args[2] 13593 if !(x.Uses == 1 && clobber(x)) { 13594 break 13595 } 13596 v.reset(OpAMD64MOVWstore) 13597 v.AuxInt = i - 1 13598 v.Aux = s 13599 v.AddArg(p) 13600 v.AddArg(w) 13601 v.AddArg(mem) 13602 return true 13603 } 13604 // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 13605 // cond: x.Uses == 1 && clobber(x) 13606 // result: (MOVWstore [i-1] {s} p w mem) 13607 for { 13608 i := v.AuxInt 13609 s := v.Aux 13610 _ = v.Args[2] 13611 p := v.Args[0] 13612 v_1 := v.Args[1] 13613 if v_1.Op != OpAMD64SHRLconst { 13614 break 13615 } 13616 if v_1.AuxInt != 8 { 13617 break 13618 } 13619 w := v_1.Args[0] 13620 x := v.Args[2] 13621 if x.Op != OpAMD64MOVBstore { 13622 break 13623 } 13624 if x.AuxInt != i-1 { 13625 break 13626 } 13627 if x.Aux != s { 13628 break 13629 } 13630 _ = x.Args[2] 13631 if p != x.Args[0] { 13632 break 13633 } 13634 if w != x.Args[1] { 13635 break 13636 } 13637 mem := x.Args[2] 13638 if !(x.Uses == 1 && clobber(x)) { 13639 break 13640 } 13641 v.reset(OpAMD64MOVWstore) 13642 v.AuxInt = i - 1 13643 v.Aux = s 13644 v.AddArg(p) 13645 v.AddArg(w) 13646 v.AddArg(mem) 13647 return true 13648 } 13649 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 13650 // cond: x.Uses == 1 && clobber(x) 13651 // result: (MOVWstore [i-1] {s} p w mem) 13652 for { 13653 i := v.AuxInt 13654 s := v.Aux 13655 _ = v.Args[2] 13656 p := v.Args[0] 13657 v_1 := v.Args[1] 13658 if v_1.Op != OpAMD64SHRQconst { 13659 break 13660 } 13661 if v_1.AuxInt != 8 { 13662 break 13663 } 13664 w := v_1.Args[0] 13665 x := v.Args[2] 13666 if x.Op != OpAMD64MOVBstore { 13667 break 13668 } 13669 if x.AuxInt != i-1 { 13670 break 13671 } 13672 if x.Aux != s { 13673 break 13674 } 13675 _ = x.Args[2] 13676 if p != x.Args[0] { 13677 break 13678 } 13679 if w != x.Args[1] { 13680 break 13681 } 13682 mem := x.Args[2] 13683 if !(x.Uses == 1 && clobber(x)) { 13684 break 13685 } 13686 v.reset(OpAMD64MOVWstore) 13687 v.AuxInt = i - 1 13688 v.Aux = s 13689 v.AddArg(p) 13690 v.AddArg(w) 13691 v.AddArg(mem) 13692 return true 13693 } 13694 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem)) 13695 // cond: x.Uses == 1 && clobber(x) 13696 // result: (MOVWstore [i] {s} p w mem) 13697 for { 13698 i := v.AuxInt 13699 s := v.Aux 13700 _ = v.Args[2] 13701 p := v.Args[0] 13702 w := v.Args[1] 13703 x := v.Args[2] 13704 if x.Op != OpAMD64MOVBstore { 13705 break 13706 } 13707 if x.AuxInt != i+1 { 13708 break 13709 } 13710 if x.Aux != s { 13711 break 13712 } 13713 _ = x.Args[2] 13714 if p != x.Args[0] { 13715 break 13716 } 13717 x_1 := x.Args[1] 13718 if x_1.Op != OpAMD64SHRWconst { 13719 break 13720 } 13721 if x_1.AuxInt != 8 { 13722 break 13723 } 13724 if w != x_1.Args[0] { 13725 break 13726 } 13727 mem := x.Args[2] 13728 if !(x.Uses == 1 && clobber(x)) { 13729 break 13730 } 13731 v.reset(OpAMD64MOVWstore) 13732 v.AuxInt = i 13733 v.Aux = s 13734 v.AddArg(p) 13735 v.AddArg(w) 13736 v.AddArg(mem) 13737 return true 13738 } 13739 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem)) 13740 // cond: x.Uses == 1 && clobber(x) 13741 // result: (MOVWstore [i] {s} p w mem) 13742 for { 13743 i := v.AuxInt 13744 s := v.Aux 13745 _ = v.Args[2] 13746 p := v.Args[0] 13747 w := v.Args[1] 13748 x := v.Args[2] 
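// Here the current store writes the low byte of w at [i] while the
// older store x (checked below) writes w>>8 at [i+1]: together they
// spell a little-endian 16-bit store, so the pair fuses into
// MOVWstore [i] w. A source-level sketch (illustrative only):
//
//	p[i+1] = byte(w >> 8) // x, the earlier store
//	p[i] = byte(w)        // v, the store being rewritten
//	// => a single 16-bit store of w at &p[i]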
13749 if x.Op != OpAMD64MOVBstore { 13750 break 13751 } 13752 if x.AuxInt != i+1 { 13753 break 13754 } 13755 if x.Aux != s { 13756 break 13757 } 13758 _ = x.Args[2] 13759 if p != x.Args[0] { 13760 break 13761 } 13762 x_1 := x.Args[1] 13763 if x_1.Op != OpAMD64SHRLconst { 13764 break 13765 } 13766 if x_1.AuxInt != 8 { 13767 break 13768 } 13769 if w != x_1.Args[0] { 13770 break 13771 } 13772 mem := x.Args[2] 13773 if !(x.Uses == 1 && clobber(x)) { 13774 break 13775 } 13776 v.reset(OpAMD64MOVWstore) 13777 v.AuxInt = i 13778 v.Aux = s 13779 v.AddArg(p) 13780 v.AddArg(w) 13781 v.AddArg(mem) 13782 return true 13783 } 13784 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem)) 13785 // cond: x.Uses == 1 && clobber(x) 13786 // result: (MOVWstore [i] {s} p w mem) 13787 for { 13788 i := v.AuxInt 13789 s := v.Aux 13790 _ = v.Args[2] 13791 p := v.Args[0] 13792 w := v.Args[1] 13793 x := v.Args[2] 13794 if x.Op != OpAMD64MOVBstore { 13795 break 13796 } 13797 if x.AuxInt != i+1 { 13798 break 13799 } 13800 if x.Aux != s { 13801 break 13802 } 13803 _ = x.Args[2] 13804 if p != x.Args[0] { 13805 break 13806 } 13807 x_1 := x.Args[1] 13808 if x_1.Op != OpAMD64SHRQconst { 13809 break 13810 } 13811 if x_1.AuxInt != 8 { 13812 break 13813 } 13814 if w != x_1.Args[0] { 13815 break 13816 } 13817 mem := x.Args[2] 13818 if !(x.Uses == 1 && clobber(x)) { 13819 break 13820 } 13821 v.reset(OpAMD64MOVWstore) 13822 v.AuxInt = i 13823 v.Aux = s 13824 v.AddArg(p) 13825 v.AddArg(w) 13826 v.AddArg(mem) 13827 return true 13828 } 13829 // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) 13830 // cond: x.Uses == 1 && clobber(x) 13831 // result: (MOVWstore [i-1] {s} p w0 mem) 13832 for { 13833 i := v.AuxInt 13834 s := v.Aux 13835 _ = v.Args[2] 13836 p := v.Args[0] 13837 v_1 := v.Args[1] 13838 if v_1.Op != OpAMD64SHRLconst { 13839 break 13840 } 13841 j := v_1.AuxInt 13842 w := v_1.Args[0] 13843 x := v.Args[2] 13844 if x.Op != OpAMD64MOVBstore { 13845 break 13846 } 13847 if x.AuxInt != i-1 { 13848 break 13849 } 13850 if x.Aux != s { 13851 break 13852 } 13853 _ = x.Args[2] 13854 if p != x.Args[0] { 13855 break 13856 } 13857 w0 := x.Args[1] 13858 if w0.Op != OpAMD64SHRLconst { 13859 break 13860 } 13861 if w0.AuxInt != j-8 { 13862 break 13863 } 13864 if w != w0.Args[0] { 13865 break 13866 } 13867 mem := x.Args[2] 13868 if !(x.Uses == 1 && clobber(x)) { 13869 break 13870 } 13871 v.reset(OpAMD64MOVWstore) 13872 v.AuxInt = i - 1 13873 v.Aux = s 13874 v.AddArg(p) 13875 v.AddArg(w0) 13876 v.AddArg(mem) 13877 return true 13878 } 13879 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 13880 // cond: x.Uses == 1 && clobber(x) 13881 // result: (MOVWstore [i-1] {s} p w0 mem) 13882 for { 13883 i := v.AuxInt 13884 s := v.Aux 13885 _ = v.Args[2] 13886 p := v.Args[0] 13887 v_1 := v.Args[1] 13888 if v_1.Op != OpAMD64SHRQconst { 13889 break 13890 } 13891 j := v_1.AuxInt 13892 w := v_1.Args[0] 13893 x := v.Args[2] 13894 if x.Op != OpAMD64MOVBstore { 13895 break 13896 } 13897 if x.AuxInt != i-1 { 13898 break 13899 } 13900 if x.Aux != s { 13901 break 13902 } 13903 _ = x.Args[2] 13904 if p != x.Args[0] { 13905 break 13906 } 13907 w0 := x.Args[1] 13908 if w0.Op != OpAMD64SHRQconst { 13909 break 13910 } 13911 if w0.AuxInt != j-8 { 13912 break 13913 } 13914 if w != w0.Args[0] { 13915 break 13916 } 13917 mem := x.Args[2] 13918 if !(x.Uses == 1 && clobber(x)) { 13919 break 13920 } 13921 v.reset(OpAMD64MOVWstore) 13922 v.AuxInt = i 
- 1 13923 v.Aux = s 13924 v.AddArg(p) 13925 v.AddArg(w0) 13926 v.AddArg(mem) 13927 return true 13928 } 13929 // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) 13930 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 13931 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) 13932 for { 13933 i := v.AuxInt 13934 s := v.Aux 13935 _ = v.Args[2] 13936 p := v.Args[0] 13937 x1 := v.Args[1] 13938 if x1.Op != OpAMD64MOVBload { 13939 break 13940 } 13941 j := x1.AuxInt 13942 s2 := x1.Aux 13943 _ = x1.Args[1] 13944 p2 := x1.Args[0] 13945 mem := x1.Args[1] 13946 mem2 := v.Args[2] 13947 if mem2.Op != OpAMD64MOVBstore { 13948 break 13949 } 13950 if mem2.AuxInt != i-1 { 13951 break 13952 } 13953 if mem2.Aux != s { 13954 break 13955 } 13956 _ = mem2.Args[2] 13957 if p != mem2.Args[0] { 13958 break 13959 } 13960 x2 := mem2.Args[1] 13961 if x2.Op != OpAMD64MOVBload { 13962 break 13963 } 13964 if x2.AuxInt != j-1 { 13965 break 13966 } 13967 if x2.Aux != s2 { 13968 break 13969 } 13970 _ = x2.Args[1] 13971 if p2 != x2.Args[0] { 13972 break 13973 } 13974 if mem != x2.Args[1] { 13975 break 13976 } 13977 if mem != mem2.Args[2] { 13978 break 13979 } 13980 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 13981 break 13982 } 13983 v.reset(OpAMD64MOVWstore) 13984 v.AuxInt = i - 1 13985 v.Aux = s 13986 v.AddArg(p) 13987 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16) 13988 v0.AuxInt = j - 1 13989 v0.Aux = s2 13990 v0.AddArg(p2) 13991 v0.AddArg(mem) 13992 v.AddArg(v0) 13993 v.AddArg(mem) 13994 return true 13995 } 13996 return false 13997 } 13998 func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { 13999 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 14000 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 14001 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 14002 for { 14003 off1 := v.AuxInt 14004 sym1 := v.Aux 14005 _ = v.Args[2] 14006 v_0 := v.Args[0] 14007 if v_0.Op != OpAMD64LEAL { 14008 break 14009 } 14010 off2 := v_0.AuxInt 14011 sym2 := v_0.Aux 14012 base := v_0.Args[0] 14013 val := v.Args[1] 14014 mem := v.Args[2] 14015 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 14016 break 14017 } 14018 v.reset(OpAMD64MOVBstore) 14019 v.AuxInt = off1 + off2 14020 v.Aux = mergeSym(sym1, sym2) 14021 v.AddArg(base) 14022 v.AddArg(val) 14023 v.AddArg(mem) 14024 return true 14025 } 14026 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 14027 // cond: is32Bit(off1+off2) 14028 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 14029 for { 14030 off1 := v.AuxInt 14031 sym := v.Aux 14032 _ = v.Args[2] 14033 v_0 := v.Args[0] 14034 if v_0.Op != OpAMD64ADDLconst { 14035 break 14036 } 14037 off2 := v_0.AuxInt 14038 ptr := v_0.Args[0] 14039 val := v.Args[1] 14040 mem := v.Args[2] 14041 if !(is32Bit(off1 + off2)) { 14042 break 14043 } 14044 v.reset(OpAMD64MOVBstore) 14045 v.AuxInt = off1 + off2 14046 v.Aux = sym 14047 v.AddArg(ptr) 14048 v.AddArg(val) 14049 v.AddArg(mem) 14050 return true 14051 } 14052 return false 14053 } 14054 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { 14055 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 14056 // cond: ValAndOff(sc).canAdd(off) 14057 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 14058 for { 14059 sc := v.AuxInt 14060 s := v.Aux 14061 _ = v.Args[1] 14062 v_0 := 
v.Args[0] 14063 if v_0.Op != OpAMD64ADDQconst { 14064 break 14065 } 14066 off := v_0.AuxInt 14067 ptr := v_0.Args[0] 14068 mem := v.Args[1] 14069 if !(ValAndOff(sc).canAdd(off)) { 14070 break 14071 } 14072 v.reset(OpAMD64MOVBstoreconst) 14073 v.AuxInt = ValAndOff(sc).add(off) 14074 v.Aux = s 14075 v.AddArg(ptr) 14076 v.AddArg(mem) 14077 return true 14078 } 14079 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 14080 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 14081 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 14082 for { 14083 sc := v.AuxInt 14084 sym1 := v.Aux 14085 _ = v.Args[1] 14086 v_0 := v.Args[0] 14087 if v_0.Op != OpAMD64LEAQ { 14088 break 14089 } 14090 off := v_0.AuxInt 14091 sym2 := v_0.Aux 14092 ptr := v_0.Args[0] 14093 mem := v.Args[1] 14094 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 14095 break 14096 } 14097 v.reset(OpAMD64MOVBstoreconst) 14098 v.AuxInt = ValAndOff(sc).add(off) 14099 v.Aux = mergeSym(sym1, sym2) 14100 v.AddArg(ptr) 14101 v.AddArg(mem) 14102 return true 14103 } 14104 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 14105 // cond: canMergeSym(sym1, sym2) 14106 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 14107 for { 14108 x := v.AuxInt 14109 sym1 := v.Aux 14110 _ = v.Args[1] 14111 v_0 := v.Args[0] 14112 if v_0.Op != OpAMD64LEAQ1 { 14113 break 14114 } 14115 off := v_0.AuxInt 14116 sym2 := v_0.Aux 14117 _ = v_0.Args[1] 14118 ptr := v_0.Args[0] 14119 idx := v_0.Args[1] 14120 mem := v.Args[1] 14121 if !(canMergeSym(sym1, sym2)) { 14122 break 14123 } 14124 v.reset(OpAMD64MOVBstoreconstidx1) 14125 v.AuxInt = ValAndOff(x).add(off) 14126 v.Aux = mergeSym(sym1, sym2) 14127 v.AddArg(ptr) 14128 v.AddArg(idx) 14129 v.AddArg(mem) 14130 return true 14131 } 14132 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 14133 // cond: 14134 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 14135 for { 14136 x := v.AuxInt 14137 sym := v.Aux 14138 _ = v.Args[1] 14139 v_0 := v.Args[0] 14140 if v_0.Op != OpAMD64ADDQ { 14141 break 14142 } 14143 _ = v_0.Args[1] 14144 ptr := v_0.Args[0] 14145 idx := v_0.Args[1] 14146 mem := v.Args[1] 14147 v.reset(OpAMD64MOVBstoreconstidx1) 14148 v.AuxInt = x 14149 v.Aux = sym 14150 v.AddArg(ptr) 14151 v.AddArg(idx) 14152 v.AddArg(mem) 14153 return true 14154 } 14155 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 14156 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 14157 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 14158 for { 14159 c := v.AuxInt 14160 s := v.Aux 14161 _ = v.Args[1] 14162 p := v.Args[0] 14163 x := v.Args[1] 14164 if x.Op != OpAMD64MOVBstoreconst { 14165 break 14166 } 14167 a := x.AuxInt 14168 if x.Aux != s { 14169 break 14170 } 14171 _ = x.Args[1] 14172 if p != x.Args[0] { 14173 break 14174 } 14175 mem := x.Args[1] 14176 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 14177 break 14178 } 14179 v.reset(OpAMD64MOVWstoreconst) 14180 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 14181 v.Aux = s 14182 v.AddArg(p) 14183 v.AddArg(mem) 14184 return true 14185 } 14186 // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) 14187 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 14188 // result: (MOVWstoreconst 
[makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 14189 for { 14190 a := v.AuxInt 14191 s := v.Aux 14192 _ = v.Args[1] 14193 p := v.Args[0] 14194 x := v.Args[1] 14195 if x.Op != OpAMD64MOVBstoreconst { 14196 break 14197 } 14198 c := x.AuxInt 14199 if x.Aux != s { 14200 break 14201 } 14202 _ = x.Args[1] 14203 if p != x.Args[0] { 14204 break 14205 } 14206 mem := x.Args[1] 14207 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 14208 break 14209 } 14210 v.reset(OpAMD64MOVWstoreconst) 14211 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 14212 v.Aux = s 14213 v.AddArg(p) 14214 v.AddArg(mem) 14215 return true 14216 } 14217 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 14218 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 14219 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 14220 for { 14221 sc := v.AuxInt 14222 sym1 := v.Aux 14223 _ = v.Args[1] 14224 v_0 := v.Args[0] 14225 if v_0.Op != OpAMD64LEAL { 14226 break 14227 } 14228 off := v_0.AuxInt 14229 sym2 := v_0.Aux 14230 ptr := v_0.Args[0] 14231 mem := v.Args[1] 14232 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 14233 break 14234 } 14235 v.reset(OpAMD64MOVBstoreconst) 14236 v.AuxInt = ValAndOff(sc).add(off) 14237 v.Aux = mergeSym(sym1, sym2) 14238 v.AddArg(ptr) 14239 v.AddArg(mem) 14240 return true 14241 } 14242 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 14243 // cond: ValAndOff(sc).canAdd(off) 14244 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 14245 for { 14246 sc := v.AuxInt 14247 s := v.Aux 14248 _ = v.Args[1] 14249 v_0 := v.Args[0] 14250 if v_0.Op != OpAMD64ADDLconst { 14251 break 14252 } 14253 off := v_0.AuxInt 14254 ptr := v_0.Args[0] 14255 mem := v.Args[1] 14256 if !(ValAndOff(sc).canAdd(off)) { 14257 break 14258 } 14259 v.reset(OpAMD64MOVBstoreconst) 14260 v.AuxInt = ValAndOff(sc).add(off) 14261 v.Aux = s 14262 v.AddArg(ptr) 14263 v.AddArg(mem) 14264 return true 14265 } 14266 return false 14267 } 14268 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { 14269 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 14270 // cond: ValAndOff(x).canAdd(c) 14271 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 14272 for { 14273 x := v.AuxInt 14274 sym := v.Aux 14275 _ = v.Args[2] 14276 v_0 := v.Args[0] 14277 if v_0.Op != OpAMD64ADDQconst { 14278 break 14279 } 14280 c := v_0.AuxInt 14281 ptr := v_0.Args[0] 14282 idx := v.Args[1] 14283 mem := v.Args[2] 14284 if !(ValAndOff(x).canAdd(c)) { 14285 break 14286 } 14287 v.reset(OpAMD64MOVBstoreconstidx1) 14288 v.AuxInt = ValAndOff(x).add(c) 14289 v.Aux = sym 14290 v.AddArg(ptr) 14291 v.AddArg(idx) 14292 v.AddArg(mem) 14293 return true 14294 } 14295 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 14296 // cond: ValAndOff(x).canAdd(c) 14297 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 14298 for { 14299 x := v.AuxInt 14300 sym := v.Aux 14301 _ = v.Args[2] 14302 ptr := v.Args[0] 14303 v_1 := v.Args[1] 14304 if v_1.Op != OpAMD64ADDQconst { 14305 break 14306 } 14307 c := v_1.AuxInt 14308 idx := v_1.Args[0] 14309 mem := v.Args[2] 14310 if !(ValAndOff(x).canAdd(c)) { 14311 break 14312 } 14313 v.reset(OpAMD64MOVBstoreconstidx1) 14314 v.AuxInt = ValAndOff(x).add(c) 14315 v.Aux = sym 14316 v.AddArg(ptr) 14317 v.AddArg(idx) 14318 v.AddArg(mem) 14319 return true 14320 } 
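// The rule below pairs two constant byte stores at adjacent offsets
// (a directly below c) into one 16-bit constant store, packing the two
// immediates little-endian as val(a)&0xff | val(c)<<8 in the combined
// ValAndOff.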
14321 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 14322 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 14323 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 14324 for { 14325 c := v.AuxInt 14326 s := v.Aux 14327 _ = v.Args[2] 14328 p := v.Args[0] 14329 i := v.Args[1] 14330 x := v.Args[2] 14331 if x.Op != OpAMD64MOVBstoreconstidx1 { 14332 break 14333 } 14334 a := x.AuxInt 14335 if x.Aux != s { 14336 break 14337 } 14338 _ = x.Args[2] 14339 if p != x.Args[0] { 14340 break 14341 } 14342 if i != x.Args[1] { 14343 break 14344 } 14345 mem := x.Args[2] 14346 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 14347 break 14348 } 14349 v.reset(OpAMD64MOVWstoreconstidx1) 14350 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 14351 v.Aux = s 14352 v.AddArg(p) 14353 v.AddArg(i) 14354 v.AddArg(mem) 14355 return true 14356 } 14357 return false 14358 } 14359 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { 14360 b := v.Block 14361 _ = b 14362 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 14363 // cond: is32Bit(c+d) 14364 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 14365 for { 14366 c := v.AuxInt 14367 sym := v.Aux 14368 _ = v.Args[3] 14369 v_0 := v.Args[0] 14370 if v_0.Op != OpAMD64ADDQconst { 14371 break 14372 } 14373 d := v_0.AuxInt 14374 ptr := v_0.Args[0] 14375 idx := v.Args[1] 14376 val := v.Args[2] 14377 mem := v.Args[3] 14378 if !(is32Bit(c + d)) { 14379 break 14380 } 14381 v.reset(OpAMD64MOVBstoreidx1) 14382 v.AuxInt = c + d 14383 v.Aux = sym 14384 v.AddArg(ptr) 14385 v.AddArg(idx) 14386 v.AddArg(val) 14387 v.AddArg(mem) 14388 return true 14389 } 14390 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 14391 // cond: is32Bit(c+d) 14392 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 14393 for { 14394 c := v.AuxInt 14395 sym := v.Aux 14396 _ = v.Args[3] 14397 ptr := v.Args[0] 14398 v_1 := v.Args[1] 14399 if v_1.Op != OpAMD64ADDQconst { 14400 break 14401 } 14402 d := v_1.AuxInt 14403 idx := v_1.Args[0] 14404 val := v.Args[2] 14405 mem := v.Args[3] 14406 if !(is32Bit(c + d)) { 14407 break 14408 } 14409 v.reset(OpAMD64MOVBstoreidx1) 14410 v.AuxInt = c + d 14411 v.Aux = sym 14412 v.AddArg(ptr) 14413 v.AddArg(idx) 14414 v.AddArg(val) 14415 v.AddArg(mem) 14416 return true 14417 } 14418 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 14419 // cond: x0.Uses == 1 && clobber(x0) 14420 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 14421 for { 14422 i := v.AuxInt 14423 s := v.Aux 14424 _ = v.Args[3] 14425 p := v.Args[0] 14426 idx := v.Args[1] 14427 w := v.Args[2] 14428 x0 := v.Args[3] 14429 if x0.Op != OpAMD64MOVBstoreidx1 { 14430 break 14431 } 14432 if x0.AuxInt != i-1 { 14433 break 14434 } 14435 if x0.Aux != s { 14436 break 14437 } 14438 _ = x0.Args[3] 14439 if p != x0.Args[0] { 14440 break 14441 } 14442 if idx != x0.Args[1] { 14443 break 14444 } 14445 x0_2 := x0.Args[2] 14446 if x0_2.Op != OpAMD64SHRWconst { 14447 break 14448 } 14449 if x0_2.AuxInt != 8 { 14450 break 14451 } 14452 if w != x0_2.Args[0] { 14453 break 14454 } 14455 mem := x0.Args[3] 14456 if !(x0.Uses == 1 && clobber(x0)) { 14457 break 14458 } 14459 v.reset(OpAMD64MOVWstoreidx1) 14460 v.AuxInt = i - 1 14461 v.Aux = s 14462 v.AddArg(p) 14463 v.AddArg(idx) 14464 v0 := 
b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 14465 v0.AuxInt = 8 14466 v0.AddArg(w) 14467 v.AddArg(v0) 14468 v.AddArg(mem) 14469 return true 14470 } 14471 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 14472 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 14473 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 14474 for { 14475 i := v.AuxInt 14476 s := v.Aux 14477 _ = v.Args[3] 14478 p := v.Args[0] 14479 idx := v.Args[1] 14480 w := v.Args[2] 14481 x2 := v.Args[3] 14482 if x2.Op != OpAMD64MOVBstoreidx1 { 14483 break 14484 } 14485 if x2.AuxInt != i-1 { 14486 break 14487 } 14488 if x2.Aux != s { 14489 break 14490 } 14491 _ = x2.Args[3] 14492 if p != x2.Args[0] { 14493 break 14494 } 14495 if idx != x2.Args[1] { 14496 break 14497 } 14498 x2_2 := x2.Args[2] 14499 if x2_2.Op != OpAMD64SHRLconst { 14500 break 14501 } 14502 if x2_2.AuxInt != 8 { 14503 break 14504 } 14505 if w != x2_2.Args[0] { 14506 break 14507 } 14508 x1 := x2.Args[3] 14509 if x1.Op != OpAMD64MOVBstoreidx1 { 14510 break 14511 } 14512 if x1.AuxInt != i-2 { 14513 break 14514 } 14515 if x1.Aux != s { 14516 break 14517 } 14518 _ = x1.Args[3] 14519 if p != x1.Args[0] { 14520 break 14521 } 14522 if idx != x1.Args[1] { 14523 break 14524 } 14525 x1_2 := x1.Args[2] 14526 if x1_2.Op != OpAMD64SHRLconst { 14527 break 14528 } 14529 if x1_2.AuxInt != 16 { 14530 break 14531 } 14532 if w != x1_2.Args[0] { 14533 break 14534 } 14535 x0 := x1.Args[3] 14536 if x0.Op != OpAMD64MOVBstoreidx1 { 14537 break 14538 } 14539 if x0.AuxInt != i-3 { 14540 break 14541 } 14542 if x0.Aux != s { 14543 break 14544 } 14545 _ = x0.Args[3] 14546 if p != x0.Args[0] { 14547 break 14548 } 14549 if idx != x0.Args[1] { 14550 break 14551 } 14552 x0_2 := x0.Args[2] 14553 if x0_2.Op != OpAMD64SHRLconst { 14554 break 14555 } 14556 if x0_2.AuxInt != 24 { 14557 break 14558 } 14559 if w != x0_2.Args[0] { 14560 break 14561 } 14562 mem := x0.Args[3] 14563 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 14564 break 14565 } 14566 v.reset(OpAMD64MOVLstoreidx1) 14567 v.AuxInt = i - 3 14568 v.Aux = s 14569 v.AddArg(p) 14570 v.AddArg(idx) 14571 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 14572 v0.AddArg(w) 14573 v.AddArg(v0) 14574 v.AddArg(mem) 14575 return true 14576 } 14577 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 14578 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 14579 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 14580 for { 14581 i := v.AuxInt 14582 s := v.Aux 14583 _ = v.Args[3] 14584 p := v.Args[0] 14585 idx := v.Args[1] 14586 w := v.Args[2] 14587 x6 := v.Args[3] 14588 if x6.Op != OpAMD64MOVBstoreidx1 { 14589 break 14590 } 14591 if x6.AuxInt != i-1 { 14592 break 14593 } 14594 if x6.Aux != s { 14595 break 14596 } 14597 _ = x6.Args[3] 
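// The chain walk below mirrors the plain MOVBstore case: seven older
// indexed stores (x6 down to x0), each single-use, must write the next
// higher byte of w one address lower; the whole chain then collapses
// to a MOVQstoreidx1 at [i-7] of BSWAPQ(w).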
14598 if p != x6.Args[0] { 14599 break 14600 } 14601 if idx != x6.Args[1] { 14602 break 14603 } 14604 x6_2 := x6.Args[2] 14605 if x6_2.Op != OpAMD64SHRQconst { 14606 break 14607 } 14608 if x6_2.AuxInt != 8 { 14609 break 14610 } 14611 if w != x6_2.Args[0] { 14612 break 14613 } 14614 x5 := x6.Args[3] 14615 if x5.Op != OpAMD64MOVBstoreidx1 { 14616 break 14617 } 14618 if x5.AuxInt != i-2 { 14619 break 14620 } 14621 if x5.Aux != s { 14622 break 14623 } 14624 _ = x5.Args[3] 14625 if p != x5.Args[0] { 14626 break 14627 } 14628 if idx != x5.Args[1] { 14629 break 14630 } 14631 x5_2 := x5.Args[2] 14632 if x5_2.Op != OpAMD64SHRQconst { 14633 break 14634 } 14635 if x5_2.AuxInt != 16 { 14636 break 14637 } 14638 if w != x5_2.Args[0] { 14639 break 14640 } 14641 x4 := x5.Args[3] 14642 if x4.Op != OpAMD64MOVBstoreidx1 { 14643 break 14644 } 14645 if x4.AuxInt != i-3 { 14646 break 14647 } 14648 if x4.Aux != s { 14649 break 14650 } 14651 _ = x4.Args[3] 14652 if p != x4.Args[0] { 14653 break 14654 } 14655 if idx != x4.Args[1] { 14656 break 14657 } 14658 x4_2 := x4.Args[2] 14659 if x4_2.Op != OpAMD64SHRQconst { 14660 break 14661 } 14662 if x4_2.AuxInt != 24 { 14663 break 14664 } 14665 if w != x4_2.Args[0] { 14666 break 14667 } 14668 x3 := x4.Args[3] 14669 if x3.Op != OpAMD64MOVBstoreidx1 { 14670 break 14671 } 14672 if x3.AuxInt != i-4 { 14673 break 14674 } 14675 if x3.Aux != s { 14676 break 14677 } 14678 _ = x3.Args[3] 14679 if p != x3.Args[0] { 14680 break 14681 } 14682 if idx != x3.Args[1] { 14683 break 14684 } 14685 x3_2 := x3.Args[2] 14686 if x3_2.Op != OpAMD64SHRQconst { 14687 break 14688 } 14689 if x3_2.AuxInt != 32 { 14690 break 14691 } 14692 if w != x3_2.Args[0] { 14693 break 14694 } 14695 x2 := x3.Args[3] 14696 if x2.Op != OpAMD64MOVBstoreidx1 { 14697 break 14698 } 14699 if x2.AuxInt != i-5 { 14700 break 14701 } 14702 if x2.Aux != s { 14703 break 14704 } 14705 _ = x2.Args[3] 14706 if p != x2.Args[0] { 14707 break 14708 } 14709 if idx != x2.Args[1] { 14710 break 14711 } 14712 x2_2 := x2.Args[2] 14713 if x2_2.Op != OpAMD64SHRQconst { 14714 break 14715 } 14716 if x2_2.AuxInt != 40 { 14717 break 14718 } 14719 if w != x2_2.Args[0] { 14720 break 14721 } 14722 x1 := x2.Args[3] 14723 if x1.Op != OpAMD64MOVBstoreidx1 { 14724 break 14725 } 14726 if x1.AuxInt != i-6 { 14727 break 14728 } 14729 if x1.Aux != s { 14730 break 14731 } 14732 _ = x1.Args[3] 14733 if p != x1.Args[0] { 14734 break 14735 } 14736 if idx != x1.Args[1] { 14737 break 14738 } 14739 x1_2 := x1.Args[2] 14740 if x1_2.Op != OpAMD64SHRQconst { 14741 break 14742 } 14743 if x1_2.AuxInt != 48 { 14744 break 14745 } 14746 if w != x1_2.Args[0] { 14747 break 14748 } 14749 x0 := x1.Args[3] 14750 if x0.Op != OpAMD64MOVBstoreidx1 { 14751 break 14752 } 14753 if x0.AuxInt != i-7 { 14754 break 14755 } 14756 if x0.Aux != s { 14757 break 14758 } 14759 _ = x0.Args[3] 14760 if p != x0.Args[0] { 14761 break 14762 } 14763 if idx != x0.Args[1] { 14764 break 14765 } 14766 x0_2 := x0.Args[2] 14767 if x0_2.Op != OpAMD64SHRQconst { 14768 break 14769 } 14770 if x0_2.AuxInt != 56 { 14771 break 14772 } 14773 if w != x0_2.Args[0] { 14774 break 14775 } 14776 mem := x0.Args[3] 14777 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 14778 break 14779 } 14780 v.reset(OpAMD64MOVQstoreidx1) 14781 v.AuxInt = i - 7 14782 v.Aux = s 14783 v.AddArg(p) 14784 v.AddArg(idx) 14785 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, 
w.Type) 14786 v0.AddArg(w) 14787 v.AddArg(v0) 14788 v.AddArg(mem) 14789 return true 14790 } 14791 // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 14792 // cond: x.Uses == 1 && clobber(x) 14793 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 14794 for { 14795 i := v.AuxInt 14796 s := v.Aux 14797 _ = v.Args[3] 14798 p := v.Args[0] 14799 idx := v.Args[1] 14800 v_2 := v.Args[2] 14801 if v_2.Op != OpAMD64SHRWconst { 14802 break 14803 } 14804 if v_2.AuxInt != 8 { 14805 break 14806 } 14807 w := v_2.Args[0] 14808 x := v.Args[3] 14809 if x.Op != OpAMD64MOVBstoreidx1 { 14810 break 14811 } 14812 if x.AuxInt != i-1 { 14813 break 14814 } 14815 if x.Aux != s { 14816 break 14817 } 14818 _ = x.Args[3] 14819 if p != x.Args[0] { 14820 break 14821 } 14822 if idx != x.Args[1] { 14823 break 14824 } 14825 if w != x.Args[2] { 14826 break 14827 } 14828 mem := x.Args[3] 14829 if !(x.Uses == 1 && clobber(x)) { 14830 break 14831 } 14832 v.reset(OpAMD64MOVWstoreidx1) 14833 v.AuxInt = i - 1 14834 v.Aux = s 14835 v.AddArg(p) 14836 v.AddArg(idx) 14837 v.AddArg(w) 14838 v.AddArg(mem) 14839 return true 14840 } 14841 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 14842 // cond: x.Uses == 1 && clobber(x) 14843 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 14844 for { 14845 i := v.AuxInt 14846 s := v.Aux 14847 _ = v.Args[3] 14848 p := v.Args[0] 14849 idx := v.Args[1] 14850 v_2 := v.Args[2] 14851 if v_2.Op != OpAMD64SHRLconst { 14852 break 14853 } 14854 if v_2.AuxInt != 8 { 14855 break 14856 } 14857 w := v_2.Args[0] 14858 x := v.Args[3] 14859 if x.Op != OpAMD64MOVBstoreidx1 { 14860 break 14861 } 14862 if x.AuxInt != i-1 { 14863 break 14864 } 14865 if x.Aux != s { 14866 break 14867 } 14868 _ = x.Args[3] 14869 if p != x.Args[0] { 14870 break 14871 } 14872 if idx != x.Args[1] { 14873 break 14874 } 14875 if w != x.Args[2] { 14876 break 14877 } 14878 mem := x.Args[3] 14879 if !(x.Uses == 1 && clobber(x)) { 14880 break 14881 } 14882 v.reset(OpAMD64MOVWstoreidx1) 14883 v.AuxInt = i - 1 14884 v.Aux = s 14885 v.AddArg(p) 14886 v.AddArg(idx) 14887 v.AddArg(w) 14888 v.AddArg(mem) 14889 return true 14890 } 14891 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 14892 // cond: x.Uses == 1 && clobber(x) 14893 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 14894 for { 14895 i := v.AuxInt 14896 s := v.Aux 14897 _ = v.Args[3] 14898 p := v.Args[0] 14899 idx := v.Args[1] 14900 v_2 := v.Args[2] 14901 if v_2.Op != OpAMD64SHRQconst { 14902 break 14903 } 14904 if v_2.AuxInt != 8 { 14905 break 14906 } 14907 w := v_2.Args[0] 14908 x := v.Args[3] 14909 if x.Op != OpAMD64MOVBstoreidx1 { 14910 break 14911 } 14912 if x.AuxInt != i-1 { 14913 break 14914 } 14915 if x.Aux != s { 14916 break 14917 } 14918 _ = x.Args[3] 14919 if p != x.Args[0] { 14920 break 14921 } 14922 if idx != x.Args[1] { 14923 break 14924 } 14925 if w != x.Args[2] { 14926 break 14927 } 14928 mem := x.Args[3] 14929 if !(x.Uses == 1 && clobber(x)) { 14930 break 14931 } 14932 v.reset(OpAMD64MOVWstoreidx1) 14933 v.AuxInt = i - 1 14934 v.Aux = s 14935 v.AddArg(p) 14936 v.AddArg(idx) 14937 v.AddArg(w) 14938 v.AddArg(mem) 14939 return true 14940 } 14941 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) 14942 // cond: x.Uses == 1 && clobber(x) 14943 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 14944 for { 14945 i := v.AuxInt 14946 s := v.Aux 14947 _ = v.Args[3] 14948 p 
:= v.Args[0] 14949 idx := v.Args[1] 14950 v_2 := v.Args[2] 14951 if v_2.Op != OpAMD64SHRLconst { 14952 break 14953 } 14954 j := v_2.AuxInt 14955 w := v_2.Args[0] 14956 x := v.Args[3] 14957 if x.Op != OpAMD64MOVBstoreidx1 { 14958 break 14959 } 14960 if x.AuxInt != i-1 { 14961 break 14962 } 14963 if x.Aux != s { 14964 break 14965 } 14966 _ = x.Args[3] 14967 if p != x.Args[0] { 14968 break 14969 } 14970 if idx != x.Args[1] { 14971 break 14972 } 14973 w0 := x.Args[2] 14974 if w0.Op != OpAMD64SHRLconst { 14975 break 14976 } 14977 if w0.AuxInt != j-8 { 14978 break 14979 } 14980 if w != w0.Args[0] { 14981 break 14982 } 14983 mem := x.Args[3] 14984 if !(x.Uses == 1 && clobber(x)) { 14985 break 14986 } 14987 v.reset(OpAMD64MOVWstoreidx1) 14988 v.AuxInt = i - 1 14989 v.Aux = s 14990 v.AddArg(p) 14991 v.AddArg(idx) 14992 v.AddArg(w0) 14993 v.AddArg(mem) 14994 return true 14995 } 14996 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 14997 // cond: x.Uses == 1 && clobber(x) 14998 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 14999 for { 15000 i := v.AuxInt 15001 s := v.Aux 15002 _ = v.Args[3] 15003 p := v.Args[0] 15004 idx := v.Args[1] 15005 v_2 := v.Args[2] 15006 if v_2.Op != OpAMD64SHRQconst { 15007 break 15008 } 15009 j := v_2.AuxInt 15010 w := v_2.Args[0] 15011 x := v.Args[3] 15012 if x.Op != OpAMD64MOVBstoreidx1 { 15013 break 15014 } 15015 if x.AuxInt != i-1 { 15016 break 15017 } 15018 if x.Aux != s { 15019 break 15020 } 15021 _ = x.Args[3] 15022 if p != x.Args[0] { 15023 break 15024 } 15025 if idx != x.Args[1] { 15026 break 15027 } 15028 w0 := x.Args[2] 15029 if w0.Op != OpAMD64SHRQconst { 15030 break 15031 } 15032 if w0.AuxInt != j-8 { 15033 break 15034 } 15035 if w != w0.Args[0] { 15036 break 15037 } 15038 mem := x.Args[3] 15039 if !(x.Uses == 1 && clobber(x)) { 15040 break 15041 } 15042 v.reset(OpAMD64MOVWstoreidx1) 15043 v.AuxInt = i - 1 15044 v.Aux = s 15045 v.AddArg(p) 15046 v.AddArg(idx) 15047 v.AddArg(w0) 15048 v.AddArg(mem) 15049 return true 15050 } 15051 return false 15052 } 15053 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v *Value) bool { 15054 // match: (MOVBstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 15055 // cond: is32Bit(i+c) 15056 // result: (MOVBstore [i+c] {s} p w mem) 15057 for { 15058 i := v.AuxInt 15059 s := v.Aux 15060 _ = v.Args[3] 15061 p := v.Args[0] 15062 v_1 := v.Args[1] 15063 if v_1.Op != OpAMD64MOVQconst { 15064 break 15065 } 15066 c := v_1.AuxInt 15067 w := v.Args[2] 15068 mem := v.Args[3] 15069 if !(is32Bit(i + c)) { 15070 break 15071 } 15072 v.reset(OpAMD64MOVBstore) 15073 v.AuxInt = i + c 15074 v.Aux = s 15075 v.AddArg(p) 15076 v.AddArg(w) 15077 v.AddArg(mem) 15078 return true 15079 } 15080 return false 15081 } 15082 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { 15083 b := v.Block 15084 _ = b 15085 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 15086 // cond: x.Uses == 1 && clobber(x) 15087 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 15088 for { 15089 x := v.Args[0] 15090 if x.Op != OpAMD64MOVLload { 15091 break 15092 } 15093 off := x.AuxInt 15094 sym := x.Aux 15095 _ = x.Args[1] 15096 ptr := x.Args[0] 15097 mem := x.Args[1] 15098 if !(x.Uses == 1 && clobber(x)) { 15099 break 15100 } 15101 b = x.Block 15102 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) 15103 v.reset(OpCopy) 15104 v.AddArg(v0) 15105 v0.AuxInt = off 15106 v0.Aux = sym 15107 v0.AddArg(ptr) 15108 v0.AddArg(mem) 15109 return true 15110 } 15111 // match: (MOVLQSX x:(MOVQload [off] 
{sym} ptr mem)) 15112 // cond: x.Uses == 1 && clobber(x) 15113 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 15114 for { 15115 x := v.Args[0] 15116 if x.Op != OpAMD64MOVQload { 15117 break 15118 } 15119 off := x.AuxInt 15120 sym := x.Aux 15121 _ = x.Args[1] 15122 ptr := x.Args[0] 15123 mem := x.Args[1] 15124 if !(x.Uses == 1 && clobber(x)) { 15125 break 15126 } 15127 b = x.Block 15128 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) 15129 v.reset(OpCopy) 15130 v.AddArg(v0) 15131 v0.AuxInt = off 15132 v0.Aux = sym 15133 v0.AddArg(ptr) 15134 v0.AddArg(mem) 15135 return true 15136 } 15137 // match: (MOVLQSX (ANDLconst [c] x)) 15138 // cond: c & 0x80000000 == 0 15139 // result: (ANDLconst [c & 0x7fffffff] x) 15140 for { 15141 v_0 := v.Args[0] 15142 if v_0.Op != OpAMD64ANDLconst { 15143 break 15144 } 15145 c := v_0.AuxInt 15146 x := v_0.Args[0] 15147 if !(c&0x80000000 == 0) { 15148 break 15149 } 15150 v.reset(OpAMD64ANDLconst) 15151 v.AuxInt = c & 0x7fffffff 15152 v.AddArg(x) 15153 return true 15154 } 15155 // match: (MOVLQSX (MOVLQSX x)) 15156 // cond: 15157 // result: (MOVLQSX x) 15158 for { 15159 v_0 := v.Args[0] 15160 if v_0.Op != OpAMD64MOVLQSX { 15161 break 15162 } 15163 x := v_0.Args[0] 15164 v.reset(OpAMD64MOVLQSX) 15165 v.AddArg(x) 15166 return true 15167 } 15168 // match: (MOVLQSX (MOVWQSX x)) 15169 // cond: 15170 // result: (MOVWQSX x) 15171 for { 15172 v_0 := v.Args[0] 15173 if v_0.Op != OpAMD64MOVWQSX { 15174 break 15175 } 15176 x := v_0.Args[0] 15177 v.reset(OpAMD64MOVWQSX) 15178 v.AddArg(x) 15179 return true 15180 } 15181 // match: (MOVLQSX (MOVBQSX x)) 15182 // cond: 15183 // result: (MOVBQSX x) 15184 for { 15185 v_0 := v.Args[0] 15186 if v_0.Op != OpAMD64MOVBQSX { 15187 break 15188 } 15189 x := v_0.Args[0] 15190 v.reset(OpAMD64MOVBQSX) 15191 v.AddArg(x) 15192 return true 15193 } 15194 return false 15195 } 15196 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { 15197 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 15198 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 15199 // result: (MOVLQSX x) 15200 for { 15201 off := v.AuxInt 15202 sym := v.Aux 15203 _ = v.Args[1] 15204 ptr := v.Args[0] 15205 v_1 := v.Args[1] 15206 if v_1.Op != OpAMD64MOVLstore { 15207 break 15208 } 15209 off2 := v_1.AuxInt 15210 sym2 := v_1.Aux 15211 _ = v_1.Args[2] 15212 ptr2 := v_1.Args[0] 15213 x := v_1.Args[1] 15214 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 15215 break 15216 } 15217 v.reset(OpAMD64MOVLQSX) 15218 v.AddArg(x) 15219 return true 15220 } 15221 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 15222 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 15223 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 15224 for { 15225 off1 := v.AuxInt 15226 sym1 := v.Aux 15227 _ = v.Args[1] 15228 v_0 := v.Args[0] 15229 if v_0.Op != OpAMD64LEAQ { 15230 break 15231 } 15232 off2 := v_0.AuxInt 15233 sym2 := v_0.Aux 15234 base := v_0.Args[0] 15235 mem := v.Args[1] 15236 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 15237 break 15238 } 15239 v.reset(OpAMD64MOVLQSXload) 15240 v.AuxInt = off1 + off2 15241 v.Aux = mergeSym(sym1, sym2) 15242 v.AddArg(base) 15243 v.AddArg(mem) 15244 return true 15245 } 15246 return false 15247 } 15248 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { 15249 b := v.Block 15250 _ = b 15251 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 15252 // cond: x.Uses == 1 && clobber(x) 15253 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 
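// On AMD64 a 32-bit load already zeroes bits 32..63 of the destination
// register, so a once-used MOVLload feeding MOVLQZX is simply re-typed:
// the rule re-creates the load at v's type in the load's own block (the
// @x.Block form), where its memory argument is known to be available.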
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// cond:
	// result: (MOVLQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
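// Note (hand-added, not generated output): MOVLf2i/MOVLi2f move 32 bits
// between the floating-point and integer register classes. The two rules
// above avoid the cross-class move for function arguments entirely by
// re-materializing the Arg in the entry block with the requested type,
// which is sound only when the sizes match (t.Size() == u.Size()).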
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, off, config.BigEndian))])
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpSB {
			break
		}
		if !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(read32(sym, off, config.BigEndian))
		return true
	}
	return false
}
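// Note (hand-added, not generated output): the rule above constant-folds
// loads from read-only data: a 4-byte load at a static offset from a
// symbol with symIsRO(sym) is replaced by a MOVQconst holding the bytes
// that read32 fetched from the object data at compile time.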
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVLload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVLload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		p := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVLload [i+4*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 4*c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + 4*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool {
	// match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVLload [i+8*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
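// Note (hand-added, not generated output): the indexed-load rules above
// normalize addressing modes. A shifted index (SHLQconst [2] idx) is
// absorbed into the stronger MOVLloadidx4 form, constant adjustments to
// the pointer or index migrate into AuxInt (guarded by is32Bit so the
// displacement still fits a signed 32-bit field; note the 4*/8* scaling
// when the constant rides on the index), and a fully constant index
// degrades the indexed load back to a plain MOVLload.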
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
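// Note (hand-added, not generated output): the MOVLstore batch above drops
// redundant extensions on the stored value (a 32-bit store ignores the
// upper half of the register anyway), folds address arithmetic exactly as
// the load rules do, and turns stores of constants into MOVLstoreconst,
// packing value and offset into one AuxInt via makeValAndOff.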
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVLstore {
			break
		}
		if mem2.AuxInt != i-4 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload {
			break
		}
		if x2.AuxInt != j-4 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = j - 4
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
	// match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SUBL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTCLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTCL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTRLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTRL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
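// Note (hand-added, not generated output): the batches above fuse a
// load-op-store chain on one address into a single read-modify-write
// instruction with a memory operand. For instance,
// (MOVLstore p (ADDL (MOVLload p mem) x) mem) becomes
// (ADDLmodify p x mem), i.e. an ADDL performed directly on memory.
// The y.Uses==1/l.Uses==1 and clobber checks ensure the intermediate
// SSA values have no other consumers, so the fusion is safe.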
match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem) 17354 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17355 // result: (BTSLmodify [off] {sym} ptr x mem) 17356 for { 17357 off := v.AuxInt 17358 sym := v.Aux 17359 _ = v.Args[2] 17360 ptr := v.Args[0] 17361 y := v.Args[1] 17362 if y.Op != OpAMD64BTSL { 17363 break 17364 } 17365 _ = y.Args[1] 17366 l := y.Args[0] 17367 if l.Op != OpAMD64MOVLload { 17368 break 17369 } 17370 if l.AuxInt != off { 17371 break 17372 } 17373 if l.Aux != sym { 17374 break 17375 } 17376 _ = l.Args[1] 17377 if ptr != l.Args[0] { 17378 break 17379 } 17380 mem := l.Args[1] 17381 x := y.Args[1] 17382 if mem != v.Args[2] { 17383 break 17384 } 17385 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17386 break 17387 } 17388 v.reset(OpAMD64BTSLmodify) 17389 v.AuxInt = off 17390 v.Aux = sym 17391 v.AddArg(ptr) 17392 v.AddArg(x) 17393 v.AddArg(mem) 17394 return true 17395 } 17396 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17397 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17398 // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17399 for { 17400 off := v.AuxInt 17401 sym := v.Aux 17402 _ = v.Args[2] 17403 ptr := v.Args[0] 17404 a := v.Args[1] 17405 if a.Op != OpAMD64ADDLconst { 17406 break 17407 } 17408 c := a.AuxInt 17409 l := a.Args[0] 17410 if l.Op != OpAMD64MOVLload { 17411 break 17412 } 17413 if l.AuxInt != off { 17414 break 17415 } 17416 if l.Aux != sym { 17417 break 17418 } 17419 _ = l.Args[1] 17420 ptr2 := l.Args[0] 17421 mem := l.Args[1] 17422 if mem != v.Args[2] { 17423 break 17424 } 17425 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17426 break 17427 } 17428 v.reset(OpAMD64ADDLconstmodify) 17429 v.AuxInt = makeValAndOff(c, off) 17430 v.Aux = sym 17431 v.AddArg(ptr) 17432 v.AddArg(mem) 17433 return true 17434 } 17435 // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17436 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17437 // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17438 for { 17439 off := v.AuxInt 17440 sym := v.Aux 17441 _ = v.Args[2] 17442 ptr := v.Args[0] 17443 a := v.Args[1] 17444 if a.Op != OpAMD64ANDLconst { 17445 break 17446 } 17447 c := a.AuxInt 17448 l := a.Args[0] 17449 if l.Op != OpAMD64MOVLload { 17450 break 17451 } 17452 if l.AuxInt != off { 17453 break 17454 } 17455 if l.Aux != sym { 17456 break 17457 } 17458 _ = l.Args[1] 17459 ptr2 := l.Args[0] 17460 mem := l.Args[1] 17461 if mem != v.Args[2] { 17462 break 17463 } 17464 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17465 break 17466 } 17467 v.reset(OpAMD64ANDLconstmodify) 17468 v.AuxInt = makeValAndOff(c, off) 17469 v.Aux = sym 17470 v.AddArg(ptr) 17471 v.AddArg(mem) 17472 return true 17473 } 17474 // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17475 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17476 // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17477 for { 17478 off := v.AuxInt 17479 sym := v.Aux 17480 _ = v.Args[2] 17481 ptr := v.Args[0] 17482 a := v.Args[1] 17483 if a.Op != OpAMD64ORLconst { 17484 break 17485 } 
17486 c := a.AuxInt 17487 l := a.Args[0] 17488 if l.Op != OpAMD64MOVLload { 17489 break 17490 } 17491 if l.AuxInt != off { 17492 break 17493 } 17494 if l.Aux != sym { 17495 break 17496 } 17497 _ = l.Args[1] 17498 ptr2 := l.Args[0] 17499 mem := l.Args[1] 17500 if mem != v.Args[2] { 17501 break 17502 } 17503 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17504 break 17505 } 17506 v.reset(OpAMD64ORLconstmodify) 17507 v.AuxInt = makeValAndOff(c, off) 17508 v.Aux = sym 17509 v.AddArg(ptr) 17510 v.AddArg(mem) 17511 return true 17512 } 17513 // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17514 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17515 // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17516 for { 17517 off := v.AuxInt 17518 sym := v.Aux 17519 _ = v.Args[2] 17520 ptr := v.Args[0] 17521 a := v.Args[1] 17522 if a.Op != OpAMD64XORLconst { 17523 break 17524 } 17525 c := a.AuxInt 17526 l := a.Args[0] 17527 if l.Op != OpAMD64MOVLload { 17528 break 17529 } 17530 if l.AuxInt != off { 17531 break 17532 } 17533 if l.Aux != sym { 17534 break 17535 } 17536 _ = l.Args[1] 17537 ptr2 := l.Args[0] 17538 mem := l.Args[1] 17539 if mem != v.Args[2] { 17540 break 17541 } 17542 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17543 break 17544 } 17545 v.reset(OpAMD64XORLconstmodify) 17546 v.AuxInt = makeValAndOff(c, off) 17547 v.Aux = sym 17548 v.AddArg(ptr) 17549 v.AddArg(mem) 17550 return true 17551 } 17552 // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17553 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17554 // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17555 for { 17556 off := v.AuxInt 17557 sym := v.Aux 17558 _ = v.Args[2] 17559 ptr := v.Args[0] 17560 a := v.Args[1] 17561 if a.Op != OpAMD64BTCLconst { 17562 break 17563 } 17564 c := a.AuxInt 17565 l := a.Args[0] 17566 if l.Op != OpAMD64MOVLload { 17567 break 17568 } 17569 if l.AuxInt != off { 17570 break 17571 } 17572 if l.Aux != sym { 17573 break 17574 } 17575 _ = l.Args[1] 17576 ptr2 := l.Args[0] 17577 mem := l.Args[1] 17578 if mem != v.Args[2] { 17579 break 17580 } 17581 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17582 break 17583 } 17584 v.reset(OpAMD64BTCLconstmodify) 17585 v.AuxInt = makeValAndOff(c, off) 17586 v.Aux = sym 17587 v.AddArg(ptr) 17588 v.AddArg(mem) 17589 return true 17590 } 17591 // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17592 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17593 // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17594 for { 17595 off := v.AuxInt 17596 sym := v.Aux 17597 _ = v.Args[2] 17598 ptr := v.Args[0] 17599 a := v.Args[1] 17600 if a.Op != OpAMD64BTRLconst { 17601 break 17602 } 17603 c := a.AuxInt 17604 l := a.Args[0] 17605 if l.Op != OpAMD64MOVLload { 17606 break 17607 } 17608 if l.AuxInt != off { 17609 break 17610 } 17611 if l.Aux != sym { 17612 break 17613 } 17614 _ = l.Args[1] 17615 ptr2 := l.Args[0] 17616 mem := l.Args[1] 17617 if mem != v.Args[2] { 17618 break 17619 } 17620 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && 
l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17621 break 17622 } 17623 v.reset(OpAMD64BTRLconstmodify) 17624 v.AuxInt = makeValAndOff(c, off) 17625 v.Aux = sym 17626 v.AddArg(ptr) 17627 v.AddArg(mem) 17628 return true 17629 } 17630 // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 17631 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 17632 // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 17633 for { 17634 off := v.AuxInt 17635 sym := v.Aux 17636 _ = v.Args[2] 17637 ptr := v.Args[0] 17638 a := v.Args[1] 17639 if a.Op != OpAMD64BTSLconst { 17640 break 17641 } 17642 c := a.AuxInt 17643 l := a.Args[0] 17644 if l.Op != OpAMD64MOVLload { 17645 break 17646 } 17647 if l.AuxInt != off { 17648 break 17649 } 17650 if l.Aux != sym { 17651 break 17652 } 17653 _ = l.Args[1] 17654 ptr2 := l.Args[0] 17655 mem := l.Args[1] 17656 if mem != v.Args[2] { 17657 break 17658 } 17659 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 17660 break 17661 } 17662 v.reset(OpAMD64BTSLconstmodify) 17663 v.AuxInt = makeValAndOff(c, off) 17664 v.Aux = sym 17665 v.AddArg(ptr) 17666 v.AddArg(mem) 17667 return true 17668 } 17669 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) 17670 // cond: 17671 // result: (MOVSSstore [off] {sym} ptr val mem) 17672 for { 17673 off := v.AuxInt 17674 sym := v.Aux 17675 _ = v.Args[2] 17676 ptr := v.Args[0] 17677 v_1 := v.Args[1] 17678 if v_1.Op != OpAMD64MOVLf2i { 17679 break 17680 } 17681 val := v_1.Args[0] 17682 mem := v.Args[2] 17683 v.reset(OpAMD64MOVSSstore) 17684 v.AuxInt = off 17685 v.Aux = sym 17686 v.AddArg(ptr) 17687 v.AddArg(val) 17688 v.AddArg(mem) 17689 return true 17690 } 17691 return false 17692 } 17693 func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { 17694 b := v.Block 17695 _ = b 17696 typ := &b.Func.Config.Types 17697 _ = typ 17698 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 17699 // cond: ValAndOff(sc).canAdd(off) 17700 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 17701 for { 17702 sc := v.AuxInt 17703 s := v.Aux 17704 _ = v.Args[1] 17705 v_0 := v.Args[0] 17706 if v_0.Op != OpAMD64ADDQconst { 17707 break 17708 } 17709 off := v_0.AuxInt 17710 ptr := v_0.Args[0] 17711 mem := v.Args[1] 17712 if !(ValAndOff(sc).canAdd(off)) { 17713 break 17714 } 17715 v.reset(OpAMD64MOVLstoreconst) 17716 v.AuxInt = ValAndOff(sc).add(off) 17717 v.Aux = s 17718 v.AddArg(ptr) 17719 v.AddArg(mem) 17720 return true 17721 } 17722 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 17723 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 17724 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 17725 for { 17726 sc := v.AuxInt 17727 sym1 := v.Aux 17728 _ = v.Args[1] 17729 v_0 := v.Args[0] 17730 if v_0.Op != OpAMD64LEAQ { 17731 break 17732 } 17733 off := v_0.AuxInt 17734 sym2 := v_0.Aux 17735 ptr := v_0.Args[0] 17736 mem := v.Args[1] 17737 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 17738 break 17739 } 17740 v.reset(OpAMD64MOVLstoreconst) 17741 v.AuxInt = ValAndOff(sc).add(off) 17742 v.Aux = mergeSym(sym1, sym2) 17743 v.AddArg(ptr) 17744 v.AddArg(mem) 17745 return true 17746 } 17747 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 17748 // cond: canMergeSym(sym1, sym2) 17749 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] 
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		a := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
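// Editorial note (illustrative, not part of the generated code): the pair of
// MOVLstoreconst rules above combine two adjacent 4-byte constant stores into
// a single 8-byte store. On little-endian AMD64 the store at the lower offset
// supplies the low word, hence the combined constant
//
//	ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
//
// where a is the store at Off() and c the store at Off()+4. Both argument
// orders are matched because either store may appear as the memory argument
// of the other.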
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(4*c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(4 * c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
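// Editorial note (illustrative, not part of the generated code): the indexed
// forms come in scale variants (idx1, idx4, idx8). A shift of the index by a
// matching constant is absorbed into the scale, e.g. (SHLQconst [2] idx)
// under an idx1 op becomes an idx4 op, mirroring the hardware addressing mode
// base+index*scale+disp. A Go loop of roughly this shape is the kind of
// source that plausibly produces such scaled stores:
//
//	var a []uint32
//	for i := range a {
//		a[i] = 0
//	}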
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVLstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
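// Editorial note (illustrative, not part of the generated code): the
// SHRQconst rules above recognize a 64-bit value stored as two 32-bit
// halves, the low half at [i-4] and the high half (w >> 32) at [i], and
// replace the pair with one 8-byte store. Schematically:
//
//	mem[i-4] = uint32(w)       // MOVLstoreidx1 [i-4]
//	mem[i]   = uint32(w >> 32) // MOVLstoreidx1 [i]
//	// => one MOVQstoreidx1 [i-4] of w
//
// x.Uses == 1 ensures the older store has no other readers before it is
// clobbered.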
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVLstore [i+4*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + 4*c)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i + 4*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool {
	// match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVLstore [i+8*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
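// Editorial note (illustrative, not part of the generated code): the
// address-folding rules repeated throughout this file all follow one scheme:
// fold (ADDQconst [off2] ptr) or (LEAQ [off2] {sym2} ptr) into the
// instruction's own displacement, guarded by is32Bit(off1+off2) because x86
// displacements are signed 32-bit. A sketch of that guard, assuming the
// helper simply range-checks the sum:
//
//	func is32BitModel(n int64) bool {
//		return n == int64(int32(n))
//	}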
func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, off, config.BigEndian))])
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpSB {
			break
		}
		if !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(read64(sym, off, config.BigEndian))
		return true
	}
	return false
}
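// Editorial note (illustrative, not part of the generated code): the first
// MOVQload rule above is store-to-load forwarding: a load that immediately
// follows a store of the same width, offset, symbol, and pointer simply
// reuses the stored value x, eliminating the memory round trip. The final
// rule constant-folds loads from read-only data: when the base is SB and
// symIsRO(sym) holds, the eight bytes are read at compile time and the load
// becomes a MOVQconst. Loading from a constant string's backing array is,
// plausibly, the kind of case this resolves.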
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVQload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVQload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		p := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVQload [i+8*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
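// Editorial note (illustrative, not part of the generated code): ADDQ is
// commutative, so the MOVQloadidx1 rules above match every operand order
// separately: (ptr, ADDQconst idx), (ADDQconst idx, ptr), and so on. The
// rule generator expands these permutations mechanically, which is why this
// file contains so many near-identical cases.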
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDQload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDQload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
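// Editorial note (illustrative, not part of the generated code): the
// ADDQmodify/ANDQmodify rules above, and the OR/XOR/SUB/BT variants that
// follow, turn "load, op, store back to the same address" into a single
// memory-operand instruction. Go source of roughly this shape can match:
//
//	*p += x // MOVQload + ADDQ + MOVQstore -> ADDQmodify
//
// The mem arguments must be literally the same SSA value, so no other store
// may intervene between the load and the store.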
func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool {
	// match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORQload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORQload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDQ {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (SUBQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SUBQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDQ {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
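	// Editorial note (illustrative, not part of the generated rules): in all
	// of these fusions the condition y.Uses == 1 && l.Uses == 1 is what makes
	// clobbering safe. If, say, the ADDQ result were also assigned to a
	// variable that lives on, the rewrite would have to keep both the fused
	// store and the standalone ADDQ, so the rule declines to fire.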
	// match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORQ {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
	// match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORQ {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTCQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTCQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTCQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTRQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTRQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTRQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTSQmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTSQ {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTSQmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off]
{sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20386 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20387 // result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20388 for { 20389 off := v.AuxInt 20390 sym := v.Aux 20391 _ = v.Args[2] 20392 ptr := v.Args[0] 20393 a := v.Args[1] 20394 if a.Op != OpAMD64ANDQconst { 20395 break 20396 } 20397 c := a.AuxInt 20398 l := a.Args[0] 20399 if l.Op != OpAMD64MOVQload { 20400 break 20401 } 20402 if l.AuxInt != off { 20403 break 20404 } 20405 if l.Aux != sym { 20406 break 20407 } 20408 _ = l.Args[1] 20409 ptr2 := l.Args[0] 20410 mem := l.Args[1] 20411 if mem != v.Args[2] { 20412 break 20413 } 20414 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20415 break 20416 } 20417 v.reset(OpAMD64ANDQconstmodify) 20418 v.AuxInt = makeValAndOff(c, off) 20419 v.Aux = sym 20420 v.AddArg(ptr) 20421 v.AddArg(mem) 20422 return true 20423 } 20424 // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20425 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20426 // result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20427 for { 20428 off := v.AuxInt 20429 sym := v.Aux 20430 _ = v.Args[2] 20431 ptr := v.Args[0] 20432 a := v.Args[1] 20433 if a.Op != OpAMD64ORQconst { 20434 break 20435 } 20436 c := a.AuxInt 20437 l := a.Args[0] 20438 if l.Op != OpAMD64MOVQload { 20439 break 20440 } 20441 if l.AuxInt != off { 20442 break 20443 } 20444 if l.Aux != sym { 20445 break 20446 } 20447 _ = l.Args[1] 20448 ptr2 := l.Args[0] 20449 mem := l.Args[1] 20450 if mem != v.Args[2] { 20451 break 20452 } 20453 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20454 break 20455 } 20456 v.reset(OpAMD64ORQconstmodify) 20457 v.AuxInt = makeValAndOff(c, off) 20458 v.Aux = sym 20459 v.AddArg(ptr) 20460 v.AddArg(mem) 20461 return true 20462 } 20463 // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20464 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20465 // result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20466 for { 20467 off := v.AuxInt 20468 sym := v.Aux 20469 _ = v.Args[2] 20470 ptr := v.Args[0] 20471 a := v.Args[1] 20472 if a.Op != OpAMD64XORQconst { 20473 break 20474 } 20475 c := a.AuxInt 20476 l := a.Args[0] 20477 if l.Op != OpAMD64MOVQload { 20478 break 20479 } 20480 if l.AuxInt != off { 20481 break 20482 } 20483 if l.Aux != sym { 20484 break 20485 } 20486 _ = l.Args[1] 20487 ptr2 := l.Args[0] 20488 mem := l.Args[1] 20489 if mem != v.Args[2] { 20490 break 20491 } 20492 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20493 break 20494 } 20495 v.reset(OpAMD64XORQconstmodify) 20496 v.AuxInt = makeValAndOff(c, off) 20497 v.Aux = sym 20498 v.AddArg(ptr) 20499 v.AddArg(mem) 20500 return true 20501 } 20502 // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20503 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20504 // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20505 for { 20506 off := v.AuxInt 20507 sym := v.Aux 20508 _ = v.Args[2] 20509 ptr := v.Args[0] 20510 
a := v.Args[1] 20511 if a.Op != OpAMD64BTCQconst { 20512 break 20513 } 20514 c := a.AuxInt 20515 l := a.Args[0] 20516 if l.Op != OpAMD64MOVQload { 20517 break 20518 } 20519 if l.AuxInt != off { 20520 break 20521 } 20522 if l.Aux != sym { 20523 break 20524 } 20525 _ = l.Args[1] 20526 ptr2 := l.Args[0] 20527 mem := l.Args[1] 20528 if mem != v.Args[2] { 20529 break 20530 } 20531 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20532 break 20533 } 20534 v.reset(OpAMD64BTCQconstmodify) 20535 v.AuxInt = makeValAndOff(c, off) 20536 v.Aux = sym 20537 v.AddArg(ptr) 20538 v.AddArg(mem) 20539 return true 20540 } 20541 // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20542 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20543 // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20544 for { 20545 off := v.AuxInt 20546 sym := v.Aux 20547 _ = v.Args[2] 20548 ptr := v.Args[0] 20549 a := v.Args[1] 20550 if a.Op != OpAMD64BTRQconst { 20551 break 20552 } 20553 c := a.AuxInt 20554 l := a.Args[0] 20555 if l.Op != OpAMD64MOVQload { 20556 break 20557 } 20558 if l.AuxInt != off { 20559 break 20560 } 20561 if l.Aux != sym { 20562 break 20563 } 20564 _ = l.Args[1] 20565 ptr2 := l.Args[0] 20566 mem := l.Args[1] 20567 if mem != v.Args[2] { 20568 break 20569 } 20570 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20571 break 20572 } 20573 v.reset(OpAMD64BTRQconstmodify) 20574 v.AuxInt = makeValAndOff(c, off) 20575 v.Aux = sym 20576 v.AddArg(ptr) 20577 v.AddArg(mem) 20578 return true 20579 } 20580 return false 20581 } 20582 func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool { 20583 // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20584 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20585 // result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20586 for { 20587 off := v.AuxInt 20588 sym := v.Aux 20589 _ = v.Args[2] 20590 ptr := v.Args[0] 20591 a := v.Args[1] 20592 if a.Op != OpAMD64BTSQconst { 20593 break 20594 } 20595 c := a.AuxInt 20596 l := a.Args[0] 20597 if l.Op != OpAMD64MOVQload { 20598 break 20599 } 20600 if l.AuxInt != off { 20601 break 20602 } 20603 if l.Aux != sym { 20604 break 20605 } 20606 _ = l.Args[1] 20607 ptr2 := l.Args[0] 20608 mem := l.Args[1] 20609 if mem != v.Args[2] { 20610 break 20611 } 20612 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20613 break 20614 } 20615 v.reset(OpAMD64BTSQconstmodify) 20616 v.AuxInt = makeValAndOff(c, off) 20617 v.Aux = sym 20618 v.AddArg(ptr) 20619 v.AddArg(mem) 20620 return true 20621 } 20622 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) 20623 // cond: 20624 // result: (MOVSDstore [off] {sym} ptr val mem) 20625 for { 20626 off := v.AuxInt 20627 sym := v.Aux 20628 _ = v.Args[2] 20629 ptr := v.Args[0] 20630 v_1 := v.Args[1] 20631 if v_1.Op != OpAMD64MOVQf2i { 20632 break 20633 } 20634 val := v_1.Args[0] 20635 mem := v.Args[2] 20636 v.reset(OpAMD64MOVSDstore) 20637 v.AuxInt = off 20638 v.Aux = sym 20639 v.AddArg(ptr) 20640 v.AddArg(val) 20641 v.AddArg(mem) 20642 return true 20643 } 20644 return false 20645 } 20646 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { 20647 b := v.Block 20648 _ = b 20649 config := 
b.Func.Config 20650 _ = config 20651 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 20652 // cond: ValAndOff(sc).canAdd(off) 20653 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 20654 for { 20655 sc := v.AuxInt 20656 s := v.Aux 20657 _ = v.Args[1] 20658 v_0 := v.Args[0] 20659 if v_0.Op != OpAMD64ADDQconst { 20660 break 20661 } 20662 off := v_0.AuxInt 20663 ptr := v_0.Args[0] 20664 mem := v.Args[1] 20665 if !(ValAndOff(sc).canAdd(off)) { 20666 break 20667 } 20668 v.reset(OpAMD64MOVQstoreconst) 20669 v.AuxInt = ValAndOff(sc).add(off) 20670 v.Aux = s 20671 v.AddArg(ptr) 20672 v.AddArg(mem) 20673 return true 20674 } 20675 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 20676 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 20677 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 20678 for { 20679 sc := v.AuxInt 20680 sym1 := v.Aux 20681 _ = v.Args[1] 20682 v_0 := v.Args[0] 20683 if v_0.Op != OpAMD64LEAQ { 20684 break 20685 } 20686 off := v_0.AuxInt 20687 sym2 := v_0.Aux 20688 ptr := v_0.Args[0] 20689 mem := v.Args[1] 20690 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 20691 break 20692 } 20693 v.reset(OpAMD64MOVQstoreconst) 20694 v.AuxInt = ValAndOff(sc).add(off) 20695 v.Aux = mergeSym(sym1, sym2) 20696 v.AddArg(ptr) 20697 v.AddArg(mem) 20698 return true 20699 } 20700 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 20701 // cond: canMergeSym(sym1, sym2) 20702 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 20703 for { 20704 x := v.AuxInt 20705 sym1 := v.Aux 20706 _ = v.Args[1] 20707 v_0 := v.Args[0] 20708 if v_0.Op != OpAMD64LEAQ1 { 20709 break 20710 } 20711 off := v_0.AuxInt 20712 sym2 := v_0.Aux 20713 _ = v_0.Args[1] 20714 ptr := v_0.Args[0] 20715 idx := v_0.Args[1] 20716 mem := v.Args[1] 20717 if !(canMergeSym(sym1, sym2)) { 20718 break 20719 } 20720 v.reset(OpAMD64MOVQstoreconstidx1) 20721 v.AuxInt = ValAndOff(x).add(off) 20722 v.Aux = mergeSym(sym1, sym2) 20723 v.AddArg(ptr) 20724 v.AddArg(idx) 20725 v.AddArg(mem) 20726 return true 20727 } 20728 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 20729 // cond: canMergeSym(sym1, sym2) 20730 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 20731 for { 20732 x := v.AuxInt 20733 sym1 := v.Aux 20734 _ = v.Args[1] 20735 v_0 := v.Args[0] 20736 if v_0.Op != OpAMD64LEAQ8 { 20737 break 20738 } 20739 off := v_0.AuxInt 20740 sym2 := v_0.Aux 20741 _ = v_0.Args[1] 20742 ptr := v_0.Args[0] 20743 idx := v_0.Args[1] 20744 mem := v.Args[1] 20745 if !(canMergeSym(sym1, sym2)) { 20746 break 20747 } 20748 v.reset(OpAMD64MOVQstoreconstidx8) 20749 v.AuxInt = ValAndOff(x).add(off) 20750 v.Aux = mergeSym(sym1, sym2) 20751 v.AddArg(ptr) 20752 v.AddArg(idx) 20753 v.AddArg(mem) 20754 return true 20755 } 20756 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 20757 // cond: 20758 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 20759 for { 20760 x := v.AuxInt 20761 sym := v.Aux 20762 _ = v.Args[1] 20763 v_0 := v.Args[0] 20764 if v_0.Op != OpAMD64ADDQ { 20765 break 20766 } 20767 _ = v_0.Args[1] 20768 ptr := v_0.Args[0] 20769 idx := v_0.Args[1] 20770 mem := v.Args[1] 20771 v.reset(OpAMD64MOVQstoreconstidx1) 20772 v.AuxInt = x 20773 v.Aux = sym 20774 v.AddArg(ptr) 20775 v.AddArg(idx) 20776 v.AddArg(mem) 20777 return true 20778 } 20779 // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) 20780 // cond: 
config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x) 20781 // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) 20782 for { 20783 c := v.AuxInt 20784 s := v.Aux 20785 _ = v.Args[1] 20786 p := v.Args[0] 20787 x := v.Args[1] 20788 if x.Op != OpAMD64MOVQstoreconst { 20789 break 20790 } 20791 c2 := x.AuxInt 20792 if x.Aux != s { 20793 break 20794 } 20795 _ = x.Args[1] 20796 if p != x.Args[0] { 20797 break 20798 } 20799 mem := x.Args[1] 20800 if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) { 20801 break 20802 } 20803 v.reset(OpAMD64MOVOstore) 20804 v.AuxInt = ValAndOff(c2).Off() 20805 v.Aux = s 20806 v.AddArg(p) 20807 v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) 20808 v0.AuxInt = 0 20809 v.AddArg(v0) 20810 v.AddArg(mem) 20811 return true 20812 } 20813 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 20814 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 20815 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 20816 for { 20817 sc := v.AuxInt 20818 sym1 := v.Aux 20819 _ = v.Args[1] 20820 v_0 := v.Args[0] 20821 if v_0.Op != OpAMD64LEAL { 20822 break 20823 } 20824 off := v_0.AuxInt 20825 sym2 := v_0.Aux 20826 ptr := v_0.Args[0] 20827 mem := v.Args[1] 20828 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 20829 break 20830 } 20831 v.reset(OpAMD64MOVQstoreconst) 20832 v.AuxInt = ValAndOff(sc).add(off) 20833 v.Aux = mergeSym(sym1, sym2) 20834 v.AddArg(ptr) 20835 v.AddArg(mem) 20836 return true 20837 } 20838 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 20839 // cond: ValAndOff(sc).canAdd(off) 20840 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 20841 for { 20842 sc := v.AuxInt 20843 s := v.Aux 20844 _ = v.Args[1] 20845 v_0 := v.Args[0] 20846 if v_0.Op != OpAMD64ADDLconst { 20847 break 20848 } 20849 off := v_0.AuxInt 20850 ptr := v_0.Args[0] 20851 mem := v.Args[1] 20852 if !(ValAndOff(sc).canAdd(off)) { 20853 break 20854 } 20855 v.reset(OpAMD64MOVQstoreconst) 20856 v.AuxInt = ValAndOff(sc).add(off) 20857 v.Aux = s 20858 v.AddArg(ptr) 20859 v.AddArg(mem) 20860 return true 20861 } 20862 return false 20863 } 20864 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { 20865 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 20866 // cond: 20867 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 20868 for { 20869 c := v.AuxInt 20870 sym := v.Aux 20871 _ = v.Args[2] 20872 ptr := v.Args[0] 20873 v_1 := v.Args[1] 20874 if v_1.Op != OpAMD64SHLQconst { 20875 break 20876 } 20877 if v_1.AuxInt != 3 { 20878 break 20879 } 20880 idx := v_1.Args[0] 20881 mem := v.Args[2] 20882 v.reset(OpAMD64MOVQstoreconstidx8) 20883 v.AuxInt = c 20884 v.Aux = sym 20885 v.AddArg(ptr) 20886 v.AddArg(idx) 20887 v.AddArg(mem) 20888 return true 20889 } 20890 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 20891 // cond: ValAndOff(x).canAdd(c) 20892 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 20893 for { 20894 x := v.AuxInt 20895 sym := v.Aux 20896 _ = v.Args[2] 20897 v_0 := v.Args[0] 20898 if v_0.Op != OpAMD64ADDQconst { 20899 break 20900 } 20901 c := v_0.AuxInt 20902 ptr := v_0.Args[0] 20903 idx := v.Args[1] 20904 mem := v.Args[2] 20905 if !(ValAndOff(x).canAdd(c)) { 20906 break 20907 } 20908 
v.reset(OpAMD64MOVQstoreconstidx1) 20909 v.AuxInt = ValAndOff(x).add(c) 20910 v.Aux = sym 20911 v.AddArg(ptr) 20912 v.AddArg(idx) 20913 v.AddArg(mem) 20914 return true 20915 } 20916 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 20917 // cond: ValAndOff(x).canAdd(c) 20918 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 20919 for { 20920 x := v.AuxInt 20921 sym := v.Aux 20922 _ = v.Args[2] 20923 ptr := v.Args[0] 20924 v_1 := v.Args[1] 20925 if v_1.Op != OpAMD64ADDQconst { 20926 break 20927 } 20928 c := v_1.AuxInt 20929 idx := v_1.Args[0] 20930 mem := v.Args[2] 20931 if !(ValAndOff(x).canAdd(c)) { 20932 break 20933 } 20934 v.reset(OpAMD64MOVQstoreconstidx1) 20935 v.AuxInt = ValAndOff(x).add(c) 20936 v.Aux = sym 20937 v.AddArg(ptr) 20938 v.AddArg(idx) 20939 v.AddArg(mem) 20940 return true 20941 } 20942 return false 20943 } 20944 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { 20945 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 20946 // cond: ValAndOff(x).canAdd(c) 20947 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 20948 for { 20949 x := v.AuxInt 20950 sym := v.Aux 20951 _ = v.Args[2] 20952 v_0 := v.Args[0] 20953 if v_0.Op != OpAMD64ADDQconst { 20954 break 20955 } 20956 c := v_0.AuxInt 20957 ptr := v_0.Args[0] 20958 idx := v.Args[1] 20959 mem := v.Args[2] 20960 if !(ValAndOff(x).canAdd(c)) { 20961 break 20962 } 20963 v.reset(OpAMD64MOVQstoreconstidx8) 20964 v.AuxInt = ValAndOff(x).add(c) 20965 v.Aux = sym 20966 v.AddArg(ptr) 20967 v.AddArg(idx) 20968 v.AddArg(mem) 20969 return true 20970 } 20971 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 20972 // cond: ValAndOff(x).canAdd(8*c) 20973 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 20974 for { 20975 x := v.AuxInt 20976 sym := v.Aux 20977 _ = v.Args[2] 20978 ptr := v.Args[0] 20979 v_1 := v.Args[1] 20980 if v_1.Op != OpAMD64ADDQconst { 20981 break 20982 } 20983 c := v_1.AuxInt 20984 idx := v_1.Args[0] 20985 mem := v.Args[2] 20986 if !(ValAndOff(x).canAdd(8 * c)) { 20987 break 20988 } 20989 v.reset(OpAMD64MOVQstoreconstidx8) 20990 v.AuxInt = ValAndOff(x).add(8 * c) 20991 v.Aux = sym 20992 v.AddArg(ptr) 20993 v.AddArg(idx) 20994 v.AddArg(mem) 20995 return true 20996 } 20997 return false 20998 } 20999 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { 21000 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 21001 // cond: 21002 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 21003 for { 21004 c := v.AuxInt 21005 sym := v.Aux 21006 _ = v.Args[3] 21007 ptr := v.Args[0] 21008 v_1 := v.Args[1] 21009 if v_1.Op != OpAMD64SHLQconst { 21010 break 21011 } 21012 if v_1.AuxInt != 3 { 21013 break 21014 } 21015 idx := v_1.Args[0] 21016 val := v.Args[2] 21017 mem := v.Args[3] 21018 v.reset(OpAMD64MOVQstoreidx8) 21019 v.AuxInt = c 21020 v.Aux = sym 21021 v.AddArg(ptr) 21022 v.AddArg(idx) 21023 v.AddArg(val) 21024 v.AddArg(mem) 21025 return true 21026 } 21027 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 21028 // cond: is32Bit(c+d) 21029 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 21030 for { 21031 c := v.AuxInt 21032 sym := v.Aux 21033 _ = v.Args[3] 21034 v_0 := v.Args[0] 21035 if v_0.Op != OpAMD64ADDQconst { 21036 break 21037 } 21038 d := v_0.AuxInt 21039 ptr := v_0.Args[0] 21040 idx := v.Args[1] 21041 val := v.Args[2] 21042 mem := v.Args[3] 21043 if !(is32Bit(c + d)) { 21044 break 21045 } 21046 v.reset(OpAMD64MOVQstoreidx1) 21047 
v.AuxInt = c + d 21048 v.Aux = sym 21049 v.AddArg(ptr) 21050 v.AddArg(idx) 21051 v.AddArg(val) 21052 v.AddArg(mem) 21053 return true 21054 } 21055 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 21056 // cond: is32Bit(c+d) 21057 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 21058 for { 21059 c := v.AuxInt 21060 sym := v.Aux 21061 _ = v.Args[3] 21062 ptr := v.Args[0] 21063 v_1 := v.Args[1] 21064 if v_1.Op != OpAMD64ADDQconst { 21065 break 21066 } 21067 d := v_1.AuxInt 21068 idx := v_1.Args[0] 21069 val := v.Args[2] 21070 mem := v.Args[3] 21071 if !(is32Bit(c + d)) { 21072 break 21073 } 21074 v.reset(OpAMD64MOVQstoreidx1) 21075 v.AuxInt = c + d 21076 v.Aux = sym 21077 v.AddArg(ptr) 21078 v.AddArg(idx) 21079 v.AddArg(val) 21080 v.AddArg(mem) 21081 return true 21082 } 21083 // match: (MOVQstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 21084 // cond: is32Bit(i+c) 21085 // result: (MOVQstore [i+c] {s} p w mem) 21086 for { 21087 i := v.AuxInt 21088 s := v.Aux 21089 _ = v.Args[3] 21090 p := v.Args[0] 21091 v_1 := v.Args[1] 21092 if v_1.Op != OpAMD64MOVQconst { 21093 break 21094 } 21095 c := v_1.AuxInt 21096 w := v.Args[2] 21097 mem := v.Args[3] 21098 if !(is32Bit(i + c)) { 21099 break 21100 } 21101 v.reset(OpAMD64MOVQstore) 21102 v.AuxInt = i + c 21103 v.Aux = s 21104 v.AddArg(p) 21105 v.AddArg(w) 21106 v.AddArg(mem) 21107 return true 21108 } 21109 return false 21110 } 21111 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { 21112 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 21113 // cond: is32Bit(c+d) 21114 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 21115 for { 21116 c := v.AuxInt 21117 sym := v.Aux 21118 _ = v.Args[3] 21119 v_0 := v.Args[0] 21120 if v_0.Op != OpAMD64ADDQconst { 21121 break 21122 } 21123 d := v_0.AuxInt 21124 ptr := v_0.Args[0] 21125 idx := v.Args[1] 21126 val := v.Args[2] 21127 mem := v.Args[3] 21128 if !(is32Bit(c + d)) { 21129 break 21130 } 21131 v.reset(OpAMD64MOVQstoreidx8) 21132 v.AuxInt = c + d 21133 v.Aux = sym 21134 v.AddArg(ptr) 21135 v.AddArg(idx) 21136 v.AddArg(val) 21137 v.AddArg(mem) 21138 return true 21139 } 21140 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 21141 // cond: is32Bit(c+8*d) 21142 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 21143 for { 21144 c := v.AuxInt 21145 sym := v.Aux 21146 _ = v.Args[3] 21147 ptr := v.Args[0] 21148 v_1 := v.Args[1] 21149 if v_1.Op != OpAMD64ADDQconst { 21150 break 21151 } 21152 d := v_1.AuxInt 21153 idx := v_1.Args[0] 21154 val := v.Args[2] 21155 mem := v.Args[3] 21156 if !(is32Bit(c + 8*d)) { 21157 break 21158 } 21159 v.reset(OpAMD64MOVQstoreidx8) 21160 v.AuxInt = c + 8*d 21161 v.Aux = sym 21162 v.AddArg(ptr) 21163 v.AddArg(idx) 21164 v.AddArg(val) 21165 v.AddArg(mem) 21166 return true 21167 } 21168 // match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem) 21169 // cond: is32Bit(i+8*c) 21170 // result: (MOVQstore [i+8*c] {s} p w mem) 21171 for { 21172 i := v.AuxInt 21173 s := v.Aux 21174 _ = v.Args[3] 21175 p := v.Args[0] 21176 v_1 := v.Args[1] 21177 if v_1.Op != OpAMD64MOVQconst { 21178 break 21179 } 21180 c := v_1.AuxInt 21181 w := v.Args[2] 21182 mem := v.Args[3] 21183 if !(is32Bit(i + 8*c)) { 21184 break 21185 } 21186 v.reset(OpAMD64MOVQstore) 21187 v.AuxInt = i + 8*c 21188 v.Aux = s 21189 v.AddArg(p) 21190 v.AddArg(w) 21191 v.AddArg(mem) 21192 return true 21193 } 21194 return false 21195 } 21196 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { 21197 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) 
mem) 21198 // cond: is32Bit(off1+off2) 21199 // result: (MOVSDload [off1+off2] {sym} ptr mem) 21200 for { 21201 off1 := v.AuxInt 21202 sym := v.Aux 21203 _ = v.Args[1] 21204 v_0 := v.Args[0] 21205 if v_0.Op != OpAMD64ADDQconst { 21206 break 21207 } 21208 off2 := v_0.AuxInt 21209 ptr := v_0.Args[0] 21210 mem := v.Args[1] 21211 if !(is32Bit(off1 + off2)) { 21212 break 21213 } 21214 v.reset(OpAMD64MOVSDload) 21215 v.AuxInt = off1 + off2 21216 v.Aux = sym 21217 v.AddArg(ptr) 21218 v.AddArg(mem) 21219 return true 21220 } 21221 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 21222 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21223 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 21224 for { 21225 off1 := v.AuxInt 21226 sym1 := v.Aux 21227 _ = v.Args[1] 21228 v_0 := v.Args[0] 21229 if v_0.Op != OpAMD64LEAQ { 21230 break 21231 } 21232 off2 := v_0.AuxInt 21233 sym2 := v_0.Aux 21234 base := v_0.Args[0] 21235 mem := v.Args[1] 21236 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21237 break 21238 } 21239 v.reset(OpAMD64MOVSDload) 21240 v.AuxInt = off1 + off2 21241 v.Aux = mergeSym(sym1, sym2) 21242 v.AddArg(base) 21243 v.AddArg(mem) 21244 return true 21245 } 21246 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 21247 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21248 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 21249 for { 21250 off1 := v.AuxInt 21251 sym1 := v.Aux 21252 _ = v.Args[1] 21253 v_0 := v.Args[0] 21254 if v_0.Op != OpAMD64LEAQ1 { 21255 break 21256 } 21257 off2 := v_0.AuxInt 21258 sym2 := v_0.Aux 21259 _ = v_0.Args[1] 21260 ptr := v_0.Args[0] 21261 idx := v_0.Args[1] 21262 mem := v.Args[1] 21263 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21264 break 21265 } 21266 v.reset(OpAMD64MOVSDloadidx1) 21267 v.AuxInt = off1 + off2 21268 v.Aux = mergeSym(sym1, sym2) 21269 v.AddArg(ptr) 21270 v.AddArg(idx) 21271 v.AddArg(mem) 21272 return true 21273 } 21274 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 21275 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21276 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 21277 for { 21278 off1 := v.AuxInt 21279 sym1 := v.Aux 21280 _ = v.Args[1] 21281 v_0 := v.Args[0] 21282 if v_0.Op != OpAMD64LEAQ8 { 21283 break 21284 } 21285 off2 := v_0.AuxInt 21286 sym2 := v_0.Aux 21287 _ = v_0.Args[1] 21288 ptr := v_0.Args[0] 21289 idx := v_0.Args[1] 21290 mem := v.Args[1] 21291 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21292 break 21293 } 21294 v.reset(OpAMD64MOVSDloadidx8) 21295 v.AuxInt = off1 + off2 21296 v.Aux = mergeSym(sym1, sym2) 21297 v.AddArg(ptr) 21298 v.AddArg(idx) 21299 v.AddArg(mem) 21300 return true 21301 } 21302 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 21303 // cond: ptr.Op != OpSB 21304 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 21305 for { 21306 off := v.AuxInt 21307 sym := v.Aux 21308 _ = v.Args[1] 21309 v_0 := v.Args[0] 21310 if v_0.Op != OpAMD64ADDQ { 21311 break 21312 } 21313 _ = v_0.Args[1] 21314 ptr := v_0.Args[0] 21315 idx := v_0.Args[1] 21316 mem := v.Args[1] 21317 if !(ptr.Op != OpSB) { 21318 break 21319 } 21320 v.reset(OpAMD64MOVSDloadidx1) 21321 v.AuxInt = off 21322 v.Aux = sym 21323 v.AddArg(ptr) 21324 v.AddArg(idx) 21325 v.AddArg(mem) 21326 return true 21327 } 21328 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) 21329 // cond: 21330 // result: (MOVQi2f val) 21331 for { 21332 off := v.AuxInt 21333 sym := v.Aux 21334 _ 
= v.Args[1] 21335 ptr := v.Args[0] 21336 v_1 := v.Args[1] 21337 if v_1.Op != OpAMD64MOVQstore { 21338 break 21339 } 21340 if v_1.AuxInt != off { 21341 break 21342 } 21343 if v_1.Aux != sym { 21344 break 21345 } 21346 _ = v_1.Args[2] 21347 if ptr != v_1.Args[0] { 21348 break 21349 } 21350 val := v_1.Args[1] 21351 v.reset(OpAMD64MOVQi2f) 21352 v.AddArg(val) 21353 return true 21354 } 21355 return false 21356 } 21357 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 21358 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 21359 // cond: 21360 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 21361 for { 21362 c := v.AuxInt 21363 sym := v.Aux 21364 _ = v.Args[2] 21365 ptr := v.Args[0] 21366 v_1 := v.Args[1] 21367 if v_1.Op != OpAMD64SHLQconst { 21368 break 21369 } 21370 if v_1.AuxInt != 3 { 21371 break 21372 } 21373 idx := v_1.Args[0] 21374 mem := v.Args[2] 21375 v.reset(OpAMD64MOVSDloadidx8) 21376 v.AuxInt = c 21377 v.Aux = sym 21378 v.AddArg(ptr) 21379 v.AddArg(idx) 21380 v.AddArg(mem) 21381 return true 21382 } 21383 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 21384 // cond: is32Bit(c+d) 21385 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 21386 for { 21387 c := v.AuxInt 21388 sym := v.Aux 21389 _ = v.Args[2] 21390 v_0 := v.Args[0] 21391 if v_0.Op != OpAMD64ADDQconst { 21392 break 21393 } 21394 d := v_0.AuxInt 21395 ptr := v_0.Args[0] 21396 idx := v.Args[1] 21397 mem := v.Args[2] 21398 if !(is32Bit(c + d)) { 21399 break 21400 } 21401 v.reset(OpAMD64MOVSDloadidx1) 21402 v.AuxInt = c + d 21403 v.Aux = sym 21404 v.AddArg(ptr) 21405 v.AddArg(idx) 21406 v.AddArg(mem) 21407 return true 21408 } 21409 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 21410 // cond: is32Bit(c+d) 21411 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 21412 for { 21413 c := v.AuxInt 21414 sym := v.Aux 21415 _ = v.Args[2] 21416 ptr := v.Args[0] 21417 v_1 := v.Args[1] 21418 if v_1.Op != OpAMD64ADDQconst { 21419 break 21420 } 21421 d := v_1.AuxInt 21422 idx := v_1.Args[0] 21423 mem := v.Args[2] 21424 if !(is32Bit(c + d)) { 21425 break 21426 } 21427 v.reset(OpAMD64MOVSDloadidx1) 21428 v.AuxInt = c + d 21429 v.Aux = sym 21430 v.AddArg(ptr) 21431 v.AddArg(idx) 21432 v.AddArg(mem) 21433 return true 21434 } 21435 // match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem) 21436 // cond: is32Bit(i+c) 21437 // result: (MOVSDload [i+c] {s} p mem) 21438 for { 21439 i := v.AuxInt 21440 s := v.Aux 21441 _ = v.Args[2] 21442 p := v.Args[0] 21443 v_1 := v.Args[1] 21444 if v_1.Op != OpAMD64MOVQconst { 21445 break 21446 } 21447 c := v_1.AuxInt 21448 mem := v.Args[2] 21449 if !(is32Bit(i + c)) { 21450 break 21451 } 21452 v.reset(OpAMD64MOVSDload) 21453 v.AuxInt = i + c 21454 v.Aux = s 21455 v.AddArg(p) 21456 v.AddArg(mem) 21457 return true 21458 } 21459 return false 21460 } 21461 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 21462 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 21463 // cond: is32Bit(c+d) 21464 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 21465 for { 21466 c := v.AuxInt 21467 sym := v.Aux 21468 _ = v.Args[2] 21469 v_0 := v.Args[0] 21470 if v_0.Op != OpAMD64ADDQconst { 21471 break 21472 } 21473 d := v_0.AuxInt 21474 ptr := v_0.Args[0] 21475 idx := v.Args[1] 21476 mem := v.Args[2] 21477 if !(is32Bit(c + d)) { 21478 break 21479 } 21480 v.reset(OpAMD64MOVSDloadidx8) 21481 v.AuxInt = c + d 21482 v.Aux = sym 21483 v.AddArg(ptr) 21484 v.AddArg(idx) 21485 v.AddArg(mem) 21486 return true 21487 } 21488 // match: (MOVSDloadidx8 [c] 
{sym} ptr (ADDQconst [d] idx) mem) 21489 // cond: is32Bit(c+8*d) 21490 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 21491 for { 21492 c := v.AuxInt 21493 sym := v.Aux 21494 _ = v.Args[2] 21495 ptr := v.Args[0] 21496 v_1 := v.Args[1] 21497 if v_1.Op != OpAMD64ADDQconst { 21498 break 21499 } 21500 d := v_1.AuxInt 21501 idx := v_1.Args[0] 21502 mem := v.Args[2] 21503 if !(is32Bit(c + 8*d)) { 21504 break 21505 } 21506 v.reset(OpAMD64MOVSDloadidx8) 21507 v.AuxInt = c + 8*d 21508 v.Aux = sym 21509 v.AddArg(ptr) 21510 v.AddArg(idx) 21511 v.AddArg(mem) 21512 return true 21513 } 21514 // match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem) 21515 // cond: is32Bit(i+8*c) 21516 // result: (MOVSDload [i+8*c] {s} p mem) 21517 for { 21518 i := v.AuxInt 21519 s := v.Aux 21520 _ = v.Args[2] 21521 p := v.Args[0] 21522 v_1 := v.Args[1] 21523 if v_1.Op != OpAMD64MOVQconst { 21524 break 21525 } 21526 c := v_1.AuxInt 21527 mem := v.Args[2] 21528 if !(is32Bit(i + 8*c)) { 21529 break 21530 } 21531 v.reset(OpAMD64MOVSDload) 21532 v.AuxInt = i + 8*c 21533 v.Aux = s 21534 v.AddArg(p) 21535 v.AddArg(mem) 21536 return true 21537 } 21538 return false 21539 } 21540 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 21541 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 21542 // cond: is32Bit(off1+off2) 21543 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 21544 for { 21545 off1 := v.AuxInt 21546 sym := v.Aux 21547 _ = v.Args[2] 21548 v_0 := v.Args[0] 21549 if v_0.Op != OpAMD64ADDQconst { 21550 break 21551 } 21552 off2 := v_0.AuxInt 21553 ptr := v_0.Args[0] 21554 val := v.Args[1] 21555 mem := v.Args[2] 21556 if !(is32Bit(off1 + off2)) { 21557 break 21558 } 21559 v.reset(OpAMD64MOVSDstore) 21560 v.AuxInt = off1 + off2 21561 v.Aux = sym 21562 v.AddArg(ptr) 21563 v.AddArg(val) 21564 v.AddArg(mem) 21565 return true 21566 } 21567 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 21568 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21569 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 21570 for { 21571 off1 := v.AuxInt 21572 sym1 := v.Aux 21573 _ = v.Args[2] 21574 v_0 := v.Args[0] 21575 if v_0.Op != OpAMD64LEAQ { 21576 break 21577 } 21578 off2 := v_0.AuxInt 21579 sym2 := v_0.Aux 21580 base := v_0.Args[0] 21581 val := v.Args[1] 21582 mem := v.Args[2] 21583 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21584 break 21585 } 21586 v.reset(OpAMD64MOVSDstore) 21587 v.AuxInt = off1 + off2 21588 v.Aux = mergeSym(sym1, sym2) 21589 v.AddArg(base) 21590 v.AddArg(val) 21591 v.AddArg(mem) 21592 return true 21593 } 21594 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 21595 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21596 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 21597 for { 21598 off1 := v.AuxInt 21599 sym1 := v.Aux 21600 _ = v.Args[2] 21601 v_0 := v.Args[0] 21602 if v_0.Op != OpAMD64LEAQ1 { 21603 break 21604 } 21605 off2 := v_0.AuxInt 21606 sym2 := v_0.Aux 21607 _ = v_0.Args[1] 21608 ptr := v_0.Args[0] 21609 idx := v_0.Args[1] 21610 val := v.Args[1] 21611 mem := v.Args[2] 21612 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21613 break 21614 } 21615 v.reset(OpAMD64MOVSDstoreidx1) 21616 v.AuxInt = off1 + off2 21617 v.Aux = mergeSym(sym1, sym2) 21618 v.AddArg(ptr) 21619 v.AddArg(idx) 21620 v.AddArg(val) 21621 v.AddArg(mem) 21622 return true 21623 } 21624 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 21625 // cond: is32Bit(off1+off2) 
&& canMergeSym(sym1, sym2) 21626 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 21627 for { 21628 off1 := v.AuxInt 21629 sym1 := v.Aux 21630 _ = v.Args[2] 21631 v_0 := v.Args[0] 21632 if v_0.Op != OpAMD64LEAQ8 { 21633 break 21634 } 21635 off2 := v_0.AuxInt 21636 sym2 := v_0.Aux 21637 _ = v_0.Args[1] 21638 ptr := v_0.Args[0] 21639 idx := v_0.Args[1] 21640 val := v.Args[1] 21641 mem := v.Args[2] 21642 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21643 break 21644 } 21645 v.reset(OpAMD64MOVSDstoreidx8) 21646 v.AuxInt = off1 + off2 21647 v.Aux = mergeSym(sym1, sym2) 21648 v.AddArg(ptr) 21649 v.AddArg(idx) 21650 v.AddArg(val) 21651 v.AddArg(mem) 21652 return true 21653 } 21654 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 21655 // cond: ptr.Op != OpSB 21656 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 21657 for { 21658 off := v.AuxInt 21659 sym := v.Aux 21660 _ = v.Args[2] 21661 v_0 := v.Args[0] 21662 if v_0.Op != OpAMD64ADDQ { 21663 break 21664 } 21665 _ = v_0.Args[1] 21666 ptr := v_0.Args[0] 21667 idx := v_0.Args[1] 21668 val := v.Args[1] 21669 mem := v.Args[2] 21670 if !(ptr.Op != OpSB) { 21671 break 21672 } 21673 v.reset(OpAMD64MOVSDstoreidx1) 21674 v.AuxInt = off 21675 v.Aux = sym 21676 v.AddArg(ptr) 21677 v.AddArg(idx) 21678 v.AddArg(val) 21679 v.AddArg(mem) 21680 return true 21681 } 21682 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) 21683 // cond: 21684 // result: (MOVQstore [off] {sym} ptr val mem) 21685 for { 21686 off := v.AuxInt 21687 sym := v.Aux 21688 _ = v.Args[2] 21689 ptr := v.Args[0] 21690 v_1 := v.Args[1] 21691 if v_1.Op != OpAMD64MOVQi2f { 21692 break 21693 } 21694 val := v_1.Args[0] 21695 mem := v.Args[2] 21696 v.reset(OpAMD64MOVQstore) 21697 v.AuxInt = off 21698 v.Aux = sym 21699 v.AddArg(ptr) 21700 v.AddArg(val) 21701 v.AddArg(mem) 21702 return true 21703 } 21704 return false 21705 } 21706 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 21707 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 21708 // cond: 21709 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 21710 for { 21711 c := v.AuxInt 21712 sym := v.Aux 21713 _ = v.Args[3] 21714 ptr := v.Args[0] 21715 v_1 := v.Args[1] 21716 if v_1.Op != OpAMD64SHLQconst { 21717 break 21718 } 21719 if v_1.AuxInt != 3 { 21720 break 21721 } 21722 idx := v_1.Args[0] 21723 val := v.Args[2] 21724 mem := v.Args[3] 21725 v.reset(OpAMD64MOVSDstoreidx8) 21726 v.AuxInt = c 21727 v.Aux = sym 21728 v.AddArg(ptr) 21729 v.AddArg(idx) 21730 v.AddArg(val) 21731 v.AddArg(mem) 21732 return true 21733 } 21734 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 21735 // cond: is32Bit(c+d) 21736 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 21737 for { 21738 c := v.AuxInt 21739 sym := v.Aux 21740 _ = v.Args[3] 21741 v_0 := v.Args[0] 21742 if v_0.Op != OpAMD64ADDQconst { 21743 break 21744 } 21745 d := v_0.AuxInt 21746 ptr := v_0.Args[0] 21747 idx := v.Args[1] 21748 val := v.Args[2] 21749 mem := v.Args[3] 21750 if !(is32Bit(c + d)) { 21751 break 21752 } 21753 v.reset(OpAMD64MOVSDstoreidx1) 21754 v.AuxInt = c + d 21755 v.Aux = sym 21756 v.AddArg(ptr) 21757 v.AddArg(idx) 21758 v.AddArg(val) 21759 v.AddArg(mem) 21760 return true 21761 } 21762 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 21763 // cond: is32Bit(c+d) 21764 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 21765 for { 21766 c := v.AuxInt 21767 sym := v.Aux 21768 _ = v.Args[3] 21769 ptr := v.Args[0] 21770 v_1 := v.Args[1] 21771 
if v_1.Op != OpAMD64ADDQconst { 21772 break 21773 } 21774 d := v_1.AuxInt 21775 idx := v_1.Args[0] 21776 val := v.Args[2] 21777 mem := v.Args[3] 21778 if !(is32Bit(c + d)) { 21779 break 21780 } 21781 v.reset(OpAMD64MOVSDstoreidx1) 21782 v.AuxInt = c + d 21783 v.Aux = sym 21784 v.AddArg(ptr) 21785 v.AddArg(idx) 21786 v.AddArg(val) 21787 v.AddArg(mem) 21788 return true 21789 } 21790 // match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 21791 // cond: is32Bit(i+c) 21792 // result: (MOVSDstore [i+c] {s} p w mem) 21793 for { 21794 i := v.AuxInt 21795 s := v.Aux 21796 _ = v.Args[3] 21797 p := v.Args[0] 21798 v_1 := v.Args[1] 21799 if v_1.Op != OpAMD64MOVQconst { 21800 break 21801 } 21802 c := v_1.AuxInt 21803 w := v.Args[2] 21804 mem := v.Args[3] 21805 if !(is32Bit(i + c)) { 21806 break 21807 } 21808 v.reset(OpAMD64MOVSDstore) 21809 v.AuxInt = i + c 21810 v.Aux = s 21811 v.AddArg(p) 21812 v.AddArg(w) 21813 v.AddArg(mem) 21814 return true 21815 } 21816 return false 21817 } 21818 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 21819 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 21820 // cond: is32Bit(c+d) 21821 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 21822 for { 21823 c := v.AuxInt 21824 sym := v.Aux 21825 _ = v.Args[3] 21826 v_0 := v.Args[0] 21827 if v_0.Op != OpAMD64ADDQconst { 21828 break 21829 } 21830 d := v_0.AuxInt 21831 ptr := v_0.Args[0] 21832 idx := v.Args[1] 21833 val := v.Args[2] 21834 mem := v.Args[3] 21835 if !(is32Bit(c + d)) { 21836 break 21837 } 21838 v.reset(OpAMD64MOVSDstoreidx8) 21839 v.AuxInt = c + d 21840 v.Aux = sym 21841 v.AddArg(ptr) 21842 v.AddArg(idx) 21843 v.AddArg(val) 21844 v.AddArg(mem) 21845 return true 21846 } 21847 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 21848 // cond: is32Bit(c+8*d) 21849 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 21850 for { 21851 c := v.AuxInt 21852 sym := v.Aux 21853 _ = v.Args[3] 21854 ptr := v.Args[0] 21855 v_1 := v.Args[1] 21856 if v_1.Op != OpAMD64ADDQconst { 21857 break 21858 } 21859 d := v_1.AuxInt 21860 idx := v_1.Args[0] 21861 val := v.Args[2] 21862 mem := v.Args[3] 21863 if !(is32Bit(c + 8*d)) { 21864 break 21865 } 21866 v.reset(OpAMD64MOVSDstoreidx8) 21867 v.AuxInt = c + 8*d 21868 v.Aux = sym 21869 v.AddArg(ptr) 21870 v.AddArg(idx) 21871 v.AddArg(val) 21872 v.AddArg(mem) 21873 return true 21874 } 21875 // match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem) 21876 // cond: is32Bit(i+8*c) 21877 // result: (MOVSDstore [i+8*c] {s} p w mem) 21878 for { 21879 i := v.AuxInt 21880 s := v.Aux 21881 _ = v.Args[3] 21882 p := v.Args[0] 21883 v_1 := v.Args[1] 21884 if v_1.Op != OpAMD64MOVQconst { 21885 break 21886 } 21887 c := v_1.AuxInt 21888 w := v.Args[2] 21889 mem := v.Args[3] 21890 if !(is32Bit(i + 8*c)) { 21891 break 21892 } 21893 v.reset(OpAMD64MOVSDstore) 21894 v.AuxInt = i + 8*c 21895 v.Aux = s 21896 v.AddArg(p) 21897 v.AddArg(w) 21898 v.AddArg(mem) 21899 return true 21900 } 21901 return false 21902 } 21903 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 21904 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 21905 // cond: is32Bit(off1+off2) 21906 // result: (MOVSSload [off1+off2] {sym} ptr mem) 21907 for { 21908 off1 := v.AuxInt 21909 sym := v.Aux 21910 _ = v.Args[1] 21911 v_0 := v.Args[0] 21912 if v_0.Op != OpAMD64ADDQconst { 21913 break 21914 } 21915 off2 := v_0.AuxInt 21916 ptr := v_0.Args[0] 21917 mem := v.Args[1] 21918 if !(is32Bit(off1 + off2)) { 21919 break 21920 } 21921 v.reset(OpAMD64MOVSSload) 21922 
v.AuxInt = off1 + off2 21923 v.Aux = sym 21924 v.AddArg(ptr) 21925 v.AddArg(mem) 21926 return true 21927 } 21928 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 21929 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21930 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 21931 for { 21932 off1 := v.AuxInt 21933 sym1 := v.Aux 21934 _ = v.Args[1] 21935 v_0 := v.Args[0] 21936 if v_0.Op != OpAMD64LEAQ { 21937 break 21938 } 21939 off2 := v_0.AuxInt 21940 sym2 := v_0.Aux 21941 base := v_0.Args[0] 21942 mem := v.Args[1] 21943 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21944 break 21945 } 21946 v.reset(OpAMD64MOVSSload) 21947 v.AuxInt = off1 + off2 21948 v.Aux = mergeSym(sym1, sym2) 21949 v.AddArg(base) 21950 v.AddArg(mem) 21951 return true 21952 } 21953 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 21954 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21955 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 21956 for { 21957 off1 := v.AuxInt 21958 sym1 := v.Aux 21959 _ = v.Args[1] 21960 v_0 := v.Args[0] 21961 if v_0.Op != OpAMD64LEAQ1 { 21962 break 21963 } 21964 off2 := v_0.AuxInt 21965 sym2 := v_0.Aux 21966 _ = v_0.Args[1] 21967 ptr := v_0.Args[0] 21968 idx := v_0.Args[1] 21969 mem := v.Args[1] 21970 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21971 break 21972 } 21973 v.reset(OpAMD64MOVSSloadidx1) 21974 v.AuxInt = off1 + off2 21975 v.Aux = mergeSym(sym1, sym2) 21976 v.AddArg(ptr) 21977 v.AddArg(idx) 21978 v.AddArg(mem) 21979 return true 21980 } 21981 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 21982 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21983 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 21984 for { 21985 off1 := v.AuxInt 21986 sym1 := v.Aux 21987 _ = v.Args[1] 21988 v_0 := v.Args[0] 21989 if v_0.Op != OpAMD64LEAQ4 { 21990 break 21991 } 21992 off2 := v_0.AuxInt 21993 sym2 := v_0.Aux 21994 _ = v_0.Args[1] 21995 ptr := v_0.Args[0] 21996 idx := v_0.Args[1] 21997 mem := v.Args[1] 21998 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21999 break 22000 } 22001 v.reset(OpAMD64MOVSSloadidx4) 22002 v.AuxInt = off1 + off2 22003 v.Aux = mergeSym(sym1, sym2) 22004 v.AddArg(ptr) 22005 v.AddArg(idx) 22006 v.AddArg(mem) 22007 return true 22008 } 22009 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 22010 // cond: ptr.Op != OpSB 22011 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 22012 for { 22013 off := v.AuxInt 22014 sym := v.Aux 22015 _ = v.Args[1] 22016 v_0 := v.Args[0] 22017 if v_0.Op != OpAMD64ADDQ { 22018 break 22019 } 22020 _ = v_0.Args[1] 22021 ptr := v_0.Args[0] 22022 idx := v_0.Args[1] 22023 mem := v.Args[1] 22024 if !(ptr.Op != OpSB) { 22025 break 22026 } 22027 v.reset(OpAMD64MOVSSloadidx1) 22028 v.AuxInt = off 22029 v.Aux = sym 22030 v.AddArg(ptr) 22031 v.AddArg(idx) 22032 v.AddArg(mem) 22033 return true 22034 } 22035 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) 22036 // cond: 22037 // result: (MOVLi2f val) 22038 for { 22039 off := v.AuxInt 22040 sym := v.Aux 22041 _ = v.Args[1] 22042 ptr := v.Args[0] 22043 v_1 := v.Args[1] 22044 if v_1.Op != OpAMD64MOVLstore { 22045 break 22046 } 22047 if v_1.AuxInt != off { 22048 break 22049 } 22050 if v_1.Aux != sym { 22051 break 22052 } 22053 _ = v_1.Args[2] 22054 if ptr != v_1.Args[0] { 22055 break 22056 } 22057 val := v_1.Args[1] 22058 v.reset(OpAMD64MOVLi2f) 22059 v.AddArg(val) 22060 return true 22061 } 22062 return false 22063 } 
22064 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { 22065 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 22066 // cond: 22067 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 22068 for { 22069 c := v.AuxInt 22070 sym := v.Aux 22071 _ = v.Args[2] 22072 ptr := v.Args[0] 22073 v_1 := v.Args[1] 22074 if v_1.Op != OpAMD64SHLQconst { 22075 break 22076 } 22077 if v_1.AuxInt != 2 { 22078 break 22079 } 22080 idx := v_1.Args[0] 22081 mem := v.Args[2] 22082 v.reset(OpAMD64MOVSSloadidx4) 22083 v.AuxInt = c 22084 v.Aux = sym 22085 v.AddArg(ptr) 22086 v.AddArg(idx) 22087 v.AddArg(mem) 22088 return true 22089 } 22090 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 22091 // cond: is32Bit(c+d) 22092 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 22093 for { 22094 c := v.AuxInt 22095 sym := v.Aux 22096 _ = v.Args[2] 22097 v_0 := v.Args[0] 22098 if v_0.Op != OpAMD64ADDQconst { 22099 break 22100 } 22101 d := v_0.AuxInt 22102 ptr := v_0.Args[0] 22103 idx := v.Args[1] 22104 mem := v.Args[2] 22105 if !(is32Bit(c + d)) { 22106 break 22107 } 22108 v.reset(OpAMD64MOVSSloadidx1) 22109 v.AuxInt = c + d 22110 v.Aux = sym 22111 v.AddArg(ptr) 22112 v.AddArg(idx) 22113 v.AddArg(mem) 22114 return true 22115 } 22116 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 22117 // cond: is32Bit(c+d) 22118 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 22119 for { 22120 c := v.AuxInt 22121 sym := v.Aux 22122 _ = v.Args[2] 22123 ptr := v.Args[0] 22124 v_1 := v.Args[1] 22125 if v_1.Op != OpAMD64ADDQconst { 22126 break 22127 } 22128 d := v_1.AuxInt 22129 idx := v_1.Args[0] 22130 mem := v.Args[2] 22131 if !(is32Bit(c + d)) { 22132 break 22133 } 22134 v.reset(OpAMD64MOVSSloadidx1) 22135 v.AuxInt = c + d 22136 v.Aux = sym 22137 v.AddArg(ptr) 22138 v.AddArg(idx) 22139 v.AddArg(mem) 22140 return true 22141 } 22142 // match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem) 22143 // cond: is32Bit(i+c) 22144 // result: (MOVSSload [i+c] {s} p mem) 22145 for { 22146 i := v.AuxInt 22147 s := v.Aux 22148 _ = v.Args[2] 22149 p := v.Args[0] 22150 v_1 := v.Args[1] 22151 if v_1.Op != OpAMD64MOVQconst { 22152 break 22153 } 22154 c := v_1.AuxInt 22155 mem := v.Args[2] 22156 if !(is32Bit(i + c)) { 22157 break 22158 } 22159 v.reset(OpAMD64MOVSSload) 22160 v.AuxInt = i + c 22161 v.Aux = s 22162 v.AddArg(p) 22163 v.AddArg(mem) 22164 return true 22165 } 22166 return false 22167 } 22168 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { 22169 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 22170 // cond: is32Bit(c+d) 22171 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 22172 for { 22173 c := v.AuxInt 22174 sym := v.Aux 22175 _ = v.Args[2] 22176 v_0 := v.Args[0] 22177 if v_0.Op != OpAMD64ADDQconst { 22178 break 22179 } 22180 d := v_0.AuxInt 22181 ptr := v_0.Args[0] 22182 idx := v.Args[1] 22183 mem := v.Args[2] 22184 if !(is32Bit(c + d)) { 22185 break 22186 } 22187 v.reset(OpAMD64MOVSSloadidx4) 22188 v.AuxInt = c + d 22189 v.Aux = sym 22190 v.AddArg(ptr) 22191 v.AddArg(idx) 22192 v.AddArg(mem) 22193 return true 22194 } 22195 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 22196 // cond: is32Bit(c+4*d) 22197 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 22198 for { 22199 c := v.AuxInt 22200 sym := v.Aux 22201 _ = v.Args[2] 22202 ptr := v.Args[0] 22203 v_1 := v.Args[1] 22204 if v_1.Op != OpAMD64ADDQconst { 22205 break 22206 } 22207 d := v_1.AuxInt 22208 idx := v_1.Args[0] 22209 mem := v.Args[2] 22210 if !(is32Bit(c + 4*d)) { 22211 break 
22212 } 22213 v.reset(OpAMD64MOVSSloadidx4) 22214 v.AuxInt = c + 4*d 22215 v.Aux = sym 22216 v.AddArg(ptr) 22217 v.AddArg(idx) 22218 v.AddArg(mem) 22219 return true 22220 } 22221 // match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem) 22222 // cond: is32Bit(i+4*c) 22223 // result: (MOVSSload [i+4*c] {s} p mem) 22224 for { 22225 i := v.AuxInt 22226 s := v.Aux 22227 _ = v.Args[2] 22228 p := v.Args[0] 22229 v_1 := v.Args[1] 22230 if v_1.Op != OpAMD64MOVQconst { 22231 break 22232 } 22233 c := v_1.AuxInt 22234 mem := v.Args[2] 22235 if !(is32Bit(i + 4*c)) { 22236 break 22237 } 22238 v.reset(OpAMD64MOVSSload) 22239 v.AuxInt = i + 4*c 22240 v.Aux = s 22241 v.AddArg(p) 22242 v.AddArg(mem) 22243 return true 22244 } 22245 return false 22246 } 22247 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { 22248 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 22249 // cond: is32Bit(off1+off2) 22250 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 22251 for { 22252 off1 := v.AuxInt 22253 sym := v.Aux 22254 _ = v.Args[2] 22255 v_0 := v.Args[0] 22256 if v_0.Op != OpAMD64ADDQconst { 22257 break 22258 } 22259 off2 := v_0.AuxInt 22260 ptr := v_0.Args[0] 22261 val := v.Args[1] 22262 mem := v.Args[2] 22263 if !(is32Bit(off1 + off2)) { 22264 break 22265 } 22266 v.reset(OpAMD64MOVSSstore) 22267 v.AuxInt = off1 + off2 22268 v.Aux = sym 22269 v.AddArg(ptr) 22270 v.AddArg(val) 22271 v.AddArg(mem) 22272 return true 22273 } 22274 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 22275 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 22276 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 22277 for { 22278 off1 := v.AuxInt 22279 sym1 := v.Aux 22280 _ = v.Args[2] 22281 v_0 := v.Args[0] 22282 if v_0.Op != OpAMD64LEAQ { 22283 break 22284 } 22285 off2 := v_0.AuxInt 22286 sym2 := v_0.Aux 22287 base := v_0.Args[0] 22288 val := v.Args[1] 22289 mem := v.Args[2] 22290 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 22291 break 22292 } 22293 v.reset(OpAMD64MOVSSstore) 22294 v.AuxInt = off1 + off2 22295 v.Aux = mergeSym(sym1, sym2) 22296 v.AddArg(base) 22297 v.AddArg(val) 22298 v.AddArg(mem) 22299 return true 22300 } 22301 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 22302 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 22303 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 22304 for { 22305 off1 := v.AuxInt 22306 sym1 := v.Aux 22307 _ = v.Args[2] 22308 v_0 := v.Args[0] 22309 if v_0.Op != OpAMD64LEAQ1 { 22310 break 22311 } 22312 off2 := v_0.AuxInt 22313 sym2 := v_0.Aux 22314 _ = v_0.Args[1] 22315 ptr := v_0.Args[0] 22316 idx := v_0.Args[1] 22317 val := v.Args[1] 22318 mem := v.Args[2] 22319 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 22320 break 22321 } 22322 v.reset(OpAMD64MOVSSstoreidx1) 22323 v.AuxInt = off1 + off2 22324 v.Aux = mergeSym(sym1, sym2) 22325 v.AddArg(ptr) 22326 v.AddArg(idx) 22327 v.AddArg(val) 22328 v.AddArg(mem) 22329 return true 22330 } 22331 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 22332 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 22333 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 22334 for { 22335 off1 := v.AuxInt 22336 sym1 := v.Aux 22337 _ = v.Args[2] 22338 v_0 := v.Args[0] 22339 if v_0.Op != OpAMD64LEAQ4 { 22340 break 22341 } 22342 off2 := v_0.AuxInt 22343 sym2 := v_0.Aux 22344 _ = v_0.Args[1] 22345 ptr := v_0.Args[0] 22346 idx := v_0.Args[1] 22347 val := v.Args[1] 22348 
mem := v.Args[2] 22349 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 22350 break 22351 } 22352 v.reset(OpAMD64MOVSSstoreidx4) 22353 v.AuxInt = off1 + off2 22354 v.Aux = mergeSym(sym1, sym2) 22355 v.AddArg(ptr) 22356 v.AddArg(idx) 22357 v.AddArg(val) 22358 v.AddArg(mem) 22359 return true 22360 } 22361 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 22362 // cond: ptr.Op != OpSB 22363 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 22364 for { 22365 off := v.AuxInt 22366 sym := v.Aux 22367 _ = v.Args[2] 22368 v_0 := v.Args[0] 22369 if v_0.Op != OpAMD64ADDQ { 22370 break 22371 } 22372 _ = v_0.Args[1] 22373 ptr := v_0.Args[0] 22374 idx := v_0.Args[1] 22375 val := v.Args[1] 22376 mem := v.Args[2] 22377 if !(ptr.Op != OpSB) { 22378 break 22379 } 22380 v.reset(OpAMD64MOVSSstoreidx1) 22381 v.AuxInt = off 22382 v.Aux = sym 22383 v.AddArg(ptr) 22384 v.AddArg(idx) 22385 v.AddArg(val) 22386 v.AddArg(mem) 22387 return true 22388 } 22389 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) 22390 // cond: 22391 // result: (MOVLstore [off] {sym} ptr val mem) 22392 for { 22393 off := v.AuxInt 22394 sym := v.Aux 22395 _ = v.Args[2] 22396 ptr := v.Args[0] 22397 v_1 := v.Args[1] 22398 if v_1.Op != OpAMD64MOVLi2f { 22399 break 22400 } 22401 val := v_1.Args[0] 22402 mem := v.Args[2] 22403 v.reset(OpAMD64MOVLstore) 22404 v.AuxInt = off 22405 v.Aux = sym 22406 v.AddArg(ptr) 22407 v.AddArg(val) 22408 v.AddArg(mem) 22409 return true 22410 } 22411 return false 22412 } 22413 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { 22414 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 22415 // cond: 22416 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 22417 for { 22418 c := v.AuxInt 22419 sym := v.Aux 22420 _ = v.Args[3] 22421 ptr := v.Args[0] 22422 v_1 := v.Args[1] 22423 if v_1.Op != OpAMD64SHLQconst { 22424 break 22425 } 22426 if v_1.AuxInt != 2 { 22427 break 22428 } 22429 idx := v_1.Args[0] 22430 val := v.Args[2] 22431 mem := v.Args[3] 22432 v.reset(OpAMD64MOVSSstoreidx4) 22433 v.AuxInt = c 22434 v.Aux = sym 22435 v.AddArg(ptr) 22436 v.AddArg(idx) 22437 v.AddArg(val) 22438 v.AddArg(mem) 22439 return true 22440 } 22441 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 22442 // cond: is32Bit(c+d) 22443 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 22444 for { 22445 c := v.AuxInt 22446 sym := v.Aux 22447 _ = v.Args[3] 22448 v_0 := v.Args[0] 22449 if v_0.Op != OpAMD64ADDQconst { 22450 break 22451 } 22452 d := v_0.AuxInt 22453 ptr := v_0.Args[0] 22454 idx := v.Args[1] 22455 val := v.Args[2] 22456 mem := v.Args[3] 22457 if !(is32Bit(c + d)) { 22458 break 22459 } 22460 v.reset(OpAMD64MOVSSstoreidx1) 22461 v.AuxInt = c + d 22462 v.Aux = sym 22463 v.AddArg(ptr) 22464 v.AddArg(idx) 22465 v.AddArg(val) 22466 v.AddArg(mem) 22467 return true 22468 } 22469 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 22470 // cond: is32Bit(c+d) 22471 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 22472 for { 22473 c := v.AuxInt 22474 sym := v.Aux 22475 _ = v.Args[3] 22476 ptr := v.Args[0] 22477 v_1 := v.Args[1] 22478 if v_1.Op != OpAMD64ADDQconst { 22479 break 22480 } 22481 d := v_1.AuxInt 22482 idx := v_1.Args[0] 22483 val := v.Args[2] 22484 mem := v.Args[3] 22485 if !(is32Bit(c + d)) { 22486 break 22487 } 22488 v.reset(OpAMD64MOVSSstoreidx1) 22489 v.AuxInt = c + d 22490 v.Aux = sym 22491 v.AddArg(ptr) 22492 v.AddArg(idx) 22493 v.AddArg(val) 22494 v.AddArg(mem) 22495 return true 22496 } 22497 // match: (MOVSSstoreidx1 
[i] {s} p (MOVQconst [c]) w mem) 22498 // cond: is32Bit(i+c) 22499 // result: (MOVSSstore [i+c] {s} p w mem) 22500 for { 22501 i := v.AuxInt 22502 s := v.Aux 22503 _ = v.Args[3] 22504 p := v.Args[0] 22505 v_1 := v.Args[1] 22506 if v_1.Op != OpAMD64MOVQconst { 22507 break 22508 } 22509 c := v_1.AuxInt 22510 w := v.Args[2] 22511 mem := v.Args[3] 22512 if !(is32Bit(i + c)) { 22513 break 22514 } 22515 v.reset(OpAMD64MOVSSstore) 22516 v.AuxInt = i + c 22517 v.Aux = s 22518 v.AddArg(p) 22519 v.AddArg(w) 22520 v.AddArg(mem) 22521 return true 22522 } 22523 return false 22524 } 22525 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { 22526 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 22527 // cond: is32Bit(c+d) 22528 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 22529 for { 22530 c := v.AuxInt 22531 sym := v.Aux 22532 _ = v.Args[3] 22533 v_0 := v.Args[0] 22534 if v_0.Op != OpAMD64ADDQconst { 22535 break 22536 } 22537 d := v_0.AuxInt 22538 ptr := v_0.Args[0] 22539 idx := v.Args[1] 22540 val := v.Args[2] 22541 mem := v.Args[3] 22542 if !(is32Bit(c + d)) { 22543 break 22544 } 22545 v.reset(OpAMD64MOVSSstoreidx4) 22546 v.AuxInt = c + d 22547 v.Aux = sym 22548 v.AddArg(ptr) 22549 v.AddArg(idx) 22550 v.AddArg(val) 22551 v.AddArg(mem) 22552 return true 22553 } 22554 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 22555 // cond: is32Bit(c+4*d) 22556 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 22557 for { 22558 c := v.AuxInt 22559 sym := v.Aux 22560 _ = v.Args[3] 22561 ptr := v.Args[0] 22562 v_1 := v.Args[1] 22563 if v_1.Op != OpAMD64ADDQconst { 22564 break 22565 } 22566 d := v_1.AuxInt 22567 idx := v_1.Args[0] 22568 val := v.Args[2] 22569 mem := v.Args[3] 22570 if !(is32Bit(c + 4*d)) { 22571 break 22572 } 22573 v.reset(OpAMD64MOVSSstoreidx4) 22574 v.AuxInt = c + 4*d 22575 v.Aux = sym 22576 v.AddArg(ptr) 22577 v.AddArg(idx) 22578 v.AddArg(val) 22579 v.AddArg(mem) 22580 return true 22581 } 22582 // match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem) 22583 // cond: is32Bit(i+4*c) 22584 // result: (MOVSSstore [i+4*c] {s} p w mem) 22585 for { 22586 i := v.AuxInt 22587 s := v.Aux 22588 _ = v.Args[3] 22589 p := v.Args[0] 22590 v_1 := v.Args[1] 22591 if v_1.Op != OpAMD64MOVQconst { 22592 break 22593 } 22594 c := v_1.AuxInt 22595 w := v.Args[2] 22596 mem := v.Args[3] 22597 if !(is32Bit(i + 4*c)) { 22598 break 22599 } 22600 v.reset(OpAMD64MOVSSstore) 22601 v.AuxInt = i + 4*c 22602 v.Aux = s 22603 v.AddArg(p) 22604 v.AddArg(w) 22605 v.AddArg(mem) 22606 return true 22607 } 22608 return false 22609 } 22610 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { 22611 b := v.Block 22612 _ = b 22613 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 22614 // cond: x.Uses == 1 && clobber(x) 22615 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 22616 for { 22617 x := v.Args[0] 22618 if x.Op != OpAMD64MOVWload { 22619 break 22620 } 22621 off := x.AuxInt 22622 sym := x.Aux 22623 _ = x.Args[1] 22624 ptr := x.Args[0] 22625 mem := x.Args[1] 22626 if !(x.Uses == 1 && clobber(x)) { 22627 break 22628 } 22629 b = x.Block 22630 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 22631 v.reset(OpCopy) 22632 v.AddArg(v0) 22633 v0.AuxInt = off 22634 v0.Aux = sym 22635 v0.AddArg(ptr) 22636 v0.AddArg(mem) 22637 return true 22638 } 22639 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 22640 // cond: x.Uses == 1 && clobber(x) 22641 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 22642 for { 22643 x := v.Args[0] 22644 
if x.Op != OpAMD64MOVLload { 22645 break 22646 } 22647 off := x.AuxInt 22648 sym := x.Aux 22649 _ = x.Args[1] 22650 ptr := x.Args[0] 22651 mem := x.Args[1] 22652 if !(x.Uses == 1 && clobber(x)) { 22653 break 22654 } 22655 b = x.Block 22656 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 22657 v.reset(OpCopy) 22658 v.AddArg(v0) 22659 v0.AuxInt = off 22660 v0.Aux = sym 22661 v0.AddArg(ptr) 22662 v0.AddArg(mem) 22663 return true 22664 } 22665 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 22666 // cond: x.Uses == 1 && clobber(x) 22667 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 22668 for { 22669 x := v.Args[0] 22670 if x.Op != OpAMD64MOVQload { 22671 break 22672 } 22673 off := x.AuxInt 22674 sym := x.Aux 22675 _ = x.Args[1] 22676 ptr := x.Args[0] 22677 mem := x.Args[1] 22678 if !(x.Uses == 1 && clobber(x)) { 22679 break 22680 } 22681 b = x.Block 22682 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 22683 v.reset(OpCopy) 22684 v.AddArg(v0) 22685 v0.AuxInt = off 22686 v0.Aux = sym 22687 v0.AddArg(ptr) 22688 v0.AddArg(mem) 22689 return true 22690 } 22691 // match: (MOVWQSX (ANDLconst [c] x)) 22692 // cond: c & 0x8000 == 0 22693 // result: (ANDLconst [c & 0x7fff] x) 22694 for { 22695 v_0 := v.Args[0] 22696 if v_0.Op != OpAMD64ANDLconst { 22697 break 22698 } 22699 c := v_0.AuxInt 22700 x := v_0.Args[0] 22701 if !(c&0x8000 == 0) { 22702 break 22703 } 22704 v.reset(OpAMD64ANDLconst) 22705 v.AuxInt = c & 0x7fff 22706 v.AddArg(x) 22707 return true 22708 } 22709 // match: (MOVWQSX (MOVWQSX x)) 22710 // cond: 22711 // result: (MOVWQSX x) 22712 for { 22713 v_0 := v.Args[0] 22714 if v_0.Op != OpAMD64MOVWQSX { 22715 break 22716 } 22717 x := v_0.Args[0] 22718 v.reset(OpAMD64MOVWQSX) 22719 v.AddArg(x) 22720 return true 22721 } 22722 // match: (MOVWQSX (MOVBQSX x)) 22723 // cond: 22724 // result: (MOVBQSX x) 22725 for { 22726 v_0 := v.Args[0] 22727 if v_0.Op != OpAMD64MOVBQSX { 22728 break 22729 } 22730 x := v_0.Args[0] 22731 v.reset(OpAMD64MOVBQSX) 22732 v.AddArg(x) 22733 return true 22734 } 22735 return false 22736 } 22737 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 22738 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 22739 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 22740 // result: (MOVWQSX x) 22741 for { 22742 off := v.AuxInt 22743 sym := v.Aux 22744 _ = v.Args[1] 22745 ptr := v.Args[0] 22746 v_1 := v.Args[1] 22747 if v_1.Op != OpAMD64MOVWstore { 22748 break 22749 } 22750 off2 := v_1.AuxInt 22751 sym2 := v_1.Aux 22752 _ = v_1.Args[2] 22753 ptr2 := v_1.Args[0] 22754 x := v_1.Args[1] 22755 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 22756 break 22757 } 22758 v.reset(OpAMD64MOVWQSX) 22759 v.AddArg(x) 22760 return true 22761 } 22762 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 22763 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 22764 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 22765 for { 22766 off1 := v.AuxInt 22767 sym1 := v.Aux 22768 _ = v.Args[1] 22769 v_0 := v.Args[0] 22770 if v_0.Op != OpAMD64LEAQ { 22771 break 22772 } 22773 off2 := v_0.AuxInt 22774 sym2 := v_0.Aux 22775 base := v_0.Args[0] 22776 mem := v.Args[1] 22777 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 22778 break 22779 } 22780 v.reset(OpAMD64MOVWQSXload) 22781 v.AuxInt = off1 + off2 22782 v.Aux = mergeSym(sym1, sym2) 22783 v.AddArg(base) 22784 v.AddArg(mem) 22785 return true 22786 } 22787 return false 22788 } 22789 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) 
bool { 22790 b := v.Block 22791 _ = b 22792 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 22793 // cond: x.Uses == 1 && clobber(x) 22794 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 22795 for { 22796 x := v.Args[0] 22797 if x.Op != OpAMD64MOVWload { 22798 break 22799 } 22800 off := x.AuxInt 22801 sym := x.Aux 22802 _ = x.Args[1] 22803 ptr := x.Args[0] 22804 mem := x.Args[1] 22805 if !(x.Uses == 1 && clobber(x)) { 22806 break 22807 } 22808 b = x.Block 22809 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 22810 v.reset(OpCopy) 22811 v.AddArg(v0) 22812 v0.AuxInt = off 22813 v0.Aux = sym 22814 v0.AddArg(ptr) 22815 v0.AddArg(mem) 22816 return true 22817 } 22818 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 22819 // cond: x.Uses == 1 && clobber(x) 22820 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 22821 for { 22822 x := v.Args[0] 22823 if x.Op != OpAMD64MOVLload { 22824 break 22825 } 22826 off := x.AuxInt 22827 sym := x.Aux 22828 _ = x.Args[1] 22829 ptr := x.Args[0] 22830 mem := x.Args[1] 22831 if !(x.Uses == 1 && clobber(x)) { 22832 break 22833 } 22834 b = x.Block 22835 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 22836 v.reset(OpCopy) 22837 v.AddArg(v0) 22838 v0.AuxInt = off 22839 v0.Aux = sym 22840 v0.AddArg(ptr) 22841 v0.AddArg(mem) 22842 return true 22843 } 22844 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 22845 // cond: x.Uses == 1 && clobber(x) 22846 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 22847 for { 22848 x := v.Args[0] 22849 if x.Op != OpAMD64MOVQload { 22850 break 22851 } 22852 off := x.AuxInt 22853 sym := x.Aux 22854 _ = x.Args[1] 22855 ptr := x.Args[0] 22856 mem := x.Args[1] 22857 if !(x.Uses == 1 && clobber(x)) { 22858 break 22859 } 22860 b = x.Block 22861 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 22862 v.reset(OpCopy) 22863 v.AddArg(v0) 22864 v0.AuxInt = off 22865 v0.Aux = sym 22866 v0.AddArg(ptr) 22867 v0.AddArg(mem) 22868 return true 22869 } 22870 // match: (MOVWQZX x) 22871 // cond: zeroUpper48Bits(x,3) 22872 // result: x 22873 for { 22874 x := v.Args[0] 22875 if !(zeroUpper48Bits(x, 3)) { 22876 break 22877 } 22878 v.reset(OpCopy) 22879 v.Type = x.Type 22880 v.AddArg(x) 22881 return true 22882 } 22883 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 22884 // cond: x.Uses == 1 && clobber(x) 22885 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 22886 for { 22887 x := v.Args[0] 22888 if x.Op != OpAMD64MOVWloadidx1 { 22889 break 22890 } 22891 off := x.AuxInt 22892 sym := x.Aux 22893 _ = x.Args[2] 22894 ptr := x.Args[0] 22895 idx := x.Args[1] 22896 mem := x.Args[2] 22897 if !(x.Uses == 1 && clobber(x)) { 22898 break 22899 } 22900 b = x.Block 22901 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 22902 v.reset(OpCopy) 22903 v.AddArg(v0) 22904 v0.AuxInt = off 22905 v0.Aux = sym 22906 v0.AddArg(ptr) 22907 v0.AddArg(idx) 22908 v0.AddArg(mem) 22909 return true 22910 } 22911 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 22912 // cond: x.Uses == 1 && clobber(x) 22913 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 22914 for { 22915 x := v.Args[0] 22916 if x.Op != OpAMD64MOVWloadidx2 { 22917 break 22918 } 22919 off := x.AuxInt 22920 sym := x.Aux 22921 _ = x.Args[2] 22922 ptr := x.Args[0] 22923 idx := x.Args[1] 22924 mem := x.Args[2] 22925 if !(x.Uses == 1 && clobber(x)) { 22926 break 22927 } 22928 b = x.Block 22929 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 22930 v.reset(OpCopy) 22931 v.AddArg(v0) 22932 v0.AuxInt = off 
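// The MOVWQZX rules in this stretch all rest on one fact: zero-extending the
// low 16 bits of a value is the same as masking with 0xffff. That is why a
// MOVWQZX around a value whose upper 48 bits are already zero
// (zeroUpper48Bits, above) is a no-op, and why (MOVWQZX (ANDLconst [c] x))
// can become (ANDLconst [c & 0xffff] x) below. A tiny self-contained check
// of the identity:
//
//	func movwqzx(x uint64) uint64 { return uint64(uint16(x)) } // models MOVWQZX
//
// movwqzx(x) == x&0xffff for all x, and hence masking with c and then
// zero-extending the low word equals masking with c&0xffff.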
22933 v0.Aux = sym 22934 v0.AddArg(ptr) 22935 v0.AddArg(idx) 22936 v0.AddArg(mem) 22937 return true 22938 } 22939 // match: (MOVWQZX (ANDLconst [c] x)) 22940 // cond: 22941 // result: (ANDLconst [c & 0xffff] x) 22942 for { 22943 v_0 := v.Args[0] 22944 if v_0.Op != OpAMD64ANDLconst { 22945 break 22946 } 22947 c := v_0.AuxInt 22948 x := v_0.Args[0] 22949 v.reset(OpAMD64ANDLconst) 22950 v.AuxInt = c & 0xffff 22951 v.AddArg(x) 22952 return true 22953 } 22954 // match: (MOVWQZX (MOVWQZX x)) 22955 // cond: 22956 // result: (MOVWQZX x) 22957 for { 22958 v_0 := v.Args[0] 22959 if v_0.Op != OpAMD64MOVWQZX { 22960 break 22961 } 22962 x := v_0.Args[0] 22963 v.reset(OpAMD64MOVWQZX) 22964 v.AddArg(x) 22965 return true 22966 } 22967 // match: (MOVWQZX (MOVBQZX x)) 22968 // cond: 22969 // result: (MOVBQZX x) 22970 for { 22971 v_0 := v.Args[0] 22972 if v_0.Op != OpAMD64MOVBQZX { 22973 break 22974 } 22975 x := v_0.Args[0] 22976 v.reset(OpAMD64MOVBQZX) 22977 v.AddArg(x) 22978 return true 22979 } 22980 return false 22981 } 22982 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 22983 b := v.Block 22984 _ = b 22985 config := b.Func.Config 22986 _ = config 22987 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 22988 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 22989 // result: (MOVWQZX x) 22990 for { 22991 off := v.AuxInt 22992 sym := v.Aux 22993 _ = v.Args[1] 22994 ptr := v.Args[0] 22995 v_1 := v.Args[1] 22996 if v_1.Op != OpAMD64MOVWstore { 22997 break 22998 } 22999 off2 := v_1.AuxInt 23000 sym2 := v_1.Aux 23001 _ = v_1.Args[2] 23002 ptr2 := v_1.Args[0] 23003 x := v_1.Args[1] 23004 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 23005 break 23006 } 23007 v.reset(OpAMD64MOVWQZX) 23008 v.AddArg(x) 23009 return true 23010 } 23011 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 23012 // cond: is32Bit(off1+off2) 23013 // result: (MOVWload [off1+off2] {sym} ptr mem) 23014 for { 23015 off1 := v.AuxInt 23016 sym := v.Aux 23017 _ = v.Args[1] 23018 v_0 := v.Args[0] 23019 if v_0.Op != OpAMD64ADDQconst { 23020 break 23021 } 23022 off2 := v_0.AuxInt 23023 ptr := v_0.Args[0] 23024 mem := v.Args[1] 23025 if !(is32Bit(off1 + off2)) { 23026 break 23027 } 23028 v.reset(OpAMD64MOVWload) 23029 v.AuxInt = off1 + off2 23030 v.Aux = sym 23031 v.AddArg(ptr) 23032 v.AddArg(mem) 23033 return true 23034 } 23035 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 23036 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 23037 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 23038 for { 23039 off1 := v.AuxInt 23040 sym1 := v.Aux 23041 _ = v.Args[1] 23042 v_0 := v.Args[0] 23043 if v_0.Op != OpAMD64LEAQ { 23044 break 23045 } 23046 off2 := v_0.AuxInt 23047 sym2 := v_0.Aux 23048 base := v_0.Args[0] 23049 mem := v.Args[1] 23050 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 23051 break 23052 } 23053 v.reset(OpAMD64MOVWload) 23054 v.AuxInt = off1 + off2 23055 v.Aux = mergeSym(sym1, sym2) 23056 v.AddArg(base) 23057 v.AddArg(mem) 23058 return true 23059 } 23060 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 23061 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 23062 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 23063 for { 23064 off1 := v.AuxInt 23065 sym1 := v.Aux 23066 _ = v.Args[1] 23067 v_0 := v.Args[0] 23068 if v_0.Op != OpAMD64LEAQ1 { 23069 break 23070 } 23071 off2 := v_0.AuxInt 23072 sym2 := v_0.Aux 23073 _ = v_0.Args[1] 23074 ptr := v_0.Args[0] 23075 idx := v_0.Args[1] 23076 
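// The LEAQ-folding rules here merge the LEAQ's offset and symbol into the
// load itself, guarded by canMergeSym(sym1, sym2). Roughly, per the helpers
// in rewrite.go (a sketch: at most one of the two symbols may be non-nil,
// and the merged symbol is whichever one is set):
//
//	func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }
//
//	func mergeSym(x, y interface{}) interface{} {
//		if x == nil {
//			return y
//		}
//		if y == nil {
//			return x
//		}
//		panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
//	}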
mem := v.Args[1] 23077 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 23078 break 23079 } 23080 v.reset(OpAMD64MOVWloadidx1) 23081 v.AuxInt = off1 + off2 23082 v.Aux = mergeSym(sym1, sym2) 23083 v.AddArg(ptr) 23084 v.AddArg(idx) 23085 v.AddArg(mem) 23086 return true 23087 } 23088 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 23089 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 23090 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 23091 for { 23092 off1 := v.AuxInt 23093 sym1 := v.Aux 23094 _ = v.Args[1] 23095 v_0 := v.Args[0] 23096 if v_0.Op != OpAMD64LEAQ2 { 23097 break 23098 } 23099 off2 := v_0.AuxInt 23100 sym2 := v_0.Aux 23101 _ = v_0.Args[1] 23102 ptr := v_0.Args[0] 23103 idx := v_0.Args[1] 23104 mem := v.Args[1] 23105 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 23106 break 23107 } 23108 v.reset(OpAMD64MOVWloadidx2) 23109 v.AuxInt = off1 + off2 23110 v.Aux = mergeSym(sym1, sym2) 23111 v.AddArg(ptr) 23112 v.AddArg(idx) 23113 v.AddArg(mem) 23114 return true 23115 } 23116 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 23117 // cond: ptr.Op != OpSB 23118 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 23119 for { 23120 off := v.AuxInt 23121 sym := v.Aux 23122 _ = v.Args[1] 23123 v_0 := v.Args[0] 23124 if v_0.Op != OpAMD64ADDQ { 23125 break 23126 } 23127 _ = v_0.Args[1] 23128 ptr := v_0.Args[0] 23129 idx := v_0.Args[1] 23130 mem := v.Args[1] 23131 if !(ptr.Op != OpSB) { 23132 break 23133 } 23134 v.reset(OpAMD64MOVWloadidx1) 23135 v.AuxInt = off 23136 v.Aux = sym 23137 v.AddArg(ptr) 23138 v.AddArg(idx) 23139 v.AddArg(mem) 23140 return true 23141 } 23142 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 23143 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 23144 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 23145 for { 23146 off1 := v.AuxInt 23147 sym1 := v.Aux 23148 _ = v.Args[1] 23149 v_0 := v.Args[0] 23150 if v_0.Op != OpAMD64LEAL { 23151 break 23152 } 23153 off2 := v_0.AuxInt 23154 sym2 := v_0.Aux 23155 base := v_0.Args[0] 23156 mem := v.Args[1] 23157 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 23158 break 23159 } 23160 v.reset(OpAMD64MOVWload) 23161 v.AuxInt = off1 + off2 23162 v.Aux = mergeSym(sym1, sym2) 23163 v.AddArg(base) 23164 v.AddArg(mem) 23165 return true 23166 } 23167 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 23168 // cond: is32Bit(off1+off2) 23169 // result: (MOVWload [off1+off2] {sym} ptr mem) 23170 for { 23171 off1 := v.AuxInt 23172 sym := v.Aux 23173 _ = v.Args[1] 23174 v_0 := v.Args[0] 23175 if v_0.Op != OpAMD64ADDLconst { 23176 break 23177 } 23178 off2 := v_0.AuxInt 23179 ptr := v_0.Args[0] 23180 mem := v.Args[1] 23181 if !(is32Bit(off1 + off2)) { 23182 break 23183 } 23184 v.reset(OpAMD64MOVWload) 23185 v.AuxInt = off1 + off2 23186 v.Aux = sym 23187 v.AddArg(ptr) 23188 v.AddArg(mem) 23189 return true 23190 } 23191 // match: (MOVWload [off] {sym} (SB) _) 23192 // cond: symIsRO(sym) 23193 // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))]) 23194 for { 23195 off := v.AuxInt 23196 sym := v.Aux 23197 _ = v.Args[1] 23198 v_0 := v.Args[0] 23199 if v_0.Op != OpSB { 23200 break 23201 } 23202 if !(symIsRO(sym)) { 23203 break 23204 } 23205 v.reset(OpAMD64MOVLconst) 23206 v.AuxInt = int64(read16(sym, off, config.BigEndian)) 23207 return true 23208 } 23209 return false 23210 } 23211 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 23212 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 23213 // 
cond: 23214 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 23215 for { 23216 c := v.AuxInt 23217 sym := v.Aux 23218 _ = v.Args[2] 23219 ptr := v.Args[0] 23220 v_1 := v.Args[1] 23221 if v_1.Op != OpAMD64SHLQconst { 23222 break 23223 } 23224 if v_1.AuxInt != 1 { 23225 break 23226 } 23227 idx := v_1.Args[0] 23228 mem := v.Args[2] 23229 v.reset(OpAMD64MOVWloadidx2) 23230 v.AuxInt = c 23231 v.Aux = sym 23232 v.AddArg(ptr) 23233 v.AddArg(idx) 23234 v.AddArg(mem) 23235 return true 23236 } 23237 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 23238 // cond: 23239 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 23240 for { 23241 c := v.AuxInt 23242 sym := v.Aux 23243 _ = v.Args[2] 23244 v_0 := v.Args[0] 23245 if v_0.Op != OpAMD64SHLQconst { 23246 break 23247 } 23248 if v_0.AuxInt != 1 { 23249 break 23250 } 23251 idx := v_0.Args[0] 23252 ptr := v.Args[1] 23253 mem := v.Args[2] 23254 v.reset(OpAMD64MOVWloadidx2) 23255 v.AuxInt = c 23256 v.Aux = sym 23257 v.AddArg(ptr) 23258 v.AddArg(idx) 23259 v.AddArg(mem) 23260 return true 23261 } 23262 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 23263 // cond: is32Bit(c+d) 23264 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 23265 for { 23266 c := v.AuxInt 23267 sym := v.Aux 23268 _ = v.Args[2] 23269 v_0 := v.Args[0] 23270 if v_0.Op != OpAMD64ADDQconst { 23271 break 23272 } 23273 d := v_0.AuxInt 23274 ptr := v_0.Args[0] 23275 idx := v.Args[1] 23276 mem := v.Args[2] 23277 if !(is32Bit(c + d)) { 23278 break 23279 } 23280 v.reset(OpAMD64MOVWloadidx1) 23281 v.AuxInt = c + d 23282 v.Aux = sym 23283 v.AddArg(ptr) 23284 v.AddArg(idx) 23285 v.AddArg(mem) 23286 return true 23287 } 23288 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 23289 // cond: is32Bit(c+d) 23290 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 23291 for { 23292 c := v.AuxInt 23293 sym := v.Aux 23294 _ = v.Args[2] 23295 idx := v.Args[0] 23296 v_1 := v.Args[1] 23297 if v_1.Op != OpAMD64ADDQconst { 23298 break 23299 } 23300 d := v_1.AuxInt 23301 ptr := v_1.Args[0] 23302 mem := v.Args[2] 23303 if !(is32Bit(c + d)) { 23304 break 23305 } 23306 v.reset(OpAMD64MOVWloadidx1) 23307 v.AuxInt = c + d 23308 v.Aux = sym 23309 v.AddArg(ptr) 23310 v.AddArg(idx) 23311 v.AddArg(mem) 23312 return true 23313 } 23314 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 23315 // cond: is32Bit(c+d) 23316 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 23317 for { 23318 c := v.AuxInt 23319 sym := v.Aux 23320 _ = v.Args[2] 23321 ptr := v.Args[0] 23322 v_1 := v.Args[1] 23323 if v_1.Op != OpAMD64ADDQconst { 23324 break 23325 } 23326 d := v_1.AuxInt 23327 idx := v_1.Args[0] 23328 mem := v.Args[2] 23329 if !(is32Bit(c + d)) { 23330 break 23331 } 23332 v.reset(OpAMD64MOVWloadidx1) 23333 v.AuxInt = c + d 23334 v.Aux = sym 23335 v.AddArg(ptr) 23336 v.AddArg(idx) 23337 v.AddArg(mem) 23338 return true 23339 } 23340 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 23341 // cond: is32Bit(c+d) 23342 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 23343 for { 23344 c := v.AuxInt 23345 sym := v.Aux 23346 _ = v.Args[2] 23347 v_0 := v.Args[0] 23348 if v_0.Op != OpAMD64ADDQconst { 23349 break 23350 } 23351 d := v_0.AuxInt 23352 idx := v_0.Args[0] 23353 ptr := v.Args[1] 23354 mem := v.Args[2] 23355 if !(is32Bit(c + d)) { 23356 break 23357 } 23358 v.reset(OpAMD64MOVWloadidx1) 23359 v.AuxInt = c + d 23360 v.Aux = sym 23361 v.AddArg(ptr) 23362 v.AddArg(idx) 23363 v.AddArg(mem) 23364 return true 23365 } 23366 // match: (MOVWloadidx1 [i] {s} p (MOVQconst [c]) mem) 
23367 // cond: is32Bit(i+c) 23368 // result: (MOVWload [i+c] {s} p mem) 23369 for { 23370 i := v.AuxInt 23371 s := v.Aux 23372 _ = v.Args[2] 23373 p := v.Args[0] 23374 v_1 := v.Args[1] 23375 if v_1.Op != OpAMD64MOVQconst { 23376 break 23377 } 23378 c := v_1.AuxInt 23379 mem := v.Args[2] 23380 if !(is32Bit(i + c)) { 23381 break 23382 } 23383 v.reset(OpAMD64MOVWload) 23384 v.AuxInt = i + c 23385 v.Aux = s 23386 v.AddArg(p) 23387 v.AddArg(mem) 23388 return true 23389 } 23390 // match: (MOVWloadidx1 [i] {s} (MOVQconst [c]) p mem) 23391 // cond: is32Bit(i+c) 23392 // result: (MOVWload [i+c] {s} p mem) 23393 for { 23394 i := v.AuxInt 23395 s := v.Aux 23396 _ = v.Args[2] 23397 v_0 := v.Args[0] 23398 if v_0.Op != OpAMD64MOVQconst { 23399 break 23400 } 23401 c := v_0.AuxInt 23402 p := v.Args[1] 23403 mem := v.Args[2] 23404 if !(is32Bit(i + c)) { 23405 break 23406 } 23407 v.reset(OpAMD64MOVWload) 23408 v.AuxInt = i + c 23409 v.Aux = s 23410 v.AddArg(p) 23411 v.AddArg(mem) 23412 return true 23413 } 23414 return false 23415 } 23416 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 23417 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 23418 // cond: is32Bit(c+d) 23419 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 23420 for { 23421 c := v.AuxInt 23422 sym := v.Aux 23423 _ = v.Args[2] 23424 v_0 := v.Args[0] 23425 if v_0.Op != OpAMD64ADDQconst { 23426 break 23427 } 23428 d := v_0.AuxInt 23429 ptr := v_0.Args[0] 23430 idx := v.Args[1] 23431 mem := v.Args[2] 23432 if !(is32Bit(c + d)) { 23433 break 23434 } 23435 v.reset(OpAMD64MOVWloadidx2) 23436 v.AuxInt = c + d 23437 v.Aux = sym 23438 v.AddArg(ptr) 23439 v.AddArg(idx) 23440 v.AddArg(mem) 23441 return true 23442 } 23443 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 23444 // cond: is32Bit(c+2*d) 23445 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 23446 for { 23447 c := v.AuxInt 23448 sym := v.Aux 23449 _ = v.Args[2] 23450 ptr := v.Args[0] 23451 v_1 := v.Args[1] 23452 if v_1.Op != OpAMD64ADDQconst { 23453 break 23454 } 23455 d := v_1.AuxInt 23456 idx := v_1.Args[0] 23457 mem := v.Args[2] 23458 if !(is32Bit(c + 2*d)) { 23459 break 23460 } 23461 v.reset(OpAMD64MOVWloadidx2) 23462 v.AuxInt = c + 2*d 23463 v.Aux = sym 23464 v.AddArg(ptr) 23465 v.AddArg(idx) 23466 v.AddArg(mem) 23467 return true 23468 } 23469 // match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem) 23470 // cond: is32Bit(i+2*c) 23471 // result: (MOVWload [i+2*c] {s} p mem) 23472 for { 23473 i := v.AuxInt 23474 s := v.Aux 23475 _ = v.Args[2] 23476 p := v.Args[0] 23477 v_1 := v.Args[1] 23478 if v_1.Op != OpAMD64MOVQconst { 23479 break 23480 } 23481 c := v_1.AuxInt 23482 mem := v.Args[2] 23483 if !(is32Bit(i + 2*c)) { 23484 break 23485 } 23486 v.reset(OpAMD64MOVWload) 23487 v.AuxInt = i + 2*c 23488 v.Aux = s 23489 v.AddArg(p) 23490 v.AddArg(mem) 23491 return true 23492 } 23493 return false 23494 } 23495 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 23496 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 23497 // cond: 23498 // result: (MOVWstore [off] {sym} ptr x mem) 23499 for { 23500 off := v.AuxInt 23501 sym := v.Aux 23502 _ = v.Args[2] 23503 ptr := v.Args[0] 23504 v_1 := v.Args[1] 23505 if v_1.Op != OpAMD64MOVWQSX { 23506 break 23507 } 23508 x := v_1.Args[0] 23509 mem := v.Args[2] 23510 v.reset(OpAMD64MOVWstore) 23511 v.AuxInt = off 23512 v.Aux = sym 23513 v.AddArg(ptr) 23514 v.AddArg(x) 23515 v.AddArg(mem) 23516 return true 23517 } 23518 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 23519 // cond: 23520 // result: 
(MOVWstore [off] {sym} ptr x mem) 23521 for { 23522 off := v.AuxInt 23523 sym := v.Aux 23524 _ = v.Args[2] 23525 ptr := v.Args[0] 23526 v_1 := v.Args[1] 23527 if v_1.Op != OpAMD64MOVWQZX { 23528 break 23529 } 23530 x := v_1.Args[0] 23531 mem := v.Args[2] 23532 v.reset(OpAMD64MOVWstore) 23533 v.AuxInt = off 23534 v.Aux = sym 23535 v.AddArg(ptr) 23536 v.AddArg(x) 23537 v.AddArg(mem) 23538 return true 23539 } 23540 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 23541 // cond: is32Bit(off1+off2) 23542 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 23543 for { 23544 off1 := v.AuxInt 23545 sym := v.Aux 23546 _ = v.Args[2] 23547 v_0 := v.Args[0] 23548 if v_0.Op != OpAMD64ADDQconst { 23549 break 23550 } 23551 off2 := v_0.AuxInt 23552 ptr := v_0.Args[0] 23553 val := v.Args[1] 23554 mem := v.Args[2] 23555 if !(is32Bit(off1 + off2)) { 23556 break 23557 } 23558 v.reset(OpAMD64MOVWstore) 23559 v.AuxInt = off1 + off2 23560 v.Aux = sym 23561 v.AddArg(ptr) 23562 v.AddArg(val) 23563 v.AddArg(mem) 23564 return true 23565 } 23566 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 23567 // cond: validOff(off) 23568 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 23569 for { 23570 off := v.AuxInt 23571 sym := v.Aux 23572 _ = v.Args[2] 23573 ptr := v.Args[0] 23574 v_1 := v.Args[1] 23575 if v_1.Op != OpAMD64MOVLconst { 23576 break 23577 } 23578 c := v_1.AuxInt 23579 mem := v.Args[2] 23580 if !(validOff(off)) { 23581 break 23582 } 23583 v.reset(OpAMD64MOVWstoreconst) 23584 v.AuxInt = makeValAndOff(int64(int16(c)), off) 23585 v.Aux = sym 23586 v.AddArg(ptr) 23587 v.AddArg(mem) 23588 return true 23589 } 23590 // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) 23591 // cond: validOff(off) 23592 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 23593 for { 23594 off := v.AuxInt 23595 sym := v.Aux 23596 _ = v.Args[2] 23597 ptr := v.Args[0] 23598 v_1 := v.Args[1] 23599 if v_1.Op != OpAMD64MOVQconst { 23600 break 23601 } 23602 c := v_1.AuxInt 23603 mem := v.Args[2] 23604 if !(validOff(off)) { 23605 break 23606 } 23607 v.reset(OpAMD64MOVWstoreconst) 23608 v.AuxInt = makeValAndOff(int64(int16(c)), off) 23609 v.Aux = sym 23610 v.AddArg(ptr) 23611 v.AddArg(mem) 23612 return true 23613 } 23614 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 23615 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 23616 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 23617 for { 23618 off1 := v.AuxInt 23619 sym1 := v.Aux 23620 _ = v.Args[2] 23621 v_0 := v.Args[0] 23622 if v_0.Op != OpAMD64LEAQ { 23623 break 23624 } 23625 off2 := v_0.AuxInt 23626 sym2 := v_0.Aux 23627 base := v_0.Args[0] 23628 val := v.Args[1] 23629 mem := v.Args[2] 23630 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 23631 break 23632 } 23633 v.reset(OpAMD64MOVWstore) 23634 v.AuxInt = off1 + off2 23635 v.Aux = mergeSym(sym1, sym2) 23636 v.AddArg(base) 23637 v.AddArg(val) 23638 v.AddArg(mem) 23639 return true 23640 } 23641 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 23642 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 23643 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 23644 for { 23645 off1 := v.AuxInt 23646 sym1 := v.Aux 23647 _ = v.Args[2] 23648 v_0 := v.Args[0] 23649 if v_0.Op != OpAMD64LEAQ1 { 23650 break 23651 } 23652 off2 := v_0.AuxInt 23653 sym2 := v_0.Aux 23654 _ = v_0.Args[1] 23655 ptr := v_0.Args[0] 23656 idx := v_0.Args[1] 23657 val := v.Args[1] 
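// The two rules above that turn (MOVWstore ... (MOVLconst [c]) ...) and its
// MOVQconst form into MOVWstoreconst must pack two quantities into the single
// AuxInt: the 16-bit constant to store and the addressing offset; validOff
// guards that the offset half fits. A condensed sketch of the encoding,
// assuming the usual ValAndOff layout from rewrite.go (value in the high 32
// bits, offset in the low 32):
//
//	type ValAndOff int64
//
//	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }  // constant to store
//	func (x ValAndOff) Off() int64 { return int64(int32(x)) } // addressing offset
//
//	func makeValAndOff(val, off int64) int64 {
//		return val<<32 | int64(uint32(off))
//	}
//
// The canAdd/add methods used by the MOVWstoreconst rules further below then
// adjust only the offset half, leaving the value half untouched.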
23658 mem := v.Args[2] 23659 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 23660 break 23661 } 23662 v.reset(OpAMD64MOVWstoreidx1) 23663 v.AuxInt = off1 + off2 23664 v.Aux = mergeSym(sym1, sym2) 23665 v.AddArg(ptr) 23666 v.AddArg(idx) 23667 v.AddArg(val) 23668 v.AddArg(mem) 23669 return true 23670 } 23671 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 23672 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 23673 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 23674 for { 23675 off1 := v.AuxInt 23676 sym1 := v.Aux 23677 _ = v.Args[2] 23678 v_0 := v.Args[0] 23679 if v_0.Op != OpAMD64LEAQ2 { 23680 break 23681 } 23682 off2 := v_0.AuxInt 23683 sym2 := v_0.Aux 23684 _ = v_0.Args[1] 23685 ptr := v_0.Args[0] 23686 idx := v_0.Args[1] 23687 val := v.Args[1] 23688 mem := v.Args[2] 23689 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 23690 break 23691 } 23692 v.reset(OpAMD64MOVWstoreidx2) 23693 v.AuxInt = off1 + off2 23694 v.Aux = mergeSym(sym1, sym2) 23695 v.AddArg(ptr) 23696 v.AddArg(idx) 23697 v.AddArg(val) 23698 v.AddArg(mem) 23699 return true 23700 } 23701 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 23702 // cond: ptr.Op != OpSB 23703 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 23704 for { 23705 off := v.AuxInt 23706 sym := v.Aux 23707 _ = v.Args[2] 23708 v_0 := v.Args[0] 23709 if v_0.Op != OpAMD64ADDQ { 23710 break 23711 } 23712 _ = v_0.Args[1] 23713 ptr := v_0.Args[0] 23714 idx := v_0.Args[1] 23715 val := v.Args[1] 23716 mem := v.Args[2] 23717 if !(ptr.Op != OpSB) { 23718 break 23719 } 23720 v.reset(OpAMD64MOVWstoreidx1) 23721 v.AuxInt = off 23722 v.Aux = sym 23723 v.AddArg(ptr) 23724 v.AddArg(idx) 23725 v.AddArg(val) 23726 v.AddArg(mem) 23727 return true 23728 } 23729 // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 23730 // cond: x.Uses == 1 && clobber(x) 23731 // result: (MOVLstore [i-2] {s} p w mem) 23732 for { 23733 i := v.AuxInt 23734 s := v.Aux 23735 _ = v.Args[2] 23736 p := v.Args[0] 23737 v_1 := v.Args[1] 23738 if v_1.Op != OpAMD64SHRLconst { 23739 break 23740 } 23741 if v_1.AuxInt != 16 { 23742 break 23743 } 23744 w := v_1.Args[0] 23745 x := v.Args[2] 23746 if x.Op != OpAMD64MOVWstore { 23747 break 23748 } 23749 if x.AuxInt != i-2 { 23750 break 23751 } 23752 if x.Aux != s { 23753 break 23754 } 23755 _ = x.Args[2] 23756 if p != x.Args[0] { 23757 break 23758 } 23759 if w != x.Args[1] { 23760 break 23761 } 23762 mem := x.Args[2] 23763 if !(x.Uses == 1 && clobber(x)) { 23764 break 23765 } 23766 v.reset(OpAMD64MOVLstore) 23767 v.AuxInt = i - 2 23768 v.Aux = s 23769 v.AddArg(p) 23770 v.AddArg(w) 23771 v.AddArg(mem) 23772 return true 23773 } 23774 return false 23775 } 23776 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 23777 b := v.Block 23778 _ = b 23779 typ := &b.Func.Config.Types 23780 _ = typ 23781 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 23782 // cond: x.Uses == 1 && clobber(x) 23783 // result: (MOVLstore [i-2] {s} p w mem) 23784 for { 23785 i := v.AuxInt 23786 s := v.Aux 23787 _ = v.Args[2] 23788 p := v.Args[0] 23789 v_1 := v.Args[1] 23790 if v_1.Op != OpAMD64SHRQconst { 23791 break 23792 } 23793 if v_1.AuxInt != 16 { 23794 break 23795 } 23796 w := v_1.Args[0] 23797 x := v.Args[2] 23798 if x.Op != OpAMD64MOVWstore { 23799 break 23800 } 23801 if x.AuxInt != i-2 { 23802 break 23803 } 23804 if x.Aux != s { 23805 break 23806 } 23807 _ = x.Args[2] 23808 if p != x.Args[0] { 23809 break 23810 } 23811 if w 
!= x.Args[1] { 23812 break 23813 } 23814 mem := x.Args[2] 23815 if !(x.Uses == 1 && clobber(x)) { 23816 break 23817 } 23818 v.reset(OpAMD64MOVLstore) 23819 v.AuxInt = i - 2 23820 v.Aux = s 23821 v.AddArg(p) 23822 v.AddArg(w) 23823 v.AddArg(mem) 23824 return true 23825 } 23826 // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) 23827 // cond: x.Uses == 1 && clobber(x) 23828 // result: (MOVLstore [i-2] {s} p w0 mem) 23829 for { 23830 i := v.AuxInt 23831 s := v.Aux 23832 _ = v.Args[2] 23833 p := v.Args[0] 23834 v_1 := v.Args[1] 23835 if v_1.Op != OpAMD64SHRLconst { 23836 break 23837 } 23838 j := v_1.AuxInt 23839 w := v_1.Args[0] 23840 x := v.Args[2] 23841 if x.Op != OpAMD64MOVWstore { 23842 break 23843 } 23844 if x.AuxInt != i-2 { 23845 break 23846 } 23847 if x.Aux != s { 23848 break 23849 } 23850 _ = x.Args[2] 23851 if p != x.Args[0] { 23852 break 23853 } 23854 w0 := x.Args[1] 23855 if w0.Op != OpAMD64SHRLconst { 23856 break 23857 } 23858 if w0.AuxInt != j-16 { 23859 break 23860 } 23861 if w != w0.Args[0] { 23862 break 23863 } 23864 mem := x.Args[2] 23865 if !(x.Uses == 1 && clobber(x)) { 23866 break 23867 } 23868 v.reset(OpAMD64MOVLstore) 23869 v.AuxInt = i - 2 23870 v.Aux = s 23871 v.AddArg(p) 23872 v.AddArg(w0) 23873 v.AddArg(mem) 23874 return true 23875 } 23876 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 23877 // cond: x.Uses == 1 && clobber(x) 23878 // result: (MOVLstore [i-2] {s} p w0 mem) 23879 for { 23880 i := v.AuxInt 23881 s := v.Aux 23882 _ = v.Args[2] 23883 p := v.Args[0] 23884 v_1 := v.Args[1] 23885 if v_1.Op != OpAMD64SHRQconst { 23886 break 23887 } 23888 j := v_1.AuxInt 23889 w := v_1.Args[0] 23890 x := v.Args[2] 23891 if x.Op != OpAMD64MOVWstore { 23892 break 23893 } 23894 if x.AuxInt != i-2 { 23895 break 23896 } 23897 if x.Aux != s { 23898 break 23899 } 23900 _ = x.Args[2] 23901 if p != x.Args[0] { 23902 break 23903 } 23904 w0 := x.Args[1] 23905 if w0.Op != OpAMD64SHRQconst { 23906 break 23907 } 23908 if w0.AuxInt != j-16 { 23909 break 23910 } 23911 if w != w0.Args[0] { 23912 break 23913 } 23914 mem := x.Args[2] 23915 if !(x.Uses == 1 && clobber(x)) { 23916 break 23917 } 23918 v.reset(OpAMD64MOVLstore) 23919 v.AuxInt = i - 2 23920 v.Aux = s 23921 v.AddArg(p) 23922 v.AddArg(w0) 23923 v.AddArg(mem) 23924 return true 23925 } 23926 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) 23927 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 23928 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) 23929 for { 23930 i := v.AuxInt 23931 s := v.Aux 23932 _ = v.Args[2] 23933 p := v.Args[0] 23934 x1 := v.Args[1] 23935 if x1.Op != OpAMD64MOVWload { 23936 break 23937 } 23938 j := x1.AuxInt 23939 s2 := x1.Aux 23940 _ = x1.Args[1] 23941 p2 := x1.Args[0] 23942 mem := x1.Args[1] 23943 mem2 := v.Args[2] 23944 if mem2.Op != OpAMD64MOVWstore { 23945 break 23946 } 23947 if mem2.AuxInt != i-2 { 23948 break 23949 } 23950 if mem2.Aux != s { 23951 break 23952 } 23953 _ = mem2.Args[2] 23954 if p != mem2.Args[0] { 23955 break 23956 } 23957 x2 := mem2.Args[1] 23958 if x2.Op != OpAMD64MOVWload { 23959 break 23960 } 23961 if x2.AuxInt != j-2 { 23962 break 23963 } 23964 if x2.Aux != s2 { 23965 break 23966 } 23967 _ = x2.Args[1] 23968 if p2 != x2.Args[0] { 23969 break 23970 } 23971 if mem != x2.Args[1] { 23972 break 23973 } 23974 if mem != mem2.Args[2] { 
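// The 16-bit store-merging rules in this stretch (and their idx1/idx2
// variants further below) are little-endian identities: storing the low 16
// bits of w at i-2 and bits 16..31 at i writes exactly the same four bytes
// as one 32-bit store of w at i-2. The x.Uses == 1 && clobber(x) guard makes
// sure the narrower store being absorbed has no other consumers. A runnable
// sketch of the byte-level equivalence:
//
//	package main
//
//	import (
//		"bytes"
//		"encoding/binary"
//		"fmt"
//	)
//
//	func main() {
//		w := uint32(0xCAFEBABE)
//		two := make([]byte, 4)
//		binary.LittleEndian.PutUint16(two[0:], uint16(w))     // MOVWstore [i-2] {s} p w
//		binary.LittleEndian.PutUint16(two[2:], uint16(w>>16)) // MOVWstore [i] {s} p (SHRQconst [16] w)
//		one := make([]byte, 4)
//		binary.LittleEndian.PutUint32(one, w) // MOVLstore [i-2] {s} p w
//		fmt.Println(bytes.Equal(two, one))    // prints true
//	}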
23975 break 23976 } 23977 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 23978 break 23979 } 23980 v.reset(OpAMD64MOVLstore) 23981 v.AuxInt = i - 2 23982 v.Aux = s 23983 v.AddArg(p) 23984 v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32) 23985 v0.AuxInt = j - 2 23986 v0.Aux = s2 23987 v0.AddArg(p2) 23988 v0.AddArg(mem) 23989 v.AddArg(v0) 23990 v.AddArg(mem) 23991 return true 23992 } 23993 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 23994 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 23995 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 23996 for { 23997 off1 := v.AuxInt 23998 sym1 := v.Aux 23999 _ = v.Args[2] 24000 v_0 := v.Args[0] 24001 if v_0.Op != OpAMD64LEAL { 24002 break 24003 } 24004 off2 := v_0.AuxInt 24005 sym2 := v_0.Aux 24006 base := v_0.Args[0] 24007 val := v.Args[1] 24008 mem := v.Args[2] 24009 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 24010 break 24011 } 24012 v.reset(OpAMD64MOVWstore) 24013 v.AuxInt = off1 + off2 24014 v.Aux = mergeSym(sym1, sym2) 24015 v.AddArg(base) 24016 v.AddArg(val) 24017 v.AddArg(mem) 24018 return true 24019 } 24020 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 24021 // cond: is32Bit(off1+off2) 24022 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 24023 for { 24024 off1 := v.AuxInt 24025 sym := v.Aux 24026 _ = v.Args[2] 24027 v_0 := v.Args[0] 24028 if v_0.Op != OpAMD64ADDLconst { 24029 break 24030 } 24031 off2 := v_0.AuxInt 24032 ptr := v_0.Args[0] 24033 val := v.Args[1] 24034 mem := v.Args[2] 24035 if !(is32Bit(off1 + off2)) { 24036 break 24037 } 24038 v.reset(OpAMD64MOVWstore) 24039 v.AuxInt = off1 + off2 24040 v.Aux = sym 24041 v.AddArg(ptr) 24042 v.AddArg(val) 24043 v.AddArg(mem) 24044 return true 24045 } 24046 return false 24047 } 24048 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 24049 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 24050 // cond: ValAndOff(sc).canAdd(off) 24051 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 24052 for { 24053 sc := v.AuxInt 24054 s := v.Aux 24055 _ = v.Args[1] 24056 v_0 := v.Args[0] 24057 if v_0.Op != OpAMD64ADDQconst { 24058 break 24059 } 24060 off := v_0.AuxInt 24061 ptr := v_0.Args[0] 24062 mem := v.Args[1] 24063 if !(ValAndOff(sc).canAdd(off)) { 24064 break 24065 } 24066 v.reset(OpAMD64MOVWstoreconst) 24067 v.AuxInt = ValAndOff(sc).add(off) 24068 v.Aux = s 24069 v.AddArg(ptr) 24070 v.AddArg(mem) 24071 return true 24072 } 24073 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 24074 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 24075 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 24076 for { 24077 sc := v.AuxInt 24078 sym1 := v.Aux 24079 _ = v.Args[1] 24080 v_0 := v.Args[0] 24081 if v_0.Op != OpAMD64LEAQ { 24082 break 24083 } 24084 off := v_0.AuxInt 24085 sym2 := v_0.Aux 24086 ptr := v_0.Args[0] 24087 mem := v.Args[1] 24088 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 24089 break 24090 } 24091 v.reset(OpAMD64MOVWstoreconst) 24092 v.AuxInt = ValAndOff(sc).add(off) 24093 v.Aux = mergeSym(sym1, sym2) 24094 v.AddArg(ptr) 24095 v.AddArg(mem) 24096 return true 24097 } 24098 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 24099 // cond: canMergeSym(sym1, sym2) 24100 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 24101 for { 24102 x := v.AuxInt 24103 sym1 := v.Aux 
24104 _ = v.Args[1] 24105 v_0 := v.Args[0] 24106 if v_0.Op != OpAMD64LEAQ1 { 24107 break 24108 } 24109 off := v_0.AuxInt 24110 sym2 := v_0.Aux 24111 _ = v_0.Args[1] 24112 ptr := v_0.Args[0] 24113 idx := v_0.Args[1] 24114 mem := v.Args[1] 24115 if !(canMergeSym(sym1, sym2)) { 24116 break 24117 } 24118 v.reset(OpAMD64MOVWstoreconstidx1) 24119 v.AuxInt = ValAndOff(x).add(off) 24120 v.Aux = mergeSym(sym1, sym2) 24121 v.AddArg(ptr) 24122 v.AddArg(idx) 24123 v.AddArg(mem) 24124 return true 24125 } 24126 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 24127 // cond: canMergeSym(sym1, sym2) 24128 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 24129 for { 24130 x := v.AuxInt 24131 sym1 := v.Aux 24132 _ = v.Args[1] 24133 v_0 := v.Args[0] 24134 if v_0.Op != OpAMD64LEAQ2 { 24135 break 24136 } 24137 off := v_0.AuxInt 24138 sym2 := v_0.Aux 24139 _ = v_0.Args[1] 24140 ptr := v_0.Args[0] 24141 idx := v_0.Args[1] 24142 mem := v.Args[1] 24143 if !(canMergeSym(sym1, sym2)) { 24144 break 24145 } 24146 v.reset(OpAMD64MOVWstoreconstidx2) 24147 v.AuxInt = ValAndOff(x).add(off) 24148 v.Aux = mergeSym(sym1, sym2) 24149 v.AddArg(ptr) 24150 v.AddArg(idx) 24151 v.AddArg(mem) 24152 return true 24153 } 24154 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 24155 // cond: 24156 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 24157 for { 24158 x := v.AuxInt 24159 sym := v.Aux 24160 _ = v.Args[1] 24161 v_0 := v.Args[0] 24162 if v_0.Op != OpAMD64ADDQ { 24163 break 24164 } 24165 _ = v_0.Args[1] 24166 ptr := v_0.Args[0] 24167 idx := v_0.Args[1] 24168 mem := v.Args[1] 24169 v.reset(OpAMD64MOVWstoreconstidx1) 24170 v.AuxInt = x 24171 v.Aux = sym 24172 v.AddArg(ptr) 24173 v.AddArg(idx) 24174 v.AddArg(mem) 24175 return true 24176 } 24177 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 24178 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 24179 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 24180 for { 24181 c := v.AuxInt 24182 s := v.Aux 24183 _ = v.Args[1] 24184 p := v.Args[0] 24185 x := v.Args[1] 24186 if x.Op != OpAMD64MOVWstoreconst { 24187 break 24188 } 24189 a := x.AuxInt 24190 if x.Aux != s { 24191 break 24192 } 24193 _ = x.Args[1] 24194 if p != x.Args[0] { 24195 break 24196 } 24197 mem := x.Args[1] 24198 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 24199 break 24200 } 24201 v.reset(OpAMD64MOVLstoreconst) 24202 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 24203 v.Aux = s 24204 v.AddArg(p) 24205 v.AddArg(mem) 24206 return true 24207 } 24208 // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) 24209 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 24210 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 24211 for { 24212 a := v.AuxInt 24213 s := v.Aux 24214 _ = v.Args[1] 24215 p := v.Args[0] 24216 x := v.Args[1] 24217 if x.Op != OpAMD64MOVWstoreconst { 24218 break 24219 } 24220 c := x.AuxInt 24221 if x.Aux != s { 24222 break 24223 } 24224 _ = x.Args[1] 24225 if p != x.Args[0] { 24226 break 24227 } 24228 mem := x.Args[1] 24229 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 24230 break 24231 } 24232 v.reset(OpAMD64MOVLstoreconst) 24233 v.AuxInt = 
makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 24234 v.Aux = s 24235 v.AddArg(p) 24236 v.AddArg(mem) 24237 return true 24238 } 24239 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 24240 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 24241 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 24242 for { 24243 sc := v.AuxInt 24244 sym1 := v.Aux 24245 _ = v.Args[1] 24246 v_0 := v.Args[0] 24247 if v_0.Op != OpAMD64LEAL { 24248 break 24249 } 24250 off := v_0.AuxInt 24251 sym2 := v_0.Aux 24252 ptr := v_0.Args[0] 24253 mem := v.Args[1] 24254 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 24255 break 24256 } 24257 v.reset(OpAMD64MOVWstoreconst) 24258 v.AuxInt = ValAndOff(sc).add(off) 24259 v.Aux = mergeSym(sym1, sym2) 24260 v.AddArg(ptr) 24261 v.AddArg(mem) 24262 return true 24263 } 24264 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 24265 // cond: ValAndOff(sc).canAdd(off) 24266 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 24267 for { 24268 sc := v.AuxInt 24269 s := v.Aux 24270 _ = v.Args[1] 24271 v_0 := v.Args[0] 24272 if v_0.Op != OpAMD64ADDLconst { 24273 break 24274 } 24275 off := v_0.AuxInt 24276 ptr := v_0.Args[0] 24277 mem := v.Args[1] 24278 if !(ValAndOff(sc).canAdd(off)) { 24279 break 24280 } 24281 v.reset(OpAMD64MOVWstoreconst) 24282 v.AuxInt = ValAndOff(sc).add(off) 24283 v.Aux = s 24284 v.AddArg(ptr) 24285 v.AddArg(mem) 24286 return true 24287 } 24288 return false 24289 } 24290 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 24291 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 24292 // cond: 24293 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 24294 for { 24295 c := v.AuxInt 24296 sym := v.Aux 24297 _ = v.Args[2] 24298 ptr := v.Args[0] 24299 v_1 := v.Args[1] 24300 if v_1.Op != OpAMD64SHLQconst { 24301 break 24302 } 24303 if v_1.AuxInt != 1 { 24304 break 24305 } 24306 idx := v_1.Args[0] 24307 mem := v.Args[2] 24308 v.reset(OpAMD64MOVWstoreconstidx2) 24309 v.AuxInt = c 24310 v.Aux = sym 24311 v.AddArg(ptr) 24312 v.AddArg(idx) 24313 v.AddArg(mem) 24314 return true 24315 } 24316 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 24317 // cond: ValAndOff(x).canAdd(c) 24318 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 24319 for { 24320 x := v.AuxInt 24321 sym := v.Aux 24322 _ = v.Args[2] 24323 v_0 := v.Args[0] 24324 if v_0.Op != OpAMD64ADDQconst { 24325 break 24326 } 24327 c := v_0.AuxInt 24328 ptr := v_0.Args[0] 24329 idx := v.Args[1] 24330 mem := v.Args[2] 24331 if !(ValAndOff(x).canAdd(c)) { 24332 break 24333 } 24334 v.reset(OpAMD64MOVWstoreconstidx1) 24335 v.AuxInt = ValAndOff(x).add(c) 24336 v.Aux = sym 24337 v.AddArg(ptr) 24338 v.AddArg(idx) 24339 v.AddArg(mem) 24340 return true 24341 } 24342 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 24343 // cond: ValAndOff(x).canAdd(c) 24344 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 24345 for { 24346 x := v.AuxInt 24347 sym := v.Aux 24348 _ = v.Args[2] 24349 ptr := v.Args[0] 24350 v_1 := v.Args[1] 24351 if v_1.Op != OpAMD64ADDQconst { 24352 break 24353 } 24354 c := v_1.AuxInt 24355 idx := v_1.Args[0] 24356 mem := v.Args[2] 24357 if !(ValAndOff(x).canAdd(c)) { 24358 break 24359 } 24360 v.reset(OpAMD64MOVWstoreconstidx1) 24361 v.AuxInt = ValAndOff(x).add(c) 24362 v.Aux = sym 24363 v.AddArg(ptr) 24364 v.AddArg(idx) 24365 v.AddArg(mem) 24366 return true 
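// The constant-merge rule just below (like its MOVWstoreconst counterparts
// above) fuses two adjacent 16-bit constant stores into one 32-bit constant
// store. Little-endian again: the constant written at the lower offset
// becomes the low half of the merged value. A worked example, with a stored
// at Off and c stored at Off+2:
//
//	a := int64(0x1234) // stored at offset 8
//	c := int64(0x5678) // stored at offset 10
//	merged := a&0xffff | c<<16
//	// merged == 0x56781234, emitted as one 32-bit constant store at offset 8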
24367 } 24368 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 24369 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 24370 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 24371 for { 24372 c := v.AuxInt 24373 s := v.Aux 24374 _ = v.Args[2] 24375 p := v.Args[0] 24376 i := v.Args[1] 24377 x := v.Args[2] 24378 if x.Op != OpAMD64MOVWstoreconstidx1 { 24379 break 24380 } 24381 a := x.AuxInt 24382 if x.Aux != s { 24383 break 24384 } 24385 _ = x.Args[2] 24386 if p != x.Args[0] { 24387 break 24388 } 24389 if i != x.Args[1] { 24390 break 24391 } 24392 mem := x.Args[2] 24393 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 24394 break 24395 } 24396 v.reset(OpAMD64MOVLstoreconstidx1) 24397 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 24398 v.Aux = s 24399 v.AddArg(p) 24400 v.AddArg(i) 24401 v.AddArg(mem) 24402 return true 24403 } 24404 return false 24405 } 24406 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 24407 b := v.Block 24408 _ = b 24409 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 24410 // cond: ValAndOff(x).canAdd(c) 24411 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 24412 for { 24413 x := v.AuxInt 24414 sym := v.Aux 24415 _ = v.Args[2] 24416 v_0 := v.Args[0] 24417 if v_0.Op != OpAMD64ADDQconst { 24418 break 24419 } 24420 c := v_0.AuxInt 24421 ptr := v_0.Args[0] 24422 idx := v.Args[1] 24423 mem := v.Args[2] 24424 if !(ValAndOff(x).canAdd(c)) { 24425 break 24426 } 24427 v.reset(OpAMD64MOVWstoreconstidx2) 24428 v.AuxInt = ValAndOff(x).add(c) 24429 v.Aux = sym 24430 v.AddArg(ptr) 24431 v.AddArg(idx) 24432 v.AddArg(mem) 24433 return true 24434 } 24435 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 24436 // cond: ValAndOff(x).canAdd(2*c) 24437 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 24438 for { 24439 x := v.AuxInt 24440 sym := v.Aux 24441 _ = v.Args[2] 24442 ptr := v.Args[0] 24443 v_1 := v.Args[1] 24444 if v_1.Op != OpAMD64ADDQconst { 24445 break 24446 } 24447 c := v_1.AuxInt 24448 idx := v_1.Args[0] 24449 mem := v.Args[2] 24450 if !(ValAndOff(x).canAdd(2 * c)) { 24451 break 24452 } 24453 v.reset(OpAMD64MOVWstoreconstidx2) 24454 v.AuxInt = ValAndOff(x).add(2 * c) 24455 v.Aux = sym 24456 v.AddArg(ptr) 24457 v.AddArg(idx) 24458 v.AddArg(mem) 24459 return true 24460 } 24461 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 24462 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 24463 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 24464 for { 24465 c := v.AuxInt 24466 s := v.Aux 24467 _ = v.Args[2] 24468 p := v.Args[0] 24469 i := v.Args[1] 24470 x := v.Args[2] 24471 if x.Op != OpAMD64MOVWstoreconstidx2 { 24472 break 24473 } 24474 a := x.AuxInt 24475 if x.Aux != s { 24476 break 24477 } 24478 _ = x.Args[2] 24479 if p != x.Args[0] { 24480 break 24481 } 24482 if i != x.Args[1] { 24483 break 24484 } 24485 mem := x.Args[2] 24486 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 24487 break 24488 } 24489 v.reset(OpAMD64MOVLstoreconstidx1) 24490 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 24491 v.Aux = s 
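// This rule widens a 2-scaled 16-bit constant store into a 1-scaled 32-bit
// one, so the index must be rescaled: p + 2*i and p + 1*(i<<1) address the
// same byte. That is what the (SHLQconst <i.Type> [1] i) operand built just
// below provides. As plain arithmetic:
//
//	func addrIdx2(p, i int64) int64 { return p + 2*i } // ...constidx2 addressing
//	func addrIdx1(p, i int64) int64 { return p + 1*i } // ...constidx1 addressing
//
// addrIdx2(p, i) == addrIdx1(p, i<<1) for all p and i.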
24492 v.AddArg(p) 24493 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 24494 v0.AuxInt = 1 24495 v0.AddArg(i) 24496 v.AddArg(v0) 24497 v.AddArg(mem) 24498 return true 24499 } 24500 return false 24501 } 24502 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 24503 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 24504 // cond: 24505 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 24506 for { 24507 c := v.AuxInt 24508 sym := v.Aux 24509 _ = v.Args[3] 24510 ptr := v.Args[0] 24511 v_1 := v.Args[1] 24512 if v_1.Op != OpAMD64SHLQconst { 24513 break 24514 } 24515 if v_1.AuxInt != 1 { 24516 break 24517 } 24518 idx := v_1.Args[0] 24519 val := v.Args[2] 24520 mem := v.Args[3] 24521 v.reset(OpAMD64MOVWstoreidx2) 24522 v.AuxInt = c 24523 v.Aux = sym 24524 v.AddArg(ptr) 24525 v.AddArg(idx) 24526 v.AddArg(val) 24527 v.AddArg(mem) 24528 return true 24529 } 24530 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 24531 // cond: is32Bit(c+d) 24532 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 24533 for { 24534 c := v.AuxInt 24535 sym := v.Aux 24536 _ = v.Args[3] 24537 v_0 := v.Args[0] 24538 if v_0.Op != OpAMD64ADDQconst { 24539 break 24540 } 24541 d := v_0.AuxInt 24542 ptr := v_0.Args[0] 24543 idx := v.Args[1] 24544 val := v.Args[2] 24545 mem := v.Args[3] 24546 if !(is32Bit(c + d)) { 24547 break 24548 } 24549 v.reset(OpAMD64MOVWstoreidx1) 24550 v.AuxInt = c + d 24551 v.Aux = sym 24552 v.AddArg(ptr) 24553 v.AddArg(idx) 24554 v.AddArg(val) 24555 v.AddArg(mem) 24556 return true 24557 } 24558 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 24559 // cond: is32Bit(c+d) 24560 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 24561 for { 24562 c := v.AuxInt 24563 sym := v.Aux 24564 _ = v.Args[3] 24565 ptr := v.Args[0] 24566 v_1 := v.Args[1] 24567 if v_1.Op != OpAMD64ADDQconst { 24568 break 24569 } 24570 d := v_1.AuxInt 24571 idx := v_1.Args[0] 24572 val := v.Args[2] 24573 mem := v.Args[3] 24574 if !(is32Bit(c + d)) { 24575 break 24576 } 24577 v.reset(OpAMD64MOVWstoreidx1) 24578 v.AuxInt = c + d 24579 v.Aux = sym 24580 v.AddArg(ptr) 24581 v.AddArg(idx) 24582 v.AddArg(val) 24583 v.AddArg(mem) 24584 return true 24585 } 24586 // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 24587 // cond: x.Uses == 1 && clobber(x) 24588 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 24589 for { 24590 i := v.AuxInt 24591 s := v.Aux 24592 _ = v.Args[3] 24593 p := v.Args[0] 24594 idx := v.Args[1] 24595 v_2 := v.Args[2] 24596 if v_2.Op != OpAMD64SHRLconst { 24597 break 24598 } 24599 if v_2.AuxInt != 16 { 24600 break 24601 } 24602 w := v_2.Args[0] 24603 x := v.Args[3] 24604 if x.Op != OpAMD64MOVWstoreidx1 { 24605 break 24606 } 24607 if x.AuxInt != i-2 { 24608 break 24609 } 24610 if x.Aux != s { 24611 break 24612 } 24613 _ = x.Args[3] 24614 if p != x.Args[0] { 24615 break 24616 } 24617 if idx != x.Args[1] { 24618 break 24619 } 24620 if w != x.Args[2] { 24621 break 24622 } 24623 mem := x.Args[3] 24624 if !(x.Uses == 1 && clobber(x)) { 24625 break 24626 } 24627 v.reset(OpAMD64MOVLstoreidx1) 24628 v.AuxInt = i - 2 24629 v.Aux = s 24630 v.AddArg(p) 24631 v.AddArg(idx) 24632 v.AddArg(w) 24633 v.AddArg(mem) 24634 return true 24635 } 24636 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 24637 // cond: x.Uses == 1 && clobber(x) 24638 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 24639 for { 24640 i := v.AuxInt 24641 s := v.Aux 24642 _ = v.Args[3] 24643 p 
:= v.Args[0] 24644 idx := v.Args[1] 24645 v_2 := v.Args[2] 24646 if v_2.Op != OpAMD64SHRQconst { 24647 break 24648 } 24649 if v_2.AuxInt != 16 { 24650 break 24651 } 24652 w := v_2.Args[0] 24653 x := v.Args[3] 24654 if x.Op != OpAMD64MOVWstoreidx1 { 24655 break 24656 } 24657 if x.AuxInt != i-2 { 24658 break 24659 } 24660 if x.Aux != s { 24661 break 24662 } 24663 _ = x.Args[3] 24664 if p != x.Args[0] { 24665 break 24666 } 24667 if idx != x.Args[1] { 24668 break 24669 } 24670 if w != x.Args[2] { 24671 break 24672 } 24673 mem := x.Args[3] 24674 if !(x.Uses == 1 && clobber(x)) { 24675 break 24676 } 24677 v.reset(OpAMD64MOVLstoreidx1) 24678 v.AuxInt = i - 2 24679 v.Aux = s 24680 v.AddArg(p) 24681 v.AddArg(idx) 24682 v.AddArg(w) 24683 v.AddArg(mem) 24684 return true 24685 } 24686 // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) 24687 // cond: x.Uses == 1 && clobber(x) 24688 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 24689 for { 24690 i := v.AuxInt 24691 s := v.Aux 24692 _ = v.Args[3] 24693 p := v.Args[0] 24694 idx := v.Args[1] 24695 v_2 := v.Args[2] 24696 if v_2.Op != OpAMD64SHRLconst { 24697 break 24698 } 24699 j := v_2.AuxInt 24700 w := v_2.Args[0] 24701 x := v.Args[3] 24702 if x.Op != OpAMD64MOVWstoreidx1 { 24703 break 24704 } 24705 if x.AuxInt != i-2 { 24706 break 24707 } 24708 if x.Aux != s { 24709 break 24710 } 24711 _ = x.Args[3] 24712 if p != x.Args[0] { 24713 break 24714 } 24715 if idx != x.Args[1] { 24716 break 24717 } 24718 w0 := x.Args[2] 24719 if w0.Op != OpAMD64SHRLconst { 24720 break 24721 } 24722 if w0.AuxInt != j-16 { 24723 break 24724 } 24725 if w != w0.Args[0] { 24726 break 24727 } 24728 mem := x.Args[3] 24729 if !(x.Uses == 1 && clobber(x)) { 24730 break 24731 } 24732 v.reset(OpAMD64MOVLstoreidx1) 24733 v.AuxInt = i - 2 24734 v.Aux = s 24735 v.AddArg(p) 24736 v.AddArg(idx) 24737 v.AddArg(w0) 24738 v.AddArg(mem) 24739 return true 24740 } 24741 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 24742 // cond: x.Uses == 1 && clobber(x) 24743 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 24744 for { 24745 i := v.AuxInt 24746 s := v.Aux 24747 _ = v.Args[3] 24748 p := v.Args[0] 24749 idx := v.Args[1] 24750 v_2 := v.Args[2] 24751 if v_2.Op != OpAMD64SHRQconst { 24752 break 24753 } 24754 j := v_2.AuxInt 24755 w := v_2.Args[0] 24756 x := v.Args[3] 24757 if x.Op != OpAMD64MOVWstoreidx1 { 24758 break 24759 } 24760 if x.AuxInt != i-2 { 24761 break 24762 } 24763 if x.Aux != s { 24764 break 24765 } 24766 _ = x.Args[3] 24767 if p != x.Args[0] { 24768 break 24769 } 24770 if idx != x.Args[1] { 24771 break 24772 } 24773 w0 := x.Args[2] 24774 if w0.Op != OpAMD64SHRQconst { 24775 break 24776 } 24777 if w0.AuxInt != j-16 { 24778 break 24779 } 24780 if w != w0.Args[0] { 24781 break 24782 } 24783 mem := x.Args[3] 24784 if !(x.Uses == 1 && clobber(x)) { 24785 break 24786 } 24787 v.reset(OpAMD64MOVLstoreidx1) 24788 v.AuxInt = i - 2 24789 v.Aux = s 24790 v.AddArg(p) 24791 v.AddArg(idx) 24792 v.AddArg(w0) 24793 v.AddArg(mem) 24794 return true 24795 } 24796 // match: (MOVWstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 24797 // cond: is32Bit(i+c) 24798 // result: (MOVWstore [i+c] {s} p w mem) 24799 for { 24800 i := v.AuxInt 24801 s := v.Aux 24802 _ = v.Args[3] 24803 p := v.Args[0] 24804 v_1 := v.Args[1] 24805 if v_1.Op != OpAMD64MOVQconst { 24806 break 24807 } 24808 c := v_1.AuxInt 24809 w := v.Args[2] 24810 mem := v.Args[3] 24811 if !(is32Bit(i + c)) { 24812 
break 24813 } 24814 v.reset(OpAMD64MOVWstore) 24815 v.AuxInt = i + c 24816 v.Aux = s 24817 v.AddArg(p) 24818 v.AddArg(w) 24819 v.AddArg(mem) 24820 return true 24821 } 24822 return false 24823 } 24824 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 24825 b := v.Block 24826 _ = b 24827 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 24828 // cond: is32Bit(c+d) 24829 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 24830 for { 24831 c := v.AuxInt 24832 sym := v.Aux 24833 _ = v.Args[3] 24834 v_0 := v.Args[0] 24835 if v_0.Op != OpAMD64ADDQconst { 24836 break 24837 } 24838 d := v_0.AuxInt 24839 ptr := v_0.Args[0] 24840 idx := v.Args[1] 24841 val := v.Args[2] 24842 mem := v.Args[3] 24843 if !(is32Bit(c + d)) { 24844 break 24845 } 24846 v.reset(OpAMD64MOVWstoreidx2) 24847 v.AuxInt = c + d 24848 v.Aux = sym 24849 v.AddArg(ptr) 24850 v.AddArg(idx) 24851 v.AddArg(val) 24852 v.AddArg(mem) 24853 return true 24854 } 24855 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 24856 // cond: is32Bit(c+2*d) 24857 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 24858 for { 24859 c := v.AuxInt 24860 sym := v.Aux 24861 _ = v.Args[3] 24862 ptr := v.Args[0] 24863 v_1 := v.Args[1] 24864 if v_1.Op != OpAMD64ADDQconst { 24865 break 24866 } 24867 d := v_1.AuxInt 24868 idx := v_1.Args[0] 24869 val := v.Args[2] 24870 mem := v.Args[3] 24871 if !(is32Bit(c + 2*d)) { 24872 break 24873 } 24874 v.reset(OpAMD64MOVWstoreidx2) 24875 v.AuxInt = c + 2*d 24876 v.Aux = sym 24877 v.AddArg(ptr) 24878 v.AddArg(idx) 24879 v.AddArg(val) 24880 v.AddArg(mem) 24881 return true 24882 } 24883 // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 24884 // cond: x.Uses == 1 && clobber(x) 24885 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 24886 for { 24887 i := v.AuxInt 24888 s := v.Aux 24889 _ = v.Args[3] 24890 p := v.Args[0] 24891 idx := v.Args[1] 24892 v_2 := v.Args[2] 24893 if v_2.Op != OpAMD64SHRLconst { 24894 break 24895 } 24896 if v_2.AuxInt != 16 { 24897 break 24898 } 24899 w := v_2.Args[0] 24900 x := v.Args[3] 24901 if x.Op != OpAMD64MOVWstoreidx2 { 24902 break 24903 } 24904 if x.AuxInt != i-2 { 24905 break 24906 } 24907 if x.Aux != s { 24908 break 24909 } 24910 _ = x.Args[3] 24911 if p != x.Args[0] { 24912 break 24913 } 24914 if idx != x.Args[1] { 24915 break 24916 } 24917 if w != x.Args[2] { 24918 break 24919 } 24920 mem := x.Args[3] 24921 if !(x.Uses == 1 && clobber(x)) { 24922 break 24923 } 24924 v.reset(OpAMD64MOVLstoreidx1) 24925 v.AuxInt = i - 2 24926 v.Aux = s 24927 v.AddArg(p) 24928 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 24929 v0.AuxInt = 1 24930 v0.AddArg(idx) 24931 v.AddArg(v0) 24932 v.AddArg(w) 24933 v.AddArg(mem) 24934 return true 24935 } 24936 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 24937 // cond: x.Uses == 1 && clobber(x) 24938 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 24939 for { 24940 i := v.AuxInt 24941 s := v.Aux 24942 _ = v.Args[3] 24943 p := v.Args[0] 24944 idx := v.Args[1] 24945 v_2 := v.Args[2] 24946 if v_2.Op != OpAMD64SHRQconst { 24947 break 24948 } 24949 if v_2.AuxInt != 16 { 24950 break 24951 } 24952 w := v_2.Args[0] 24953 x := v.Args[3] 24954 if x.Op != OpAMD64MOVWstoreidx2 { 24955 break 24956 } 24957 if x.AuxInt != i-2 { 24958 break 24959 } 24960 if x.Aux != s { 24961 break 24962 } 24963 _ = x.Args[3] 24964 if p != x.Args[0] { 24965 break 24966 } 24967 if idx != 
x.Args[1] { 24968 break 24969 } 24970 if w != x.Args[2] { 24971 break 24972 } 24973 mem := x.Args[3] 24974 if !(x.Uses == 1 && clobber(x)) { 24975 break 24976 } 24977 v.reset(OpAMD64MOVLstoreidx1) 24978 v.AuxInt = i - 2 24979 v.Aux = s 24980 v.AddArg(p) 24981 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 24982 v0.AuxInt = 1 24983 v0.AddArg(idx) 24984 v.AddArg(v0) 24985 v.AddArg(w) 24986 v.AddArg(mem) 24987 return true 24988 } 24989 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 24990 // cond: x.Uses == 1 && clobber(x) 24991 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 24992 for { 24993 i := v.AuxInt 24994 s := v.Aux 24995 _ = v.Args[3] 24996 p := v.Args[0] 24997 idx := v.Args[1] 24998 v_2 := v.Args[2] 24999 if v_2.Op != OpAMD64SHRQconst { 25000 break 25001 } 25002 j := v_2.AuxInt 25003 w := v_2.Args[0] 25004 x := v.Args[3] 25005 if x.Op != OpAMD64MOVWstoreidx2 { 25006 break 25007 } 25008 if x.AuxInt != i-2 { 25009 break 25010 } 25011 if x.Aux != s { 25012 break 25013 } 25014 _ = x.Args[3] 25015 if p != x.Args[0] { 25016 break 25017 } 25018 if idx != x.Args[1] { 25019 break 25020 } 25021 w0 := x.Args[2] 25022 if w0.Op != OpAMD64SHRQconst { 25023 break 25024 } 25025 if w0.AuxInt != j-16 { 25026 break 25027 } 25028 if w != w0.Args[0] { 25029 break 25030 } 25031 mem := x.Args[3] 25032 if !(x.Uses == 1 && clobber(x)) { 25033 break 25034 } 25035 v.reset(OpAMD64MOVLstoreidx1) 25036 v.AuxInt = i - 2 25037 v.Aux = s 25038 v.AddArg(p) 25039 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 25040 v0.AuxInt = 1 25041 v0.AddArg(idx) 25042 v.AddArg(v0) 25043 v.AddArg(w0) 25044 v.AddArg(mem) 25045 return true 25046 } 25047 // match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem) 25048 // cond: is32Bit(i+2*c) 25049 // result: (MOVWstore [i+2*c] {s} p w mem) 25050 for { 25051 i := v.AuxInt 25052 s := v.Aux 25053 _ = v.Args[3] 25054 p := v.Args[0] 25055 v_1 := v.Args[1] 25056 if v_1.Op != OpAMD64MOVQconst { 25057 break 25058 } 25059 c := v_1.AuxInt 25060 w := v.Args[2] 25061 mem := v.Args[3] 25062 if !(is32Bit(i + 2*c)) { 25063 break 25064 } 25065 v.reset(OpAMD64MOVWstore) 25066 v.AuxInt = i + 2*c 25067 v.Aux = s 25068 v.AddArg(p) 25069 v.AddArg(w) 25070 v.AddArg(mem) 25071 return true 25072 } 25073 return false 25074 } 25075 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 25076 // match: (MULL x (MOVLconst [c])) 25077 // cond: 25078 // result: (MULLconst [c] x) 25079 for { 25080 _ = v.Args[1] 25081 x := v.Args[0] 25082 v_1 := v.Args[1] 25083 if v_1.Op != OpAMD64MOVLconst { 25084 break 25085 } 25086 c := v_1.AuxInt 25087 v.reset(OpAMD64MULLconst) 25088 v.AuxInt = c 25089 v.AddArg(x) 25090 return true 25091 } 25092 // match: (MULL (MOVLconst [c]) x) 25093 // cond: 25094 // result: (MULLconst [c] x) 25095 for { 25096 _ = v.Args[1] 25097 v_0 := v.Args[0] 25098 if v_0.Op != OpAMD64MOVLconst { 25099 break 25100 } 25101 c := v_0.AuxInt 25102 x := v.Args[1] 25103 v.reset(OpAMD64MULLconst) 25104 v.AuxInt = c 25105 v.AddArg(x) 25106 return true 25107 } 25108 return false 25109 } 25110 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 25111 b := v.Block 25112 _ = b 25113 // match: (MULLconst [c] (MULLconst [d] x)) 25114 // cond: 25115 // result: (MULLconst [int64(int32(c * d))] x) 25116 for { 25117 c := v.AuxInt 25118 v_0 := v.Args[0] 25119 if v_0.Op != OpAMD64MULLconst { 25120 break 25121 } 25122 d := v_0.AuxInt 25123 x := v_0.Args[0] 25124 v.reset(OpAMD64MULLconst) 25125 v.AuxInt = 
int64(int32(c * d)) 25126 v.AddArg(x) 25127 return true 25128 } 25129 // match: (MULLconst [-9] x) 25130 // cond: 25131 // result: (NEGL (LEAL8 <v.Type> x x)) 25132 for { 25133 if v.AuxInt != -9 { 25134 break 25135 } 25136 x := v.Args[0] 25137 v.reset(OpAMD64NEGL) 25138 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25139 v0.AddArg(x) 25140 v0.AddArg(x) 25141 v.AddArg(v0) 25142 return true 25143 } 25144 // match: (MULLconst [-5] x) 25145 // cond: 25146 // result: (NEGL (LEAL4 <v.Type> x x)) 25147 for { 25148 if v.AuxInt != -5 { 25149 break 25150 } 25151 x := v.Args[0] 25152 v.reset(OpAMD64NEGL) 25153 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25154 v0.AddArg(x) 25155 v0.AddArg(x) 25156 v.AddArg(v0) 25157 return true 25158 } 25159 // match: (MULLconst [-3] x) 25160 // cond: 25161 // result: (NEGL (LEAL2 <v.Type> x x)) 25162 for { 25163 if v.AuxInt != -3 { 25164 break 25165 } 25166 x := v.Args[0] 25167 v.reset(OpAMD64NEGL) 25168 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25169 v0.AddArg(x) 25170 v0.AddArg(x) 25171 v.AddArg(v0) 25172 return true 25173 } 25174 // match: (MULLconst [-1] x) 25175 // cond: 25176 // result: (NEGL x) 25177 for { 25178 if v.AuxInt != -1 { 25179 break 25180 } 25181 x := v.Args[0] 25182 v.reset(OpAMD64NEGL) 25183 v.AddArg(x) 25184 return true 25185 } 25186 // match: (MULLconst [ 0] _) 25187 // cond: 25188 // result: (MOVLconst [0]) 25189 for { 25190 if v.AuxInt != 0 { 25191 break 25192 } 25193 v.reset(OpAMD64MOVLconst) 25194 v.AuxInt = 0 25195 return true 25196 } 25197 // match: (MULLconst [ 1] x) 25198 // cond: 25199 // result: x 25200 for { 25201 if v.AuxInt != 1 { 25202 break 25203 } 25204 x := v.Args[0] 25205 v.reset(OpCopy) 25206 v.Type = x.Type 25207 v.AddArg(x) 25208 return true 25209 } 25210 // match: (MULLconst [ 3] x) 25211 // cond: 25212 // result: (LEAL2 x x) 25213 for { 25214 if v.AuxInt != 3 { 25215 break 25216 } 25217 x := v.Args[0] 25218 v.reset(OpAMD64LEAL2) 25219 v.AddArg(x) 25220 v.AddArg(x) 25221 return true 25222 } 25223 // match: (MULLconst [ 5] x) 25224 // cond: 25225 // result: (LEAL4 x x) 25226 for { 25227 if v.AuxInt != 5 { 25228 break 25229 } 25230 x := v.Args[0] 25231 v.reset(OpAMD64LEAL4) 25232 v.AddArg(x) 25233 v.AddArg(x) 25234 return true 25235 } 25236 // match: (MULLconst [ 7] x) 25237 // cond: 25238 // result: (LEAL2 x (LEAL2 <v.Type> x x)) 25239 for { 25240 if v.AuxInt != 7 { 25241 break 25242 } 25243 x := v.Args[0] 25244 v.reset(OpAMD64LEAL2) 25245 v.AddArg(x) 25246 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25247 v0.AddArg(x) 25248 v0.AddArg(x) 25249 v.AddArg(v0) 25250 return true 25251 } 25252 return false 25253 } 25254 func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { 25255 b := v.Block 25256 _ = b 25257 // match: (MULLconst [ 9] x) 25258 // cond: 25259 // result: (LEAL8 x x) 25260 for { 25261 if v.AuxInt != 9 { 25262 break 25263 } 25264 x := v.Args[0] 25265 v.reset(OpAMD64LEAL8) 25266 v.AddArg(x) 25267 v.AddArg(x) 25268 return true 25269 } 25270 // match: (MULLconst [11] x) 25271 // cond: 25272 // result: (LEAL2 x (LEAL4 <v.Type> x x)) 25273 for { 25274 if v.AuxInt != 11 { 25275 break 25276 } 25277 x := v.Args[0] 25278 v.reset(OpAMD64LEAL2) 25279 v.AddArg(x) 25280 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25281 v0.AddArg(x) 25282 v0.AddArg(x) 25283 v.AddArg(v0) 25284 return true 25285 } 25286 // match: (MULLconst [13] x) 25287 // cond: 25288 // result: (LEAL4 x (LEAL2 <v.Type> x x)) 25289 for { 25290 if v.AuxInt != 13 { 25291 break 25292 } 25293 x := v.Args[0] 25294 v.reset(OpAMD64LEAL4) 25295 
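// Note: this group of MULLconst rules strength-reduces 32-bit multiplies by
// small constants into LEA address arithmetic: (LEAL2 x x) computes x+2*x = 3*x,
// (LEAL4 x x) = 5*x, (LEAL8 x x) = 9*x, and one or two levels of nesting cover
// 7, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73 and 81 (negative constants add a
// NEGL). A minimal sketch of the identity behind the [13] case being built
// here (hypothetical helper, not part of the generated code):
//
//	func mulBy13(x int32) int32 {
//		t := x + 2*x   // LEAL2 x x -> 3*x
//		return x + 4*t // LEAL4 x t -> 13*x
//	}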
v.AddArg(x) 25296 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25297 v0.AddArg(x) 25298 v0.AddArg(x) 25299 v.AddArg(v0) 25300 return true 25301 } 25302 // match: (MULLconst [19] x) 25303 // cond: 25304 // result: (LEAL2 x (LEAL8 <v.Type> x x)) 25305 for { 25306 if v.AuxInt != 19 { 25307 break 25308 } 25309 x := v.Args[0] 25310 v.reset(OpAMD64LEAL2) 25311 v.AddArg(x) 25312 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25313 v0.AddArg(x) 25314 v0.AddArg(x) 25315 v.AddArg(v0) 25316 return true 25317 } 25318 // match: (MULLconst [21] x) 25319 // cond: 25320 // result: (LEAL4 x (LEAL4 <v.Type> x x)) 25321 for { 25322 if v.AuxInt != 21 { 25323 break 25324 } 25325 x := v.Args[0] 25326 v.reset(OpAMD64LEAL4) 25327 v.AddArg(x) 25328 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25329 v0.AddArg(x) 25330 v0.AddArg(x) 25331 v.AddArg(v0) 25332 return true 25333 } 25334 // match: (MULLconst [25] x) 25335 // cond: 25336 // result: (LEAL8 x (LEAL2 <v.Type> x x)) 25337 for { 25338 if v.AuxInt != 25 { 25339 break 25340 } 25341 x := v.Args[0] 25342 v.reset(OpAMD64LEAL8) 25343 v.AddArg(x) 25344 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25345 v0.AddArg(x) 25346 v0.AddArg(x) 25347 v.AddArg(v0) 25348 return true 25349 } 25350 // match: (MULLconst [27] x) 25351 // cond: 25352 // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x)) 25353 for { 25354 if v.AuxInt != 27 { 25355 break 25356 } 25357 x := v.Args[0] 25358 v.reset(OpAMD64LEAL8) 25359 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25360 v0.AddArg(x) 25361 v0.AddArg(x) 25362 v.AddArg(v0) 25363 v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25364 v1.AddArg(x) 25365 v1.AddArg(x) 25366 v.AddArg(v1) 25367 return true 25368 } 25369 // match: (MULLconst [37] x) 25370 // cond: 25371 // result: (LEAL4 x (LEAL8 <v.Type> x x)) 25372 for { 25373 if v.AuxInt != 37 { 25374 break 25375 } 25376 x := v.Args[0] 25377 v.reset(OpAMD64LEAL4) 25378 v.AddArg(x) 25379 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25380 v0.AddArg(x) 25381 v0.AddArg(x) 25382 v.AddArg(v0) 25383 return true 25384 } 25385 // match: (MULLconst [41] x) 25386 // cond: 25387 // result: (LEAL8 x (LEAL4 <v.Type> x x)) 25388 for { 25389 if v.AuxInt != 41 { 25390 break 25391 } 25392 x := v.Args[0] 25393 v.reset(OpAMD64LEAL8) 25394 v.AddArg(x) 25395 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25396 v0.AddArg(x) 25397 v0.AddArg(x) 25398 v.AddArg(v0) 25399 return true 25400 } 25401 // match: (MULLconst [45] x) 25402 // cond: 25403 // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x)) 25404 for { 25405 if v.AuxInt != 45 { 25406 break 25407 } 25408 x := v.Args[0] 25409 v.reset(OpAMD64LEAL8) 25410 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25411 v0.AddArg(x) 25412 v0.AddArg(x) 25413 v.AddArg(v0) 25414 v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25415 v1.AddArg(x) 25416 v1.AddArg(x) 25417 v.AddArg(v1) 25418 return true 25419 } 25420 return false 25421 } 25422 func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { 25423 b := v.Block 25424 _ = b 25425 // match: (MULLconst [73] x) 25426 // cond: 25427 // result: (LEAL8 x (LEAL8 <v.Type> x x)) 25428 for { 25429 if v.AuxInt != 73 { 25430 break 25431 } 25432 x := v.Args[0] 25433 v.reset(OpAMD64LEAL8) 25434 v.AddArg(x) 25435 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25436 v0.AddArg(x) 25437 v0.AddArg(x) 25438 v.AddArg(v0) 25439 return true 25440 } 25441 // match: (MULLconst [81] x) 25442 // cond: 25443 // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x)) 25444 for { 25445 if v.AuxInt != 81 { 25446 break 25447 } 25448 x := 
v.Args[0] 25449 v.reset(OpAMD64LEAL8) 25450 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25451 v0.AddArg(x) 25452 v0.AddArg(x) 25453 v.AddArg(v0) 25454 v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25455 v1.AddArg(x) 25456 v1.AddArg(x) 25457 v.AddArg(v1) 25458 return true 25459 } 25460 // match: (MULLconst [c] x) 25461 // cond: isPowerOfTwo(c+1) && c >= 15 25462 // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x) 25463 for { 25464 c := v.AuxInt 25465 x := v.Args[0] 25466 if !(isPowerOfTwo(c+1) && c >= 15) { 25467 break 25468 } 25469 v.reset(OpAMD64SUBL) 25470 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25471 v0.AuxInt = log2(c + 1) 25472 v0.AddArg(x) 25473 v.AddArg(v0) 25474 v.AddArg(x) 25475 return true 25476 } 25477 // match: (MULLconst [c] x) 25478 // cond: isPowerOfTwo(c-1) && c >= 17 25479 // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x) 25480 for { 25481 c := v.AuxInt 25482 x := v.Args[0] 25483 if !(isPowerOfTwo(c-1) && c >= 17) { 25484 break 25485 } 25486 v.reset(OpAMD64LEAL1) 25487 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25488 v0.AuxInt = log2(c - 1) 25489 v0.AddArg(x) 25490 v.AddArg(v0) 25491 v.AddArg(x) 25492 return true 25493 } 25494 // match: (MULLconst [c] x) 25495 // cond: isPowerOfTwo(c-2) && c >= 34 25496 // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x) 25497 for { 25498 c := v.AuxInt 25499 x := v.Args[0] 25500 if !(isPowerOfTwo(c-2) && c >= 34) { 25501 break 25502 } 25503 v.reset(OpAMD64LEAL2) 25504 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25505 v0.AuxInt = log2(c - 2) 25506 v0.AddArg(x) 25507 v.AddArg(v0) 25508 v.AddArg(x) 25509 return true 25510 } 25511 // match: (MULLconst [c] x) 25512 // cond: isPowerOfTwo(c-4) && c >= 68 25513 // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x) 25514 for { 25515 c := v.AuxInt 25516 x := v.Args[0] 25517 if !(isPowerOfTwo(c-4) && c >= 68) { 25518 break 25519 } 25520 v.reset(OpAMD64LEAL4) 25521 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25522 v0.AuxInt = log2(c - 4) 25523 v0.AddArg(x) 25524 v.AddArg(v0) 25525 v.AddArg(x) 25526 return true 25527 } 25528 // match: (MULLconst [c] x) 25529 // cond: isPowerOfTwo(c-8) && c >= 136 25530 // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x) 25531 for { 25532 c := v.AuxInt 25533 x := v.Args[0] 25534 if !(isPowerOfTwo(c-8) && c >= 136) { 25535 break 25536 } 25537 v.reset(OpAMD64LEAL8) 25538 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25539 v0.AuxInt = log2(c - 8) 25540 v0.AddArg(x) 25541 v.AddArg(v0) 25542 v.AddArg(x) 25543 return true 25544 } 25545 // match: (MULLconst [c] x) 25546 // cond: c%3 == 0 && isPowerOfTwo(c/3) 25547 // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x)) 25548 for { 25549 c := v.AuxInt 25550 x := v.Args[0] 25551 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 25552 break 25553 } 25554 v.reset(OpAMD64SHLLconst) 25555 v.AuxInt = log2(c / 3) 25556 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25557 v0.AddArg(x) 25558 v0.AddArg(x) 25559 v.AddArg(v0) 25560 return true 25561 } 25562 // match: (MULLconst [c] x) 25563 // cond: c%5 == 0 && isPowerOfTwo(c/5) 25564 // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x)) 25565 for { 25566 c := v.AuxInt 25567 x := v.Args[0] 25568 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 25569 break 25570 } 25571 v.reset(OpAMD64SHLLconst) 25572 v.AuxInt = log2(c / 5) 25573 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25574 v0.AddArg(x) 25575 v0.AddArg(x) 25576 v.AddArg(v0) 25577 return true 25578 } 25579 // match: (MULLconst [c] x) 25580 // cond: c%9 == 0 && isPowerOfTwo(c/9) 25581 // result: 
(SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x)) 25582 for { 25583 c := v.AuxInt 25584 x := v.Args[0] 25585 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 25586 break 25587 } 25588 v.reset(OpAMD64SHLLconst) 25589 v.AuxInt = log2(c / 9) 25590 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25591 v0.AddArg(x) 25592 v0.AddArg(x) 25593 v.AddArg(v0) 25594 return true 25595 } 25596 return false 25597 } 25598 func rewriteValueAMD64_OpAMD64MULLconst_30(v *Value) bool { 25599 // match: (MULLconst [c] (MOVLconst [d])) 25600 // cond: 25601 // result: (MOVLconst [int64(int32(c*d))]) 25602 for { 25603 c := v.AuxInt 25604 v_0 := v.Args[0] 25605 if v_0.Op != OpAMD64MOVLconst { 25606 break 25607 } 25608 d := v_0.AuxInt 25609 v.reset(OpAMD64MOVLconst) 25610 v.AuxInt = int64(int32(c * d)) 25611 return true 25612 } 25613 return false 25614 } 25615 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 25616 // match: (MULQ x (MOVQconst [c])) 25617 // cond: is32Bit(c) 25618 // result: (MULQconst [c] x) 25619 for { 25620 _ = v.Args[1] 25621 x := v.Args[0] 25622 v_1 := v.Args[1] 25623 if v_1.Op != OpAMD64MOVQconst { 25624 break 25625 } 25626 c := v_1.AuxInt 25627 if !(is32Bit(c)) { 25628 break 25629 } 25630 v.reset(OpAMD64MULQconst) 25631 v.AuxInt = c 25632 v.AddArg(x) 25633 return true 25634 } 25635 // match: (MULQ (MOVQconst [c]) x) 25636 // cond: is32Bit(c) 25637 // result: (MULQconst [c] x) 25638 for { 25639 _ = v.Args[1] 25640 v_0 := v.Args[0] 25641 if v_0.Op != OpAMD64MOVQconst { 25642 break 25643 } 25644 c := v_0.AuxInt 25645 x := v.Args[1] 25646 if !(is32Bit(c)) { 25647 break 25648 } 25649 v.reset(OpAMD64MULQconst) 25650 v.AuxInt = c 25651 v.AddArg(x) 25652 return true 25653 } 25654 return false 25655 } 25656 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 25657 b := v.Block 25658 _ = b 25659 // match: (MULQconst [c] (MULQconst [d] x)) 25660 // cond: is32Bit(c*d) 25661 // result: (MULQconst [c * d] x) 25662 for { 25663 c := v.AuxInt 25664 v_0 := v.Args[0] 25665 if v_0.Op != OpAMD64MULQconst { 25666 break 25667 } 25668 d := v_0.AuxInt 25669 x := v_0.Args[0] 25670 if !(is32Bit(c * d)) { 25671 break 25672 } 25673 v.reset(OpAMD64MULQconst) 25674 v.AuxInt = c * d 25675 v.AddArg(x) 25676 return true 25677 } 25678 // match: (MULQconst [-9] x) 25679 // cond: 25680 // result: (NEGQ (LEAQ8 <v.Type> x x)) 25681 for { 25682 if v.AuxInt != -9 { 25683 break 25684 } 25685 x := v.Args[0] 25686 v.reset(OpAMD64NEGQ) 25687 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25688 v0.AddArg(x) 25689 v0.AddArg(x) 25690 v.AddArg(v0) 25691 return true 25692 } 25693 // match: (MULQconst [-5] x) 25694 // cond: 25695 // result: (NEGQ (LEAQ4 <v.Type> x x)) 25696 for { 25697 if v.AuxInt != -5 { 25698 break 25699 } 25700 x := v.Args[0] 25701 v.reset(OpAMD64NEGQ) 25702 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25703 v0.AddArg(x) 25704 v0.AddArg(x) 25705 v.AddArg(v0) 25706 return true 25707 } 25708 // match: (MULQconst [-3] x) 25709 // cond: 25710 // result: (NEGQ (LEAQ2 <v.Type> x x)) 25711 for { 25712 if v.AuxInt != -3 { 25713 break 25714 } 25715 x := v.Args[0] 25716 v.reset(OpAMD64NEGQ) 25717 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25718 v0.AddArg(x) 25719 v0.AddArg(x) 25720 v.AddArg(v0) 25721 return true 25722 } 25723 // match: (MULQconst [-1] x) 25724 // cond: 25725 // result: (NEGQ x) 25726 for { 25727 if v.AuxInt != -1 { 25728 break 25729 } 25730 x := v.Args[0] 25731 v.reset(OpAMD64NEGQ) 25732 v.AddArg(x) 25733 return true 25734 } 25735 // match: (MULQconst [ 0] _) 25736 // cond: 25737 // result: (MOVQconst [0]) 25738 for { 
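// Note: constant folding differs by operand width here. The 32-bit fold just
// above wraps the product as int64(int32(c*d)), matching the low 32 bits MULL
// leaves in a register, while the nested 64-bit fold
// (MULQconst [c] (MULQconst [d] x)) is guarded by is32Bit(c*d) because a
// MULQconst immediate must fit in 32 bits. A worked example of the truncation
// (plain Go, illustrative only):
//
//	c, d := int64(1<<16), int64(1<<16)
//	_ = int64(int32(c * d)) // 0: the 2^32 product wraps to zero at 32 bits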
25739 if v.AuxInt != 0 { 25740 break 25741 } 25742 v.reset(OpAMD64MOVQconst) 25743 v.AuxInt = 0 25744 return true 25745 } 25746 // match: (MULQconst [ 1] x) 25747 // cond: 25748 // result: x 25749 for { 25750 if v.AuxInt != 1 { 25751 break 25752 } 25753 x := v.Args[0] 25754 v.reset(OpCopy) 25755 v.Type = x.Type 25756 v.AddArg(x) 25757 return true 25758 } 25759 // match: (MULQconst [ 3] x) 25760 // cond: 25761 // result: (LEAQ2 x x) 25762 for { 25763 if v.AuxInt != 3 { 25764 break 25765 } 25766 x := v.Args[0] 25767 v.reset(OpAMD64LEAQ2) 25768 v.AddArg(x) 25769 v.AddArg(x) 25770 return true 25771 } 25772 // match: (MULQconst [ 5] x) 25773 // cond: 25774 // result: (LEAQ4 x x) 25775 for { 25776 if v.AuxInt != 5 { 25777 break 25778 } 25779 x := v.Args[0] 25780 v.reset(OpAMD64LEAQ4) 25781 v.AddArg(x) 25782 v.AddArg(x) 25783 return true 25784 } 25785 // match: (MULQconst [ 7] x) 25786 // cond: 25787 // result: (LEAQ2 x (LEAQ2 <v.Type> x x)) 25788 for { 25789 if v.AuxInt != 7 { 25790 break 25791 } 25792 x := v.Args[0] 25793 v.reset(OpAMD64LEAQ2) 25794 v.AddArg(x) 25795 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25796 v0.AddArg(x) 25797 v0.AddArg(x) 25798 v.AddArg(v0) 25799 return true 25800 } 25801 return false 25802 } 25803 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 25804 b := v.Block 25805 _ = b 25806 // match: (MULQconst [ 9] x) 25807 // cond: 25808 // result: (LEAQ8 x x) 25809 for { 25810 if v.AuxInt != 9 { 25811 break 25812 } 25813 x := v.Args[0] 25814 v.reset(OpAMD64LEAQ8) 25815 v.AddArg(x) 25816 v.AddArg(x) 25817 return true 25818 } 25819 // match: (MULQconst [11] x) 25820 // cond: 25821 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 25822 for { 25823 if v.AuxInt != 11 { 25824 break 25825 } 25826 x := v.Args[0] 25827 v.reset(OpAMD64LEAQ2) 25828 v.AddArg(x) 25829 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25830 v0.AddArg(x) 25831 v0.AddArg(x) 25832 v.AddArg(v0) 25833 return true 25834 } 25835 // match: (MULQconst [13] x) 25836 // cond: 25837 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 25838 for { 25839 if v.AuxInt != 13 { 25840 break 25841 } 25842 x := v.Args[0] 25843 v.reset(OpAMD64LEAQ4) 25844 v.AddArg(x) 25845 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25846 v0.AddArg(x) 25847 v0.AddArg(x) 25848 v.AddArg(v0) 25849 return true 25850 } 25851 // match: (MULQconst [19] x) 25852 // cond: 25853 // result: (LEAQ2 x (LEAQ8 <v.Type> x x)) 25854 for { 25855 if v.AuxInt != 19 { 25856 break 25857 } 25858 x := v.Args[0] 25859 v.reset(OpAMD64LEAQ2) 25860 v.AddArg(x) 25861 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25862 v0.AddArg(x) 25863 v0.AddArg(x) 25864 v.AddArg(v0) 25865 return true 25866 } 25867 // match: (MULQconst [21] x) 25868 // cond: 25869 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 25870 for { 25871 if v.AuxInt != 21 { 25872 break 25873 } 25874 x := v.Args[0] 25875 v.reset(OpAMD64LEAQ4) 25876 v.AddArg(x) 25877 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25878 v0.AddArg(x) 25879 v0.AddArg(x) 25880 v.AddArg(v0) 25881 return true 25882 } 25883 // match: (MULQconst [25] x) 25884 // cond: 25885 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 25886 for { 25887 if v.AuxInt != 25 { 25888 break 25889 } 25890 x := v.Args[0] 25891 v.reset(OpAMD64LEAQ8) 25892 v.AddArg(x) 25893 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25894 v0.AddArg(x) 25895 v0.AddArg(x) 25896 v.AddArg(v0) 25897 return true 25898 } 25899 // match: (MULQconst [27] x) 25900 // cond: 25901 // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x)) 25902 for { 25903 if v.AuxInt != 27 { 25904 break 25905 } 
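// Note: 64-bit constant multiplies get the same LEA strength reduction as the
// MULLconst rules above; the [27] case being matched here becomes
// (LEAQ8 (LEAQ2 x x) (LEAQ2 x x)), i.e. 3*x + 8*(3*x). A minimal sketch
// (hypothetical helper, not part of the generated code):
//
//	func mulBy27(x int64) int64 {
//		t := x + 2*x   // LEAQ2 x x -> 3*x
//		return t + 8*t // LEAQ8 t t -> 27*x
//	}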
25906 x := v.Args[0] 25907 v.reset(OpAMD64LEAQ8) 25908 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25909 v0.AddArg(x) 25910 v0.AddArg(x) 25911 v.AddArg(v0) 25912 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25913 v1.AddArg(x) 25914 v1.AddArg(x) 25915 v.AddArg(v1) 25916 return true 25917 } 25918 // match: (MULQconst [37] x) 25919 // cond: 25920 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 25921 for { 25922 if v.AuxInt != 37 { 25923 break 25924 } 25925 x := v.Args[0] 25926 v.reset(OpAMD64LEAQ4) 25927 v.AddArg(x) 25928 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25929 v0.AddArg(x) 25930 v0.AddArg(x) 25931 v.AddArg(v0) 25932 return true 25933 } 25934 // match: (MULQconst [41] x) 25935 // cond: 25936 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 25937 for { 25938 if v.AuxInt != 41 { 25939 break 25940 } 25941 x := v.Args[0] 25942 v.reset(OpAMD64LEAQ8) 25943 v.AddArg(x) 25944 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25945 v0.AddArg(x) 25946 v0.AddArg(x) 25947 v.AddArg(v0) 25948 return true 25949 } 25950 // match: (MULQconst [45] x) 25951 // cond: 25952 // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x)) 25953 for { 25954 if v.AuxInt != 45 { 25955 break 25956 } 25957 x := v.Args[0] 25958 v.reset(OpAMD64LEAQ8) 25959 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25960 v0.AddArg(x) 25961 v0.AddArg(x) 25962 v.AddArg(v0) 25963 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25964 v1.AddArg(x) 25965 v1.AddArg(x) 25966 v.AddArg(v1) 25967 return true 25968 } 25969 return false 25970 } 25971 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 25972 b := v.Block 25973 _ = b 25974 // match: (MULQconst [73] x) 25975 // cond: 25976 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 25977 for { 25978 if v.AuxInt != 73 { 25979 break 25980 } 25981 x := v.Args[0] 25982 v.reset(OpAMD64LEAQ8) 25983 v.AddArg(x) 25984 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25985 v0.AddArg(x) 25986 v0.AddArg(x) 25987 v.AddArg(v0) 25988 return true 25989 } 25990 // match: (MULQconst [81] x) 25991 // cond: 25992 // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x)) 25993 for { 25994 if v.AuxInt != 81 { 25995 break 25996 } 25997 x := v.Args[0] 25998 v.reset(OpAMD64LEAQ8) 25999 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 26000 v0.AddArg(x) 26001 v0.AddArg(x) 26002 v.AddArg(v0) 26003 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 26004 v1.AddArg(x) 26005 v1.AddArg(x) 26006 v.AddArg(v1) 26007 return true 26008 } 26009 // match: (MULQconst [c] x) 26010 // cond: isPowerOfTwo(c+1) && c >= 15 26011 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 26012 for { 26013 c := v.AuxInt 26014 x := v.Args[0] 26015 if !(isPowerOfTwo(c+1) && c >= 15) { 26016 break 26017 } 26018 v.reset(OpAMD64SUBQ) 26019 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26020 v0.AuxInt = log2(c + 1) 26021 v0.AddArg(x) 26022 v.AddArg(v0) 26023 v.AddArg(x) 26024 return true 26025 } 26026 // match: (MULQconst [c] x) 26027 // cond: isPowerOfTwo(c-1) && c >= 17 26028 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 26029 for { 26030 c := v.AuxInt 26031 x := v.Args[0] 26032 if !(isPowerOfTwo(c-1) && c >= 17) { 26033 break 26034 } 26035 v.reset(OpAMD64LEAQ1) 26036 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26037 v0.AuxInt = log2(c - 1) 26038 v0.AddArg(x) 26039 v.AddArg(v0) 26040 v.AddArg(x) 26041 return true 26042 } 26043 // match: (MULQconst [c] x) 26044 // cond: isPowerOfTwo(c-2) && c >= 34 26045 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 26046 for { 26047 c := v.AuxInt 26048 x := v.Args[0] 26049 if 
!(isPowerOfTwo(c-2) && c >= 34) { 26050 break 26051 } 26052 v.reset(OpAMD64LEAQ2) 26053 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26054 v0.AuxInt = log2(c - 2) 26055 v0.AddArg(x) 26056 v.AddArg(v0) 26057 v.AddArg(x) 26058 return true 26059 } 26060 // match: (MULQconst [c] x) 26061 // cond: isPowerOfTwo(c-4) && c >= 68 26062 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 26063 for { 26064 c := v.AuxInt 26065 x := v.Args[0] 26066 if !(isPowerOfTwo(c-4) && c >= 68) { 26067 break 26068 } 26069 v.reset(OpAMD64LEAQ4) 26070 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26071 v0.AuxInt = log2(c - 4) 26072 v0.AddArg(x) 26073 v.AddArg(v0) 26074 v.AddArg(x) 26075 return true 26076 } 26077 // match: (MULQconst [c] x) 26078 // cond: isPowerOfTwo(c-8) && c >= 136 26079 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 26080 for { 26081 c := v.AuxInt 26082 x := v.Args[0] 26083 if !(isPowerOfTwo(c-8) && c >= 136) { 26084 break 26085 } 26086 v.reset(OpAMD64LEAQ8) 26087 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26088 v0.AuxInt = log2(c - 8) 26089 v0.AddArg(x) 26090 v.AddArg(v0) 26091 v.AddArg(x) 26092 return true 26093 } 26094 // match: (MULQconst [c] x) 26095 // cond: c%3 == 0 && isPowerOfTwo(c/3) 26096 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 26097 for { 26098 c := v.AuxInt 26099 x := v.Args[0] 26100 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 26101 break 26102 } 26103 v.reset(OpAMD64SHLQconst) 26104 v.AuxInt = log2(c / 3) 26105 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 26106 v0.AddArg(x) 26107 v0.AddArg(x) 26108 v.AddArg(v0) 26109 return true 26110 } 26111 // match: (MULQconst [c] x) 26112 // cond: c%5 == 0 && isPowerOfTwo(c/5) 26113 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 26114 for { 26115 c := v.AuxInt 26116 x := v.Args[0] 26117 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 26118 break 26119 } 26120 v.reset(OpAMD64SHLQconst) 26121 v.AuxInt = log2(c / 5) 26122 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 26123 v0.AddArg(x) 26124 v0.AddArg(x) 26125 v.AddArg(v0) 26126 return true 26127 } 26128 // match: (MULQconst [c] x) 26129 // cond: c%9 == 0 && isPowerOfTwo(c/9) 26130 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 26131 for { 26132 c := v.AuxInt 26133 x := v.Args[0] 26134 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 26135 break 26136 } 26137 v.reset(OpAMD64SHLQconst) 26138 v.AuxInt = log2(c / 9) 26139 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 26140 v0.AddArg(x) 26141 v0.AddArg(x) 26142 v.AddArg(v0) 26143 return true 26144 } 26145 return false 26146 } 26147 func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { 26148 // match: (MULQconst [c] (MOVQconst [d])) 26149 // cond: 26150 // result: (MOVQconst [c*d]) 26151 for { 26152 c := v.AuxInt 26153 v_0 := v.Args[0] 26154 if v_0.Op != OpAMD64MOVQconst { 26155 break 26156 } 26157 d := v_0.AuxInt 26158 v.reset(OpAMD64MOVQconst) 26159 v.AuxInt = c * d 26160 return true 26161 } 26162 return false 26163 } 26164 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 26165 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 26166 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 26167 // result: (MULSDload x [off] {sym} ptr mem) 26168 for { 26169 _ = v.Args[1] 26170 x := v.Args[0] 26171 l := v.Args[1] 26172 if l.Op != OpAMD64MOVSDload { 26173 break 26174 } 26175 off := l.AuxInt 26176 sym := l.Aux 26177 _ = l.Args[1] 26178 ptr := l.Args[0] 26179 mem := l.Args[1] 26180 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 26181 break 26182 } 26183 v.reset(OpAMD64MULSDload) 26184 v.AuxInt = off 
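// Note: the MULSD rules fold a single-use MOVSDload into the multiply, so a
// separate load feeding a MULSD becomes one MULSDload with a memory operand.
// canMergeLoadClobber checks that the load is used only by this multiply and
// can safely be moved to it; clobber(l) then marks the load dead. What the
// fused op computes is simply (illustrative helper, assumed name):
//
//	func mulsdLoad(x float64, ptr *float64) float64 { return x * *ptr }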
26185 v.Aux = sym 26186 v.AddArg(x) 26187 v.AddArg(ptr) 26188 v.AddArg(mem) 26189 return true 26190 } 26191 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 26192 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 26193 // result: (MULSDload x [off] {sym} ptr mem) 26194 for { 26195 _ = v.Args[1] 26196 l := v.Args[0] 26197 if l.Op != OpAMD64MOVSDload { 26198 break 26199 } 26200 off := l.AuxInt 26201 sym := l.Aux 26202 _ = l.Args[1] 26203 ptr := l.Args[0] 26204 mem := l.Args[1] 26205 x := v.Args[1] 26206 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 26207 break 26208 } 26209 v.reset(OpAMD64MULSDload) 26210 v.AuxInt = off 26211 v.Aux = sym 26212 v.AddArg(x) 26213 v.AddArg(ptr) 26214 v.AddArg(mem) 26215 return true 26216 } 26217 return false 26218 } 26219 func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { 26220 b := v.Block 26221 _ = b 26222 typ := &b.Func.Config.Types 26223 _ = typ 26224 // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem) 26225 // cond: is32Bit(off1+off2) 26226 // result: (MULSDload [off1+off2] {sym} val base mem) 26227 for { 26228 off1 := v.AuxInt 26229 sym := v.Aux 26230 _ = v.Args[2] 26231 val := v.Args[0] 26232 v_1 := v.Args[1] 26233 if v_1.Op != OpAMD64ADDQconst { 26234 break 26235 } 26236 off2 := v_1.AuxInt 26237 base := v_1.Args[0] 26238 mem := v.Args[2] 26239 if !(is32Bit(off1 + off2)) { 26240 break 26241 } 26242 v.reset(OpAMD64MULSDload) 26243 v.AuxInt = off1 + off2 26244 v.Aux = sym 26245 v.AddArg(val) 26246 v.AddArg(base) 26247 v.AddArg(mem) 26248 return true 26249 } 26250 // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 26251 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 26252 // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 26253 for { 26254 off1 := v.AuxInt 26255 sym1 := v.Aux 26256 _ = v.Args[2] 26257 val := v.Args[0] 26258 v_1 := v.Args[1] 26259 if v_1.Op != OpAMD64LEAQ { 26260 break 26261 } 26262 off2 := v_1.AuxInt 26263 sym2 := v_1.Aux 26264 base := v_1.Args[0] 26265 mem := v.Args[2] 26266 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 26267 break 26268 } 26269 v.reset(OpAMD64MULSDload) 26270 v.AuxInt = off1 + off2 26271 v.Aux = mergeSym(sym1, sym2) 26272 v.AddArg(val) 26273 v.AddArg(base) 26274 v.AddArg(mem) 26275 return true 26276 } 26277 // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 26278 // cond: 26279 // result: (MULSD x (MOVQi2f y)) 26280 for { 26281 off := v.AuxInt 26282 sym := v.Aux 26283 _ = v.Args[2] 26284 x := v.Args[0] 26285 ptr := v.Args[1] 26286 v_2 := v.Args[2] 26287 if v_2.Op != OpAMD64MOVQstore { 26288 break 26289 } 26290 if v_2.AuxInt != off { 26291 break 26292 } 26293 if v_2.Aux != sym { 26294 break 26295 } 26296 _ = v_2.Args[2] 26297 if ptr != v_2.Args[0] { 26298 break 26299 } 26300 y := v_2.Args[1] 26301 v.reset(OpAMD64MULSD) 26302 v.AddArg(x) 26303 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) 26304 v0.AddArg(y) 26305 v.AddArg(v0) 26306 return true 26307 } 26308 return false 26309 } 26310 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 26311 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 26312 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 26313 // result: (MULSSload x [off] {sym} ptr mem) 26314 for { 26315 _ = v.Args[1] 26316 x := v.Args[0] 26317 l := v.Args[1] 26318 if l.Op != OpAMD64MOVSSload { 26319 break 26320 } 26321 off := l.AuxInt 26322 sym := l.Aux 26323 _ = l.Args[1] 26324 ptr := l.Args[0] 26325 mem := l.Args[1] 26326 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 26327 break 
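// Note: just above, (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// was rewritten to (MULSD x (MOVQi2f y)): when the loaded bits were stored from
// an integer register in the same memory state, the round trip through memory
// is skipped and the bits are reinterpreted in-register. MOVQi2f is a
// bit-for-bit move, like math.Float64frombits (math is already imported by
// this file):
//
//	y := uint64(0x3ff0000000000000)
//	_ = math.Float64frombits(y) // 1.0: no numeric conversion, just a register move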
26328 } 26329 v.reset(OpAMD64MULSSload) 26330 v.AuxInt = off 26331 v.Aux = sym 26332 v.AddArg(x) 26333 v.AddArg(ptr) 26334 v.AddArg(mem) 26335 return true 26336 } 26337 // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) 26338 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 26339 // result: (MULSSload x [off] {sym} ptr mem) 26340 for { 26341 _ = v.Args[1] 26342 l := v.Args[0] 26343 if l.Op != OpAMD64MOVSSload { 26344 break 26345 } 26346 off := l.AuxInt 26347 sym := l.Aux 26348 _ = l.Args[1] 26349 ptr := l.Args[0] 26350 mem := l.Args[1] 26351 x := v.Args[1] 26352 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 26353 break 26354 } 26355 v.reset(OpAMD64MULSSload) 26356 v.AuxInt = off 26357 v.Aux = sym 26358 v.AddArg(x) 26359 v.AddArg(ptr) 26360 v.AddArg(mem) 26361 return true 26362 } 26363 return false 26364 } 26365 func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { 26366 b := v.Block 26367 _ = b 26368 typ := &b.Func.Config.Types 26369 _ = typ 26370 // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem) 26371 // cond: is32Bit(off1+off2) 26372 // result: (MULSSload [off1+off2] {sym} val base mem) 26373 for { 26374 off1 := v.AuxInt 26375 sym := v.Aux 26376 _ = v.Args[2] 26377 val := v.Args[0] 26378 v_1 := v.Args[1] 26379 if v_1.Op != OpAMD64ADDQconst { 26380 break 26381 } 26382 off2 := v_1.AuxInt 26383 base := v_1.Args[0] 26384 mem := v.Args[2] 26385 if !(is32Bit(off1 + off2)) { 26386 break 26387 } 26388 v.reset(OpAMD64MULSSload) 26389 v.AuxInt = off1 + off2 26390 v.Aux = sym 26391 v.AddArg(val) 26392 v.AddArg(base) 26393 v.AddArg(mem) 26394 return true 26395 } 26396 // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 26397 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 26398 // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 26399 for { 26400 off1 := v.AuxInt 26401 sym1 := v.Aux 26402 _ = v.Args[2] 26403 val := v.Args[0] 26404 v_1 := v.Args[1] 26405 if v_1.Op != OpAMD64LEAQ { 26406 break 26407 } 26408 off2 := v_1.AuxInt 26409 sym2 := v_1.Aux 26410 base := v_1.Args[0] 26411 mem := v.Args[2] 26412 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 26413 break 26414 } 26415 v.reset(OpAMD64MULSSload) 26416 v.AuxInt = off1 + off2 26417 v.Aux = mergeSym(sym1, sym2) 26418 v.AddArg(val) 26419 v.AddArg(base) 26420 v.AddArg(mem) 26421 return true 26422 } 26423 // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 26424 // cond: 26425 // result: (MULSS x (MOVLi2f y)) 26426 for { 26427 off := v.AuxInt 26428 sym := v.Aux 26429 _ = v.Args[2] 26430 x := v.Args[0] 26431 ptr := v.Args[1] 26432 v_2 := v.Args[2] 26433 if v_2.Op != OpAMD64MOVLstore { 26434 break 26435 } 26436 if v_2.AuxInt != off { 26437 break 26438 } 26439 if v_2.Aux != sym { 26440 break 26441 } 26442 _ = v_2.Args[2] 26443 if ptr != v_2.Args[0] { 26444 break 26445 } 26446 y := v_2.Args[1] 26447 v.reset(OpAMD64MULSS) 26448 v.AddArg(x) 26449 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) 26450 v0.AddArg(y) 26451 v.AddArg(v0) 26452 return true 26453 } 26454 return false 26455 } 26456 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { 26457 // match: (NEGL (NEGL x)) 26458 // cond: 26459 // result: x 26460 for { 26461 v_0 := v.Args[0] 26462 if v_0.Op != OpAMD64NEGL { 26463 break 26464 } 26465 x := v_0.Args[0] 26466 v.reset(OpCopy) 26467 v.Type = x.Type 26468 v.AddArg(x) 26469 return true 26470 } 26471 // match: (NEGL (MOVLconst [c])) 26472 // cond: 26473 // result: (MOVLconst [int64(int32(-c))]) 26474 for { 26475 v_0 := v.Args[0] 26476 if v_0.Op 
!= OpAMD64MOVLconst { 26477 break 26478 } 26479 c := v_0.AuxInt 26480 v.reset(OpAMD64MOVLconst) 26481 v.AuxInt = int64(int32(-c)) 26482 return true 26483 } 26484 return false 26485 } 26486 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { 26487 // match: (NEGQ (NEGQ x)) 26488 // cond: 26489 // result: x 26490 for { 26491 v_0 := v.Args[0] 26492 if v_0.Op != OpAMD64NEGQ { 26493 break 26494 } 26495 x := v_0.Args[0] 26496 v.reset(OpCopy) 26497 v.Type = x.Type 26498 v.AddArg(x) 26499 return true 26500 } 26501 // match: (NEGQ (MOVQconst [c])) 26502 // cond: 26503 // result: (MOVQconst [-c]) 26504 for { 26505 v_0 := v.Args[0] 26506 if v_0.Op != OpAMD64MOVQconst { 26507 break 26508 } 26509 c := v_0.AuxInt 26510 v.reset(OpAMD64MOVQconst) 26511 v.AuxInt = -c 26512 return true 26513 } 26514 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 26515 // cond: c != -(1<<31) 26516 // result: (ADDQconst [-c] x) 26517 for { 26518 v_0 := v.Args[0] 26519 if v_0.Op != OpAMD64ADDQconst { 26520 break 26521 } 26522 c := v_0.AuxInt 26523 v_0_0 := v_0.Args[0] 26524 if v_0_0.Op != OpAMD64NEGQ { 26525 break 26526 } 26527 x := v_0_0.Args[0] 26528 if !(c != -(1 << 31)) { 26529 break 26530 } 26531 v.reset(OpAMD64ADDQconst) 26532 v.AuxInt = -c 26533 v.AddArg(x) 26534 return true 26535 } 26536 return false 26537 } 26538 func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { 26539 // match: (NOTL (MOVLconst [c])) 26540 // cond: 26541 // result: (MOVLconst [^c]) 26542 for { 26543 v_0 := v.Args[0] 26544 if v_0.Op != OpAMD64MOVLconst { 26545 break 26546 } 26547 c := v_0.AuxInt 26548 v.reset(OpAMD64MOVLconst) 26549 v.AuxInt = ^c 26550 return true 26551 } 26552 return false 26553 } 26554 func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { 26555 // match: (NOTQ (MOVQconst [c])) 26556 // cond: 26557 // result: (MOVQconst [^c]) 26558 for { 26559 v_0 := v.Args[0] 26560 if v_0.Op != OpAMD64MOVQconst { 26561 break 26562 } 26563 c := v_0.AuxInt 26564 v.reset(OpAMD64MOVQconst) 26565 v.AuxInt = ^c 26566 return true 26567 } 26568 return false 26569 } 26570 func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { 26571 b := v.Block 26572 _ = b 26573 config := b.Func.Config 26574 _ = config 26575 // match: (ORL (SHLL (MOVLconst [1]) y) x) 26576 // cond: !config.nacl 26577 // result: (BTSL x y) 26578 for { 26579 _ = v.Args[1] 26580 v_0 := v.Args[0] 26581 if v_0.Op != OpAMD64SHLL { 26582 break 26583 } 26584 _ = v_0.Args[1] 26585 v_0_0 := v_0.Args[0] 26586 if v_0_0.Op != OpAMD64MOVLconst { 26587 break 26588 } 26589 if v_0_0.AuxInt != 1 { 26590 break 26591 } 26592 y := v_0.Args[1] 26593 x := v.Args[1] 26594 if !(!config.nacl) { 26595 break 26596 } 26597 v.reset(OpAMD64BTSL) 26598 v.AddArg(x) 26599 v.AddArg(y) 26600 return true 26601 } 26602 // match: (ORL x (SHLL (MOVLconst [1]) y)) 26603 // cond: !config.nacl 26604 // result: (BTSL x y) 26605 for { 26606 _ = v.Args[1] 26607 x := v.Args[0] 26608 v_1 := v.Args[1] 26609 if v_1.Op != OpAMD64SHLL { 26610 break 26611 } 26612 _ = v_1.Args[1] 26613 v_1_0 := v_1.Args[0] 26614 if v_1_0.Op != OpAMD64MOVLconst { 26615 break 26616 } 26617 if v_1_0.AuxInt != 1 { 26618 break 26619 } 26620 y := v_1.Args[1] 26621 if !(!config.nacl) { 26622 break 26623 } 26624 v.reset(OpAMD64BTSL) 26625 v.AddArg(x) 26626 v.AddArg(y) 26627 return true 26628 } 26629 // match: (ORL (MOVLconst [c]) x) 26630 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 26631 // result: (BTSLconst [log2uint32(c)] x) 26632 for { 26633 _ = v.Args[1] 26634 v_0 := v.Args[0] 26635 if v_0.Op != OpAMD64MOVLconst { 26636 break 26637 } 26638 c 
:= v_0.AuxInt 26639 x := v.Args[1] 26640 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 26641 break 26642 } 26643 v.reset(OpAMD64BTSLconst) 26644 v.AuxInt = log2uint32(c) 26645 v.AddArg(x) 26646 return true 26647 } 26648 // match: (ORL x (MOVLconst [c])) 26649 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 26650 // result: (BTSLconst [log2uint32(c)] x) 26651 for { 26652 _ = v.Args[1] 26653 x := v.Args[0] 26654 v_1 := v.Args[1] 26655 if v_1.Op != OpAMD64MOVLconst { 26656 break 26657 } 26658 c := v_1.AuxInt 26659 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 26660 break 26661 } 26662 v.reset(OpAMD64BTSLconst) 26663 v.AuxInt = log2uint32(c) 26664 v.AddArg(x) 26665 return true 26666 } 26667 // match: (ORL x (MOVLconst [c])) 26668 // cond: 26669 // result: (ORLconst [c] x) 26670 for { 26671 _ = v.Args[1] 26672 x := v.Args[0] 26673 v_1 := v.Args[1] 26674 if v_1.Op != OpAMD64MOVLconst { 26675 break 26676 } 26677 c := v_1.AuxInt 26678 v.reset(OpAMD64ORLconst) 26679 v.AuxInt = c 26680 v.AddArg(x) 26681 return true 26682 } 26683 // match: (ORL (MOVLconst [c]) x) 26684 // cond: 26685 // result: (ORLconst [c] x) 26686 for { 26687 _ = v.Args[1] 26688 v_0 := v.Args[0] 26689 if v_0.Op != OpAMD64MOVLconst { 26690 break 26691 } 26692 c := v_0.AuxInt 26693 x := v.Args[1] 26694 v.reset(OpAMD64ORLconst) 26695 v.AuxInt = c 26696 v.AddArg(x) 26697 return true 26698 } 26699 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) 26700 // cond: d==32-c 26701 // result: (ROLLconst x [c]) 26702 for { 26703 _ = v.Args[1] 26704 v_0 := v.Args[0] 26705 if v_0.Op != OpAMD64SHLLconst { 26706 break 26707 } 26708 c := v_0.AuxInt 26709 x := v_0.Args[0] 26710 v_1 := v.Args[1] 26711 if v_1.Op != OpAMD64SHRLconst { 26712 break 26713 } 26714 d := v_1.AuxInt 26715 if x != v_1.Args[0] { 26716 break 26717 } 26718 if !(d == 32-c) { 26719 break 26720 } 26721 v.reset(OpAMD64ROLLconst) 26722 v.AuxInt = c 26723 v.AddArg(x) 26724 return true 26725 } 26726 // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) 26727 // cond: d==32-c 26728 // result: (ROLLconst x [c]) 26729 for { 26730 _ = v.Args[1] 26731 v_0 := v.Args[0] 26732 if v_0.Op != OpAMD64SHRLconst { 26733 break 26734 } 26735 d := v_0.AuxInt 26736 x := v_0.Args[0] 26737 v_1 := v.Args[1] 26738 if v_1.Op != OpAMD64SHLLconst { 26739 break 26740 } 26741 c := v_1.AuxInt 26742 if x != v_1.Args[0] { 26743 break 26744 } 26745 if !(d == 32-c) { 26746 break 26747 } 26748 v.reset(OpAMD64ROLLconst) 26749 v.AuxInt = c 26750 v.AddArg(x) 26751 return true 26752 } 26753 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 26754 // cond: d==16-c && c < 16 && t.Size() == 2 26755 // result: (ROLWconst x [c]) 26756 for { 26757 t := v.Type 26758 _ = v.Args[1] 26759 v_0 := v.Args[0] 26760 if v_0.Op != OpAMD64SHLLconst { 26761 break 26762 } 26763 c := v_0.AuxInt 26764 x := v_0.Args[0] 26765 v_1 := v.Args[1] 26766 if v_1.Op != OpAMD64SHRWconst { 26767 break 26768 } 26769 d := v_1.AuxInt 26770 if x != v_1.Args[0] { 26771 break 26772 } 26773 if !(d == 16-c && c < 16 && t.Size() == 2) { 26774 break 26775 } 26776 v.reset(OpAMD64ROLWconst) 26777 v.AuxInt = c 26778 v.AddArg(x) 26779 return true 26780 } 26781 // match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 26782 // cond: d==16-c && c < 16 && t.Size() == 2 26783 // result: (ROLWconst x [c]) 26784 for { 26785 t := v.Type 26786 _ = v.Args[1] 26787 v_0 := v.Args[0] 26788 if v_0.Op != OpAMD64SHRWconst { 26789 break 26790 } 26791 d := v_0.AuxInt 26792 x := v_0.Args[0] 26793 v_1 := v.Args[1] 26794 if 
v_1.Op != OpAMD64SHLLconst { 26795 break 26796 } 26797 c := v_1.AuxInt 26798 if x != v_1.Args[0] { 26799 break 26800 } 26801 if !(d == 16-c && c < 16 && t.Size() == 2) { 26802 break 26803 } 26804 v.reset(OpAMD64ROLWconst) 26805 v.AuxInt = c 26806 v.AddArg(x) 26807 return true 26808 } 26809 return false 26810 } 26811 func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { 26812 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 26813 // cond: d==8-c && c < 8 && t.Size() == 1 26814 // result: (ROLBconst x [c]) 26815 for { 26816 t := v.Type 26817 _ = v.Args[1] 26818 v_0 := v.Args[0] 26819 if v_0.Op != OpAMD64SHLLconst { 26820 break 26821 } 26822 c := v_0.AuxInt 26823 x := v_0.Args[0] 26824 v_1 := v.Args[1] 26825 if v_1.Op != OpAMD64SHRBconst { 26826 break 26827 } 26828 d := v_1.AuxInt 26829 if x != v_1.Args[0] { 26830 break 26831 } 26832 if !(d == 8-c && c < 8 && t.Size() == 1) { 26833 break 26834 } 26835 v.reset(OpAMD64ROLBconst) 26836 v.AuxInt = c 26837 v.AddArg(x) 26838 return true 26839 } 26840 // match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 26841 // cond: d==8-c && c < 8 && t.Size() == 1 26842 // result: (ROLBconst x [c]) 26843 for { 26844 t := v.Type 26845 _ = v.Args[1] 26846 v_0 := v.Args[0] 26847 if v_0.Op != OpAMD64SHRBconst { 26848 break 26849 } 26850 d := v_0.AuxInt 26851 x := v_0.Args[0] 26852 v_1 := v.Args[1] 26853 if v_1.Op != OpAMD64SHLLconst { 26854 break 26855 } 26856 c := v_1.AuxInt 26857 if x != v_1.Args[0] { 26858 break 26859 } 26860 if !(d == 8-c && c < 8 && t.Size() == 1) { 26861 break 26862 } 26863 v.reset(OpAMD64ROLBconst) 26864 v.AuxInt = c 26865 v.AddArg(x) 26866 return true 26867 } 26868 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 26869 // cond: 26870 // result: (ROLL x y) 26871 for { 26872 _ = v.Args[1] 26873 v_0 := v.Args[0] 26874 if v_0.Op != OpAMD64SHLL { 26875 break 26876 } 26877 _ = v_0.Args[1] 26878 x := v_0.Args[0] 26879 y := v_0.Args[1] 26880 v_1 := v.Args[1] 26881 if v_1.Op != OpAMD64ANDL { 26882 break 26883 } 26884 _ = v_1.Args[1] 26885 v_1_0 := v_1.Args[0] 26886 if v_1_0.Op != OpAMD64SHRL { 26887 break 26888 } 26889 _ = v_1_0.Args[1] 26890 if x != v_1_0.Args[0] { 26891 break 26892 } 26893 v_1_0_1 := v_1_0.Args[1] 26894 if v_1_0_1.Op != OpAMD64NEGQ { 26895 break 26896 } 26897 if y != v_1_0_1.Args[0] { 26898 break 26899 } 26900 v_1_1 := v_1.Args[1] 26901 if v_1_1.Op != OpAMD64SBBLcarrymask { 26902 break 26903 } 26904 v_1_1_0 := v_1_1.Args[0] 26905 if v_1_1_0.Op != OpAMD64CMPQconst { 26906 break 26907 } 26908 if v_1_1_0.AuxInt != 32 { 26909 break 26910 } 26911 v_1_1_0_0 := v_1_1_0.Args[0] 26912 if v_1_1_0_0.Op != OpAMD64NEGQ { 26913 break 26914 } 26915 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 26916 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 26917 break 26918 } 26919 if v_1_1_0_0_0.AuxInt != -32 { 26920 break 26921 } 26922 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 26923 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 26924 break 26925 } 26926 if v_1_1_0_0_0_0.AuxInt != 31 { 26927 break 26928 } 26929 if y != v_1_1_0_0_0_0.Args[0] { 26930 break 26931 } 26932 v.reset(OpAMD64ROLL) 26933 v.AddArg(x) 26934 v.AddArg(y) 26935 return true 26936 } 26937 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y)))) 26938 // cond: 26939 // result: (ROLL x y) 26940 for { 26941 _ = v.Args[1] 26942 v_0 := v.Args[0] 26943 if v_0.Op != OpAMD64SHLL { 26944 break 26945 } 26946 _ = v_0.Args[1] 26947 x := v_0.Args[0] 26948 y := v_0.Args[1] 
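// Note: at the top of this ORL group (rewriteValueAMD64_OpAMD64ORL_0),
// (ORL (SHLL (MOVLconst [1]) y) x) becomes (BTSL x y), and a power-of-two
// constant OR becomes BTSLconst. The uint64(c) >= 128 guard keeps smaller
// constants on the plain ORLconst path, where a sign-extended 8-bit immediate
// encoding is still available. In source form the shapes are (illustrative):
//
//	x |= 1 << y  // variable single-bit set      -> BTSL x y
//	x |= 1 << 20 // constant bit index 7 or more -> BTSLconst [20] x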
26949 v_1 := v.Args[1] 26950 if v_1.Op != OpAMD64ANDL { 26951 break 26952 } 26953 _ = v_1.Args[1] 26954 v_1_0 := v_1.Args[0] 26955 if v_1_0.Op != OpAMD64SBBLcarrymask { 26956 break 26957 } 26958 v_1_0_0 := v_1_0.Args[0] 26959 if v_1_0_0.Op != OpAMD64CMPQconst { 26960 break 26961 } 26962 if v_1_0_0.AuxInt != 32 { 26963 break 26964 } 26965 v_1_0_0_0 := v_1_0_0.Args[0] 26966 if v_1_0_0_0.Op != OpAMD64NEGQ { 26967 break 26968 } 26969 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 26970 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 26971 break 26972 } 26973 if v_1_0_0_0_0.AuxInt != -32 { 26974 break 26975 } 26976 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 26977 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 26978 break 26979 } 26980 if v_1_0_0_0_0_0.AuxInt != 31 { 26981 break 26982 } 26983 if y != v_1_0_0_0_0_0.Args[0] { 26984 break 26985 } 26986 v_1_1 := v_1.Args[1] 26987 if v_1_1.Op != OpAMD64SHRL { 26988 break 26989 } 26990 _ = v_1_1.Args[1] 26991 if x != v_1_1.Args[0] { 26992 break 26993 } 26994 v_1_1_1 := v_1_1.Args[1] 26995 if v_1_1_1.Op != OpAMD64NEGQ { 26996 break 26997 } 26998 if y != v_1_1_1.Args[0] { 26999 break 27000 } 27001 v.reset(OpAMD64ROLL) 27002 v.AddArg(x) 27003 v.AddArg(y) 27004 return true 27005 } 27006 // match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y)) 27007 // cond: 27008 // result: (ROLL x y) 27009 for { 27010 _ = v.Args[1] 27011 v_0 := v.Args[0] 27012 if v_0.Op != OpAMD64ANDL { 27013 break 27014 } 27015 _ = v_0.Args[1] 27016 v_0_0 := v_0.Args[0] 27017 if v_0_0.Op != OpAMD64SHRL { 27018 break 27019 } 27020 _ = v_0_0.Args[1] 27021 x := v_0_0.Args[0] 27022 v_0_0_1 := v_0_0.Args[1] 27023 if v_0_0_1.Op != OpAMD64NEGQ { 27024 break 27025 } 27026 y := v_0_0_1.Args[0] 27027 v_0_1 := v_0.Args[1] 27028 if v_0_1.Op != OpAMD64SBBLcarrymask { 27029 break 27030 } 27031 v_0_1_0 := v_0_1.Args[0] 27032 if v_0_1_0.Op != OpAMD64CMPQconst { 27033 break 27034 } 27035 if v_0_1_0.AuxInt != 32 { 27036 break 27037 } 27038 v_0_1_0_0 := v_0_1_0.Args[0] 27039 if v_0_1_0_0.Op != OpAMD64NEGQ { 27040 break 27041 } 27042 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27043 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 27044 break 27045 } 27046 if v_0_1_0_0_0.AuxInt != -32 { 27047 break 27048 } 27049 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27050 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 27051 break 27052 } 27053 if v_0_1_0_0_0_0.AuxInt != 31 { 27054 break 27055 } 27056 if y != v_0_1_0_0_0_0.Args[0] { 27057 break 27058 } 27059 v_1 := v.Args[1] 27060 if v_1.Op != OpAMD64SHLL { 27061 break 27062 } 27063 _ = v_1.Args[1] 27064 if x != v_1.Args[0] { 27065 break 27066 } 27067 if y != v_1.Args[1] { 27068 break 27069 } 27070 v.reset(OpAMD64ROLL) 27071 v.AddArg(x) 27072 v.AddArg(y) 27073 return true 27074 } 27075 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y)) 27076 // cond: 27077 // result: (ROLL x y) 27078 for { 27079 _ = v.Args[1] 27080 v_0 := v.Args[0] 27081 if v_0.Op != OpAMD64ANDL { 27082 break 27083 } 27084 _ = v_0.Args[1] 27085 v_0_0 := v_0.Args[0] 27086 if v_0_0.Op != OpAMD64SBBLcarrymask { 27087 break 27088 } 27089 v_0_0_0 := v_0_0.Args[0] 27090 if v_0_0_0.Op != OpAMD64CMPQconst { 27091 break 27092 } 27093 if v_0_0_0.AuxInt != 32 { 27094 break 27095 } 27096 v_0_0_0_0 := v_0_0_0.Args[0] 27097 if v_0_0_0_0.Op != OpAMD64NEGQ { 27098 break 27099 } 27100 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27101 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 27102 break 27103 } 27104 if v_0_0_0_0_0.AuxInt != -32 { 27105 break 
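// Note: the long ROLL patterns in this group all match the same masked-shift
// expansion of a variable 32-bit rotate: x<<y OR'ed with x>>(-y), where the
// SBBLcarrymask(CMPQconst [32] (NEGQ (ADDQconst [-32] (ANDQconst [31] y))))
// subtree is an all-ones mask when y&31 != 0 and zero when y&31 == 0, so the
// right-shift term drops out instead of contributing a bogus x when the
// rotate count is a multiple of 32. A plain-Go reference of what the whole
// tree computes (illustrative helper):
//
//	func rollRef(x, y uint32) uint32 {
//		s := y & 31
//		return x<<s | x>>((32-s)&31) // == bits.RotateLeft32(x, int(s))
//	}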
27106 } 27107 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27108 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 27109 break 27110 } 27111 if v_0_0_0_0_0_0.AuxInt != 31 { 27112 break 27113 } 27114 y := v_0_0_0_0_0_0.Args[0] 27115 v_0_1 := v_0.Args[1] 27116 if v_0_1.Op != OpAMD64SHRL { 27117 break 27118 } 27119 _ = v_0_1.Args[1] 27120 x := v_0_1.Args[0] 27121 v_0_1_1 := v_0_1.Args[1] 27122 if v_0_1_1.Op != OpAMD64NEGQ { 27123 break 27124 } 27125 if y != v_0_1_1.Args[0] { 27126 break 27127 } 27128 v_1 := v.Args[1] 27129 if v_1.Op != OpAMD64SHLL { 27130 break 27131 } 27132 _ = v_1.Args[1] 27133 if x != v_1.Args[0] { 27134 break 27135 } 27136 if y != v_1.Args[1] { 27137 break 27138 } 27139 v.reset(OpAMD64ROLL) 27140 v.AddArg(x) 27141 v.AddArg(y) 27142 return true 27143 } 27144 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 27145 // cond: 27146 // result: (ROLL x y) 27147 for { 27148 _ = v.Args[1] 27149 v_0 := v.Args[0] 27150 if v_0.Op != OpAMD64SHLL { 27151 break 27152 } 27153 _ = v_0.Args[1] 27154 x := v_0.Args[0] 27155 y := v_0.Args[1] 27156 v_1 := v.Args[1] 27157 if v_1.Op != OpAMD64ANDL { 27158 break 27159 } 27160 _ = v_1.Args[1] 27161 v_1_0 := v_1.Args[0] 27162 if v_1_0.Op != OpAMD64SHRL { 27163 break 27164 } 27165 _ = v_1_0.Args[1] 27166 if x != v_1_0.Args[0] { 27167 break 27168 } 27169 v_1_0_1 := v_1_0.Args[1] 27170 if v_1_0_1.Op != OpAMD64NEGL { 27171 break 27172 } 27173 if y != v_1_0_1.Args[0] { 27174 break 27175 } 27176 v_1_1 := v_1.Args[1] 27177 if v_1_1.Op != OpAMD64SBBLcarrymask { 27178 break 27179 } 27180 v_1_1_0 := v_1_1.Args[0] 27181 if v_1_1_0.Op != OpAMD64CMPLconst { 27182 break 27183 } 27184 if v_1_1_0.AuxInt != 32 { 27185 break 27186 } 27187 v_1_1_0_0 := v_1_1_0.Args[0] 27188 if v_1_1_0_0.Op != OpAMD64NEGL { 27189 break 27190 } 27191 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27192 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 27193 break 27194 } 27195 if v_1_1_0_0_0.AuxInt != -32 { 27196 break 27197 } 27198 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27199 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 27200 break 27201 } 27202 if v_1_1_0_0_0_0.AuxInt != 31 { 27203 break 27204 } 27205 if y != v_1_1_0_0_0_0.Args[0] { 27206 break 27207 } 27208 v.reset(OpAMD64ROLL) 27209 v.AddArg(x) 27210 v.AddArg(y) 27211 return true 27212 } 27213 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y)))) 27214 // cond: 27215 // result: (ROLL x y) 27216 for { 27217 _ = v.Args[1] 27218 v_0 := v.Args[0] 27219 if v_0.Op != OpAMD64SHLL { 27220 break 27221 } 27222 _ = v_0.Args[1] 27223 x := v_0.Args[0] 27224 y := v_0.Args[1] 27225 v_1 := v.Args[1] 27226 if v_1.Op != OpAMD64ANDL { 27227 break 27228 } 27229 _ = v_1.Args[1] 27230 v_1_0 := v_1.Args[0] 27231 if v_1_0.Op != OpAMD64SBBLcarrymask { 27232 break 27233 } 27234 v_1_0_0 := v_1_0.Args[0] 27235 if v_1_0_0.Op != OpAMD64CMPLconst { 27236 break 27237 } 27238 if v_1_0_0.AuxInt != 32 { 27239 break 27240 } 27241 v_1_0_0_0 := v_1_0_0.Args[0] 27242 if v_1_0_0_0.Op != OpAMD64NEGL { 27243 break 27244 } 27245 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27246 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 27247 break 27248 } 27249 if v_1_0_0_0_0.AuxInt != -32 { 27250 break 27251 } 27252 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27253 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 27254 break 27255 } 27256 if v_1_0_0_0_0_0.AuxInt != 31 { 27257 break 27258 } 27259 if y != v_1_0_0_0_0_0.Args[0] { 27260 break 27261 } 27262 v_1_1 := v_1.Args[1] 27263 if v_1_1.Op != OpAMD64SHRL { 
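// Note: the rule generator expands each commutative rotate rule into every
// argument order of ORL and ANDL, and the rules file additionally carries a
// 32-bit (NEGL/CMPLconst/ADDLconst/ANDLconst) and a 64-bit
// (NEGQ/CMPQconst/ADDQconst/ANDQconst) shape of the shift-count arithmetic,
// since the count may have been computed in either width. That is why this
// group repeats one match body with only operand order and L/Q suffixes
// varied; every variant reduces to the same ROLL. The count identity all of
// them rely on (plain Go):
//
//	y := uint32(13)
//	_ = (32-y)&31 == -y&31 // true: offsets of 32 vanish modulo the 5-bit count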
27264 break 27265 } 27266 _ = v_1_1.Args[1] 27267 if x != v_1_1.Args[0] { 27268 break 27269 } 27270 v_1_1_1 := v_1_1.Args[1] 27271 if v_1_1_1.Op != OpAMD64NEGL { 27272 break 27273 } 27274 if y != v_1_1_1.Args[0] { 27275 break 27276 } 27277 v.reset(OpAMD64ROLL) 27278 v.AddArg(x) 27279 v.AddArg(y) 27280 return true 27281 } 27282 // match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y)) 27283 // cond: 27284 // result: (ROLL x y) 27285 for { 27286 _ = v.Args[1] 27287 v_0 := v.Args[0] 27288 if v_0.Op != OpAMD64ANDL { 27289 break 27290 } 27291 _ = v_0.Args[1] 27292 v_0_0 := v_0.Args[0] 27293 if v_0_0.Op != OpAMD64SHRL { 27294 break 27295 } 27296 _ = v_0_0.Args[1] 27297 x := v_0_0.Args[0] 27298 v_0_0_1 := v_0_0.Args[1] 27299 if v_0_0_1.Op != OpAMD64NEGL { 27300 break 27301 } 27302 y := v_0_0_1.Args[0] 27303 v_0_1 := v_0.Args[1] 27304 if v_0_1.Op != OpAMD64SBBLcarrymask { 27305 break 27306 } 27307 v_0_1_0 := v_0_1.Args[0] 27308 if v_0_1_0.Op != OpAMD64CMPLconst { 27309 break 27310 } 27311 if v_0_1_0.AuxInt != 32 { 27312 break 27313 } 27314 v_0_1_0_0 := v_0_1_0.Args[0] 27315 if v_0_1_0_0.Op != OpAMD64NEGL { 27316 break 27317 } 27318 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27319 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 27320 break 27321 } 27322 if v_0_1_0_0_0.AuxInt != -32 { 27323 break 27324 } 27325 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27326 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 27327 break 27328 } 27329 if v_0_1_0_0_0_0.AuxInt != 31 { 27330 break 27331 } 27332 if y != v_0_1_0_0_0_0.Args[0] { 27333 break 27334 } 27335 v_1 := v.Args[1] 27336 if v_1.Op != OpAMD64SHLL { 27337 break 27338 } 27339 _ = v_1.Args[1] 27340 if x != v_1.Args[0] { 27341 break 27342 } 27343 if y != v_1.Args[1] { 27344 break 27345 } 27346 v.reset(OpAMD64ROLL) 27347 v.AddArg(x) 27348 v.AddArg(y) 27349 return true 27350 } 27351 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y)) 27352 // cond: 27353 // result: (ROLL x y) 27354 for { 27355 _ = v.Args[1] 27356 v_0 := v.Args[0] 27357 if v_0.Op != OpAMD64ANDL { 27358 break 27359 } 27360 _ = v_0.Args[1] 27361 v_0_0 := v_0.Args[0] 27362 if v_0_0.Op != OpAMD64SBBLcarrymask { 27363 break 27364 } 27365 v_0_0_0 := v_0_0.Args[0] 27366 if v_0_0_0.Op != OpAMD64CMPLconst { 27367 break 27368 } 27369 if v_0_0_0.AuxInt != 32 { 27370 break 27371 } 27372 v_0_0_0_0 := v_0_0_0.Args[0] 27373 if v_0_0_0_0.Op != OpAMD64NEGL { 27374 break 27375 } 27376 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27377 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 27378 break 27379 } 27380 if v_0_0_0_0_0.AuxInt != -32 { 27381 break 27382 } 27383 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27384 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 27385 break 27386 } 27387 if v_0_0_0_0_0_0.AuxInt != 31 { 27388 break 27389 } 27390 y := v_0_0_0_0_0_0.Args[0] 27391 v_0_1 := v_0.Args[1] 27392 if v_0_1.Op != OpAMD64SHRL { 27393 break 27394 } 27395 _ = v_0_1.Args[1] 27396 x := v_0_1.Args[0] 27397 v_0_1_1 := v_0_1.Args[1] 27398 if v_0_1_1.Op != OpAMD64NEGL { 27399 break 27400 } 27401 if y != v_0_1_1.Args[0] { 27402 break 27403 } 27404 v_1 := v.Args[1] 27405 if v_1.Op != OpAMD64SHLL { 27406 break 27407 } 27408 _ = v_1.Args[1] 27409 if x != v_1.Args[0] { 27410 break 27411 } 27412 if y != v_1.Args[1] { 27413 break 27414 } 27415 v.reset(OpAMD64ROLL) 27416 v.AddArg(x) 27417 v.AddArg(y) 27418 return true 27419 } 27420 return false 27421 } 27422 func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { 27423 // match: (ORL (SHRL x y) 
(ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 27424 // cond: 27425 // result: (RORL x y) 27426 for { 27427 _ = v.Args[1] 27428 v_0 := v.Args[0] 27429 if v_0.Op != OpAMD64SHRL { 27430 break 27431 } 27432 _ = v_0.Args[1] 27433 x := v_0.Args[0] 27434 y := v_0.Args[1] 27435 v_1 := v.Args[1] 27436 if v_1.Op != OpAMD64ANDL { 27437 break 27438 } 27439 _ = v_1.Args[1] 27440 v_1_0 := v_1.Args[0] 27441 if v_1_0.Op != OpAMD64SHLL { 27442 break 27443 } 27444 _ = v_1_0.Args[1] 27445 if x != v_1_0.Args[0] { 27446 break 27447 } 27448 v_1_0_1 := v_1_0.Args[1] 27449 if v_1_0_1.Op != OpAMD64NEGQ { 27450 break 27451 } 27452 if y != v_1_0_1.Args[0] { 27453 break 27454 } 27455 v_1_1 := v_1.Args[1] 27456 if v_1_1.Op != OpAMD64SBBLcarrymask { 27457 break 27458 } 27459 v_1_1_0 := v_1_1.Args[0] 27460 if v_1_1_0.Op != OpAMD64CMPQconst { 27461 break 27462 } 27463 if v_1_1_0.AuxInt != 32 { 27464 break 27465 } 27466 v_1_1_0_0 := v_1_1_0.Args[0] 27467 if v_1_1_0_0.Op != OpAMD64NEGQ { 27468 break 27469 } 27470 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27471 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 27472 break 27473 } 27474 if v_1_1_0_0_0.AuxInt != -32 { 27475 break 27476 } 27477 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27478 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 27479 break 27480 } 27481 if v_1_1_0_0_0_0.AuxInt != 31 { 27482 break 27483 } 27484 if y != v_1_1_0_0_0_0.Args[0] { 27485 break 27486 } 27487 v.reset(OpAMD64RORL) 27488 v.AddArg(x) 27489 v.AddArg(y) 27490 return true 27491 } 27492 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y)))) 27493 // cond: 27494 // result: (RORL x y) 27495 for { 27496 _ = v.Args[1] 27497 v_0 := v.Args[0] 27498 if v_0.Op != OpAMD64SHRL { 27499 break 27500 } 27501 _ = v_0.Args[1] 27502 x := v_0.Args[0] 27503 y := v_0.Args[1] 27504 v_1 := v.Args[1] 27505 if v_1.Op != OpAMD64ANDL { 27506 break 27507 } 27508 _ = v_1.Args[1] 27509 v_1_0 := v_1.Args[0] 27510 if v_1_0.Op != OpAMD64SBBLcarrymask { 27511 break 27512 } 27513 v_1_0_0 := v_1_0.Args[0] 27514 if v_1_0_0.Op != OpAMD64CMPQconst { 27515 break 27516 } 27517 if v_1_0_0.AuxInt != 32 { 27518 break 27519 } 27520 v_1_0_0_0 := v_1_0_0.Args[0] 27521 if v_1_0_0_0.Op != OpAMD64NEGQ { 27522 break 27523 } 27524 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27525 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 27526 break 27527 } 27528 if v_1_0_0_0_0.AuxInt != -32 { 27529 break 27530 } 27531 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27532 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 27533 break 27534 } 27535 if v_1_0_0_0_0_0.AuxInt != 31 { 27536 break 27537 } 27538 if y != v_1_0_0_0_0_0.Args[0] { 27539 break 27540 } 27541 v_1_1 := v_1.Args[1] 27542 if v_1_1.Op != OpAMD64SHLL { 27543 break 27544 } 27545 _ = v_1_1.Args[1] 27546 if x != v_1_1.Args[0] { 27547 break 27548 } 27549 v_1_1_1 := v_1_1.Args[1] 27550 if v_1_1_1.Op != OpAMD64NEGQ { 27551 break 27552 } 27553 if y != v_1_1_1.Args[0] { 27554 break 27555 } 27556 v.reset(OpAMD64RORL) 27557 v.AddArg(x) 27558 v.AddArg(y) 27559 return true 27560 } 27561 // match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y)) 27562 // cond: 27563 // result: (RORL x y) 27564 for { 27565 _ = v.Args[1] 27566 v_0 := v.Args[0] 27567 if v_0.Op != OpAMD64ANDL { 27568 break 27569 } 27570 _ = v_0.Args[1] 27571 v_0_0 := v_0.Args[0] 27572 if v_0_0.Op != OpAMD64SHLL { 27573 break 27574 } 27575 _ = v_0_0.Args[1] 27576 x := v_0_0.Args[0] 27577 v_0_0_1 := v_0_0.Args[1] 
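// Note: rewriteValueAMD64_OpAMD64ORL_20 mirrors the ROLL group above: here
// the unmasked shift is the right shift, so the masked tree is a variable
// rotate right and collapses to (RORL x y). A plain-Go reference
// (illustrative helper):
//
//	func rorlRef(x, y uint32) uint32 {
//		s := y & 31
//		return x>>s | x<<((32-s)&31)
//	}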
27578 if v_0_0_1.Op != OpAMD64NEGQ { 27579 break 27580 } 27581 y := v_0_0_1.Args[0] 27582 v_0_1 := v_0.Args[1] 27583 if v_0_1.Op != OpAMD64SBBLcarrymask { 27584 break 27585 } 27586 v_0_1_0 := v_0_1.Args[0] 27587 if v_0_1_0.Op != OpAMD64CMPQconst { 27588 break 27589 } 27590 if v_0_1_0.AuxInt != 32 { 27591 break 27592 } 27593 v_0_1_0_0 := v_0_1_0.Args[0] 27594 if v_0_1_0_0.Op != OpAMD64NEGQ { 27595 break 27596 } 27597 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27598 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 27599 break 27600 } 27601 if v_0_1_0_0_0.AuxInt != -32 { 27602 break 27603 } 27604 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27605 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 27606 break 27607 } 27608 if v_0_1_0_0_0_0.AuxInt != 31 { 27609 break 27610 } 27611 if y != v_0_1_0_0_0_0.Args[0] { 27612 break 27613 } 27614 v_1 := v.Args[1] 27615 if v_1.Op != OpAMD64SHRL { 27616 break 27617 } 27618 _ = v_1.Args[1] 27619 if x != v_1.Args[0] { 27620 break 27621 } 27622 if y != v_1.Args[1] { 27623 break 27624 } 27625 v.reset(OpAMD64RORL) 27626 v.AddArg(x) 27627 v.AddArg(y) 27628 return true 27629 } 27630 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y)) 27631 // cond: 27632 // result: (RORL x y) 27633 for { 27634 _ = v.Args[1] 27635 v_0 := v.Args[0] 27636 if v_0.Op != OpAMD64ANDL { 27637 break 27638 } 27639 _ = v_0.Args[1] 27640 v_0_0 := v_0.Args[0] 27641 if v_0_0.Op != OpAMD64SBBLcarrymask { 27642 break 27643 } 27644 v_0_0_0 := v_0_0.Args[0] 27645 if v_0_0_0.Op != OpAMD64CMPQconst { 27646 break 27647 } 27648 if v_0_0_0.AuxInt != 32 { 27649 break 27650 } 27651 v_0_0_0_0 := v_0_0_0.Args[0] 27652 if v_0_0_0_0.Op != OpAMD64NEGQ { 27653 break 27654 } 27655 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27656 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 27657 break 27658 } 27659 if v_0_0_0_0_0.AuxInt != -32 { 27660 break 27661 } 27662 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27663 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 27664 break 27665 } 27666 if v_0_0_0_0_0_0.AuxInt != 31 { 27667 break 27668 } 27669 y := v_0_0_0_0_0_0.Args[0] 27670 v_0_1 := v_0.Args[1] 27671 if v_0_1.Op != OpAMD64SHLL { 27672 break 27673 } 27674 _ = v_0_1.Args[1] 27675 x := v_0_1.Args[0] 27676 v_0_1_1 := v_0_1.Args[1] 27677 if v_0_1_1.Op != OpAMD64NEGQ { 27678 break 27679 } 27680 if y != v_0_1_1.Args[0] { 27681 break 27682 } 27683 v_1 := v.Args[1] 27684 if v_1.Op != OpAMD64SHRL { 27685 break 27686 } 27687 _ = v_1.Args[1] 27688 if x != v_1.Args[0] { 27689 break 27690 } 27691 if y != v_1.Args[1] { 27692 break 27693 } 27694 v.reset(OpAMD64RORL) 27695 v.AddArg(x) 27696 v.AddArg(y) 27697 return true 27698 } 27699 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 27700 // cond: 27701 // result: (RORL x y) 27702 for { 27703 _ = v.Args[1] 27704 v_0 := v.Args[0] 27705 if v_0.Op != OpAMD64SHRL { 27706 break 27707 } 27708 _ = v_0.Args[1] 27709 x := v_0.Args[0] 27710 y := v_0.Args[1] 27711 v_1 := v.Args[1] 27712 if v_1.Op != OpAMD64ANDL { 27713 break 27714 } 27715 _ = v_1.Args[1] 27716 v_1_0 := v_1.Args[0] 27717 if v_1_0.Op != OpAMD64SHLL { 27718 break 27719 } 27720 _ = v_1_0.Args[1] 27721 if x != v_1_0.Args[0] { 27722 break 27723 } 27724 v_1_0_1 := v_1_0.Args[1] 27725 if v_1_0_1.Op != OpAMD64NEGL { 27726 break 27727 } 27728 if y != v_1_0_1.Args[0] { 27729 break 27730 } 27731 v_1_1 := v_1.Args[1] 27732 if v_1_1.Op != OpAMD64SBBLcarrymask { 27733 break 27734 } 27735 v_1_1_0 := v_1_1.Args[0] 27736 if v_1_1_0.Op != OpAMD64CMPLconst { 
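// Note: SBBLcarrymask, matched throughout these rotate patterns, materializes
// the carry flag as a full-width mask (sbbl r, r): 0xFFFFFFFF if the
// preceding comparison set carry (unsigned less-than), 0 otherwise. In Go
// terms (illustrative helper):
//
//	func carryMask(a, b uint32) uint32 {
//		if a < b { // CMP sets carry exactly on unsigned less-than
//			return 0xFFFFFFFF
//		}
//		return 0
//	}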
27737 break 27738 } 27739 if v_1_1_0.AuxInt != 32 { 27740 break 27741 } 27742 v_1_1_0_0 := v_1_1_0.Args[0] 27743 if v_1_1_0_0.Op != OpAMD64NEGL { 27744 break 27745 } 27746 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27747 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 27748 break 27749 } 27750 if v_1_1_0_0_0.AuxInt != -32 { 27751 break 27752 } 27753 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27754 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 27755 break 27756 } 27757 if v_1_1_0_0_0_0.AuxInt != 31 { 27758 break 27759 } 27760 if y != v_1_1_0_0_0_0.Args[0] { 27761 break 27762 } 27763 v.reset(OpAMD64RORL) 27764 v.AddArg(x) 27765 v.AddArg(y) 27766 return true 27767 } 27768 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y)))) 27769 // cond: 27770 // result: (RORL x y) 27771 for { 27772 _ = v.Args[1] 27773 v_0 := v.Args[0] 27774 if v_0.Op != OpAMD64SHRL { 27775 break 27776 } 27777 _ = v_0.Args[1] 27778 x := v_0.Args[0] 27779 y := v_0.Args[1] 27780 v_1 := v.Args[1] 27781 if v_1.Op != OpAMD64ANDL { 27782 break 27783 } 27784 _ = v_1.Args[1] 27785 v_1_0 := v_1.Args[0] 27786 if v_1_0.Op != OpAMD64SBBLcarrymask { 27787 break 27788 } 27789 v_1_0_0 := v_1_0.Args[0] 27790 if v_1_0_0.Op != OpAMD64CMPLconst { 27791 break 27792 } 27793 if v_1_0_0.AuxInt != 32 { 27794 break 27795 } 27796 v_1_0_0_0 := v_1_0_0.Args[0] 27797 if v_1_0_0_0.Op != OpAMD64NEGL { 27798 break 27799 } 27800 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27801 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 27802 break 27803 } 27804 if v_1_0_0_0_0.AuxInt != -32 { 27805 break 27806 } 27807 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27808 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 27809 break 27810 } 27811 if v_1_0_0_0_0_0.AuxInt != 31 { 27812 break 27813 } 27814 if y != v_1_0_0_0_0_0.Args[0] { 27815 break 27816 } 27817 v_1_1 := v_1.Args[1] 27818 if v_1_1.Op != OpAMD64SHLL { 27819 break 27820 } 27821 _ = v_1_1.Args[1] 27822 if x != v_1_1.Args[0] { 27823 break 27824 } 27825 v_1_1_1 := v_1_1.Args[1] 27826 if v_1_1_1.Op != OpAMD64NEGL { 27827 break 27828 } 27829 if y != v_1_1_1.Args[0] { 27830 break 27831 } 27832 v.reset(OpAMD64RORL) 27833 v.AddArg(x) 27834 v.AddArg(y) 27835 return true 27836 } 27837 // match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y)) 27838 // cond: 27839 // result: (RORL x y) 27840 for { 27841 _ = v.Args[1] 27842 v_0 := v.Args[0] 27843 if v_0.Op != OpAMD64ANDL { 27844 break 27845 } 27846 _ = v_0.Args[1] 27847 v_0_0 := v_0.Args[0] 27848 if v_0_0.Op != OpAMD64SHLL { 27849 break 27850 } 27851 _ = v_0_0.Args[1] 27852 x := v_0_0.Args[0] 27853 v_0_0_1 := v_0_0.Args[1] 27854 if v_0_0_1.Op != OpAMD64NEGL { 27855 break 27856 } 27857 y := v_0_0_1.Args[0] 27858 v_0_1 := v_0.Args[1] 27859 if v_0_1.Op != OpAMD64SBBLcarrymask { 27860 break 27861 } 27862 v_0_1_0 := v_0_1.Args[0] 27863 if v_0_1_0.Op != OpAMD64CMPLconst { 27864 break 27865 } 27866 if v_0_1_0.AuxInt != 32 { 27867 break 27868 } 27869 v_0_1_0_0 := v_0_1_0.Args[0] 27870 if v_0_1_0_0.Op != OpAMD64NEGL { 27871 break 27872 } 27873 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27874 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 27875 break 27876 } 27877 if v_0_1_0_0_0.AuxInt != -32 { 27878 break 27879 } 27880 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27881 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 27882 break 27883 } 27884 if v_0_1_0_0_0_0.AuxInt != 31 { 27885 break 27886 } 27887 if y != v_0_1_0_0_0_0.Args[0] { 27888 break 27889 } 27890 v_1 := v.Args[1] 27891 if v_1.Op != OpAMD64SHRL { 27892 break 27893 } 27894 
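// Editor's note: the ORL->RORL cases above and below differ only in operand
// order (ORL and ANDL are commutative, so the rule generator emits each
// permutation) and in whether the shift-count arithmetic is 64-bit (NEGQ/
// ADDQconst/ANDQconst) or 32-bit (NEGL/ADDLconst/ANDLconst). All of them
// collapse the masked shift-pair expansion of a variable 32-bit rotate
// right into a single RORL. An assumed Go source shape that should lower
// to this pattern (illustrative only, not taken from this file):
//
//	func ror32(x uint32, k uint) uint32 {
//		return x>>(k&31) | x<<((32-k)&31)
//	}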
_ = v_1.Args[1] 27895 if x != v_1.Args[0] { 27896 break 27897 } 27898 if y != v_1.Args[1] { 27899 break 27900 } 27901 v.reset(OpAMD64RORL) 27902 v.AddArg(x) 27903 v.AddArg(y) 27904 return true 27905 } 27906 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y)) 27907 // cond: 27908 // result: (RORL x y) 27909 for { 27910 _ = v.Args[1] 27911 v_0 := v.Args[0] 27912 if v_0.Op != OpAMD64ANDL { 27913 break 27914 } 27915 _ = v_0.Args[1] 27916 v_0_0 := v_0.Args[0] 27917 if v_0_0.Op != OpAMD64SBBLcarrymask { 27918 break 27919 } 27920 v_0_0_0 := v_0_0.Args[0] 27921 if v_0_0_0.Op != OpAMD64CMPLconst { 27922 break 27923 } 27924 if v_0_0_0.AuxInt != 32 { 27925 break 27926 } 27927 v_0_0_0_0 := v_0_0_0.Args[0] 27928 if v_0_0_0_0.Op != OpAMD64NEGL { 27929 break 27930 } 27931 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27932 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 27933 break 27934 } 27935 if v_0_0_0_0_0.AuxInt != -32 { 27936 break 27937 } 27938 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27939 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 27940 break 27941 } 27942 if v_0_0_0_0_0_0.AuxInt != 31 { 27943 break 27944 } 27945 y := v_0_0_0_0_0_0.Args[0] 27946 v_0_1 := v_0.Args[1] 27947 if v_0_1.Op != OpAMD64SHLL { 27948 break 27949 } 27950 _ = v_0_1.Args[1] 27951 x := v_0_1.Args[0] 27952 v_0_1_1 := v_0_1.Args[1] 27953 if v_0_1_1.Op != OpAMD64NEGL { 27954 break 27955 } 27956 if y != v_0_1_1.Args[0] { 27957 break 27958 } 27959 v_1 := v.Args[1] 27960 if v_1.Op != OpAMD64SHRL { 27961 break 27962 } 27963 _ = v_1.Args[1] 27964 if x != v_1.Args[0] { 27965 break 27966 } 27967 if y != v_1.Args[1] { 27968 break 27969 } 27970 v.reset(OpAMD64RORL) 27971 v.AddArg(x) 27972 v.AddArg(y) 27973 return true 27974 } 27975 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) 27976 // cond: v.Type.Size() == 2 27977 // result: (ROLW x y) 27978 for { 27979 _ = v.Args[1] 27980 v_0 := v.Args[0] 27981 if v_0.Op != OpAMD64SHLL { 27982 break 27983 } 27984 _ = v_0.Args[1] 27985 x := v_0.Args[0] 27986 v_0_1 := v_0.Args[1] 27987 if v_0_1.Op != OpAMD64ANDQconst { 27988 break 27989 } 27990 if v_0_1.AuxInt != 15 { 27991 break 27992 } 27993 y := v_0_1.Args[0] 27994 v_1 := v.Args[1] 27995 if v_1.Op != OpAMD64ANDL { 27996 break 27997 } 27998 _ = v_1.Args[1] 27999 v_1_0 := v_1.Args[0] 28000 if v_1_0.Op != OpAMD64SHRW { 28001 break 28002 } 28003 _ = v_1_0.Args[1] 28004 if x != v_1_0.Args[0] { 28005 break 28006 } 28007 v_1_0_1 := v_1_0.Args[1] 28008 if v_1_0_1.Op != OpAMD64NEGQ { 28009 break 28010 } 28011 v_1_0_1_0 := v_1_0_1.Args[0] 28012 if v_1_0_1_0.Op != OpAMD64ADDQconst { 28013 break 28014 } 28015 if v_1_0_1_0.AuxInt != -16 { 28016 break 28017 } 28018 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 28019 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 28020 break 28021 } 28022 if v_1_0_1_0_0.AuxInt != 15 { 28023 break 28024 } 28025 if y != v_1_0_1_0_0.Args[0] { 28026 break 28027 } 28028 v_1_1 := v_1.Args[1] 28029 if v_1_1.Op != OpAMD64SBBLcarrymask { 28030 break 28031 } 28032 v_1_1_0 := v_1_1.Args[0] 28033 if v_1_1_0.Op != OpAMD64CMPQconst { 28034 break 28035 } 28036 if v_1_1_0.AuxInt != 16 { 28037 break 28038 } 28039 v_1_1_0_0 := v_1_1_0.Args[0] 28040 if v_1_1_0_0.Op != OpAMD64NEGQ { 28041 break 28042 } 28043 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 28044 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 28045 break 28046 } 28047 if v_1_1_0_0_0.AuxInt != -16 { 28048 break 28049 } 28050 v_1_1_0_0_0_0 := 
v_1_1_0_0_0.Args[0] 28051 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 28052 break 28053 } 28054 if v_1_1_0_0_0_0.AuxInt != 15 { 28055 break 28056 } 28057 if y != v_1_1_0_0_0_0.Args[0] { 28058 break 28059 } 28060 if !(v.Type.Size() == 2) { 28061 break 28062 } 28063 v.reset(OpAMD64ROLW) 28064 v.AddArg(x) 28065 v.AddArg(y) 28066 return true 28067 } 28068 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))) 28069 // cond: v.Type.Size() == 2 28070 // result: (ROLW x y) 28071 for { 28072 _ = v.Args[1] 28073 v_0 := v.Args[0] 28074 if v_0.Op != OpAMD64SHLL { 28075 break 28076 } 28077 _ = v_0.Args[1] 28078 x := v_0.Args[0] 28079 v_0_1 := v_0.Args[1] 28080 if v_0_1.Op != OpAMD64ANDQconst { 28081 break 28082 } 28083 if v_0_1.AuxInt != 15 { 28084 break 28085 } 28086 y := v_0_1.Args[0] 28087 v_1 := v.Args[1] 28088 if v_1.Op != OpAMD64ANDL { 28089 break 28090 } 28091 _ = v_1.Args[1] 28092 v_1_0 := v_1.Args[0] 28093 if v_1_0.Op != OpAMD64SBBLcarrymask { 28094 break 28095 } 28096 v_1_0_0 := v_1_0.Args[0] 28097 if v_1_0_0.Op != OpAMD64CMPQconst { 28098 break 28099 } 28100 if v_1_0_0.AuxInt != 16 { 28101 break 28102 } 28103 v_1_0_0_0 := v_1_0_0.Args[0] 28104 if v_1_0_0_0.Op != OpAMD64NEGQ { 28105 break 28106 } 28107 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 28108 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 28109 break 28110 } 28111 if v_1_0_0_0_0.AuxInt != -16 { 28112 break 28113 } 28114 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 28115 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 28116 break 28117 } 28118 if v_1_0_0_0_0_0.AuxInt != 15 { 28119 break 28120 } 28121 if y != v_1_0_0_0_0_0.Args[0] { 28122 break 28123 } 28124 v_1_1 := v_1.Args[1] 28125 if v_1_1.Op != OpAMD64SHRW { 28126 break 28127 } 28128 _ = v_1_1.Args[1] 28129 if x != v_1_1.Args[0] { 28130 break 28131 } 28132 v_1_1_1 := v_1_1.Args[1] 28133 if v_1_1_1.Op != OpAMD64NEGQ { 28134 break 28135 } 28136 v_1_1_1_0 := v_1_1_1.Args[0] 28137 if v_1_1_1_0.Op != OpAMD64ADDQconst { 28138 break 28139 } 28140 if v_1_1_1_0.AuxInt != -16 { 28141 break 28142 } 28143 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 28144 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 28145 break 28146 } 28147 if v_1_1_1_0_0.AuxInt != 15 { 28148 break 28149 } 28150 if y != v_1_1_1_0_0.Args[0] { 28151 break 28152 } 28153 if !(v.Type.Size() == 2) { 28154 break 28155 } 28156 v.reset(OpAMD64ROLW) 28157 v.AddArg(x) 28158 v.AddArg(y) 28159 return true 28160 } 28161 return false 28162 } 28163 func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { 28164 // match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15]))) 28165 // cond: v.Type.Size() == 2 28166 // result: (ROLW x y) 28167 for { 28168 _ = v.Args[1] 28169 v_0 := v.Args[0] 28170 if v_0.Op != OpAMD64ANDL { 28171 break 28172 } 28173 _ = v_0.Args[1] 28174 v_0_0 := v_0.Args[0] 28175 if v_0_0.Op != OpAMD64SHRW { 28176 break 28177 } 28178 _ = v_0_0.Args[1] 28179 x := v_0_0.Args[0] 28180 v_0_0_1 := v_0_0.Args[1] 28181 if v_0_0_1.Op != OpAMD64NEGQ { 28182 break 28183 } 28184 v_0_0_1_0 := v_0_0_1.Args[0] 28185 if v_0_0_1_0.Op != OpAMD64ADDQconst { 28186 break 28187 } 28188 if v_0_0_1_0.AuxInt != -16 { 28189 break 28190 } 28191 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 28192 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 28193 break 28194 } 28195 if v_0_0_1_0_0.AuxInt != 15 { 28196 break 28197 } 28198 y := v_0_0_1_0_0.Args[0] 28199 v_0_1 := v_0.Args[1] 28200 if 
v_0_1.Op != OpAMD64SBBLcarrymask { 28201 break 28202 } 28203 v_0_1_0 := v_0_1.Args[0] 28204 if v_0_1_0.Op != OpAMD64CMPQconst { 28205 break 28206 } 28207 if v_0_1_0.AuxInt != 16 { 28208 break 28209 } 28210 v_0_1_0_0 := v_0_1_0.Args[0] 28211 if v_0_1_0_0.Op != OpAMD64NEGQ { 28212 break 28213 } 28214 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 28215 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 28216 break 28217 } 28218 if v_0_1_0_0_0.AuxInt != -16 { 28219 break 28220 } 28221 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 28222 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 28223 break 28224 } 28225 if v_0_1_0_0_0_0.AuxInt != 15 { 28226 break 28227 } 28228 if y != v_0_1_0_0_0_0.Args[0] { 28229 break 28230 } 28231 v_1 := v.Args[1] 28232 if v_1.Op != OpAMD64SHLL { 28233 break 28234 } 28235 _ = v_1.Args[1] 28236 if x != v_1.Args[0] { 28237 break 28238 } 28239 v_1_1 := v_1.Args[1] 28240 if v_1_1.Op != OpAMD64ANDQconst { 28241 break 28242 } 28243 if v_1_1.AuxInt != 15 { 28244 break 28245 } 28246 if y != v_1_1.Args[0] { 28247 break 28248 } 28249 if !(v.Type.Size() == 2) { 28250 break 28251 } 28252 v.reset(OpAMD64ROLW) 28253 v.AddArg(x) 28254 v.AddArg(y) 28255 return true 28256 } 28257 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15]))) 28258 // cond: v.Type.Size() == 2 28259 // result: (ROLW x y) 28260 for { 28261 _ = v.Args[1] 28262 v_0 := v.Args[0] 28263 if v_0.Op != OpAMD64ANDL { 28264 break 28265 } 28266 _ = v_0.Args[1] 28267 v_0_0 := v_0.Args[0] 28268 if v_0_0.Op != OpAMD64SBBLcarrymask { 28269 break 28270 } 28271 v_0_0_0 := v_0_0.Args[0] 28272 if v_0_0_0.Op != OpAMD64CMPQconst { 28273 break 28274 } 28275 if v_0_0_0.AuxInt != 16 { 28276 break 28277 } 28278 v_0_0_0_0 := v_0_0_0.Args[0] 28279 if v_0_0_0_0.Op != OpAMD64NEGQ { 28280 break 28281 } 28282 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 28283 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 28284 break 28285 } 28286 if v_0_0_0_0_0.AuxInt != -16 { 28287 break 28288 } 28289 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 28290 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 28291 break 28292 } 28293 if v_0_0_0_0_0_0.AuxInt != 15 { 28294 break 28295 } 28296 y := v_0_0_0_0_0_0.Args[0] 28297 v_0_1 := v_0.Args[1] 28298 if v_0_1.Op != OpAMD64SHRW { 28299 break 28300 } 28301 _ = v_0_1.Args[1] 28302 x := v_0_1.Args[0] 28303 v_0_1_1 := v_0_1.Args[1] 28304 if v_0_1_1.Op != OpAMD64NEGQ { 28305 break 28306 } 28307 v_0_1_1_0 := v_0_1_1.Args[0] 28308 if v_0_1_1_0.Op != OpAMD64ADDQconst { 28309 break 28310 } 28311 if v_0_1_1_0.AuxInt != -16 { 28312 break 28313 } 28314 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 28315 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 28316 break 28317 } 28318 if v_0_1_1_0_0.AuxInt != 15 { 28319 break 28320 } 28321 if y != v_0_1_1_0_0.Args[0] { 28322 break 28323 } 28324 v_1 := v.Args[1] 28325 if v_1.Op != OpAMD64SHLL { 28326 break 28327 } 28328 _ = v_1.Args[1] 28329 if x != v_1.Args[0] { 28330 break 28331 } 28332 v_1_1 := v_1.Args[1] 28333 if v_1_1.Op != OpAMD64ANDQconst { 28334 break 28335 } 28336 if v_1_1.AuxInt != 15 { 28337 break 28338 } 28339 if y != v_1_1.Args[0] { 28340 break 28341 } 28342 if !(v.Type.Size() == 2) { 28343 break 28344 } 28345 v.reset(OpAMD64ROLW) 28346 v.AddArg(x) 28347 v.AddArg(y) 28348 return true 28349 } 28350 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) 28351 // cond: v.Type.Size() == 2 28352 // result: (ROLW x y) 28353 for 
{ 28354 _ = v.Args[1] 28355 v_0 := v.Args[0] 28356 if v_0.Op != OpAMD64SHLL { 28357 break 28358 } 28359 _ = v_0.Args[1] 28360 x := v_0.Args[0] 28361 v_0_1 := v_0.Args[1] 28362 if v_0_1.Op != OpAMD64ANDLconst { 28363 break 28364 } 28365 if v_0_1.AuxInt != 15 { 28366 break 28367 } 28368 y := v_0_1.Args[0] 28369 v_1 := v.Args[1] 28370 if v_1.Op != OpAMD64ANDL { 28371 break 28372 } 28373 _ = v_1.Args[1] 28374 v_1_0 := v_1.Args[0] 28375 if v_1_0.Op != OpAMD64SHRW { 28376 break 28377 } 28378 _ = v_1_0.Args[1] 28379 if x != v_1_0.Args[0] { 28380 break 28381 } 28382 v_1_0_1 := v_1_0.Args[1] 28383 if v_1_0_1.Op != OpAMD64NEGL { 28384 break 28385 } 28386 v_1_0_1_0 := v_1_0_1.Args[0] 28387 if v_1_0_1_0.Op != OpAMD64ADDLconst { 28388 break 28389 } 28390 if v_1_0_1_0.AuxInt != -16 { 28391 break 28392 } 28393 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 28394 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 28395 break 28396 } 28397 if v_1_0_1_0_0.AuxInt != 15 { 28398 break 28399 } 28400 if y != v_1_0_1_0_0.Args[0] { 28401 break 28402 } 28403 v_1_1 := v_1.Args[1] 28404 if v_1_1.Op != OpAMD64SBBLcarrymask { 28405 break 28406 } 28407 v_1_1_0 := v_1_1.Args[0] 28408 if v_1_1_0.Op != OpAMD64CMPLconst { 28409 break 28410 } 28411 if v_1_1_0.AuxInt != 16 { 28412 break 28413 } 28414 v_1_1_0_0 := v_1_1_0.Args[0] 28415 if v_1_1_0_0.Op != OpAMD64NEGL { 28416 break 28417 } 28418 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 28419 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 28420 break 28421 } 28422 if v_1_1_0_0_0.AuxInt != -16 { 28423 break 28424 } 28425 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 28426 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 28427 break 28428 } 28429 if v_1_1_0_0_0_0.AuxInt != 15 { 28430 break 28431 } 28432 if y != v_1_1_0_0_0_0.Args[0] { 28433 break 28434 } 28435 if !(v.Type.Size() == 2) { 28436 break 28437 } 28438 v.reset(OpAMD64ROLW) 28439 v.AddArg(x) 28440 v.AddArg(y) 28441 return true 28442 } 28443 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))) 28444 // cond: v.Type.Size() == 2 28445 // result: (ROLW x y) 28446 for { 28447 _ = v.Args[1] 28448 v_0 := v.Args[0] 28449 if v_0.Op != OpAMD64SHLL { 28450 break 28451 } 28452 _ = v_0.Args[1] 28453 x := v_0.Args[0] 28454 v_0_1 := v_0.Args[1] 28455 if v_0_1.Op != OpAMD64ANDLconst { 28456 break 28457 } 28458 if v_0_1.AuxInt != 15 { 28459 break 28460 } 28461 y := v_0_1.Args[0] 28462 v_1 := v.Args[1] 28463 if v_1.Op != OpAMD64ANDL { 28464 break 28465 } 28466 _ = v_1.Args[1] 28467 v_1_0 := v_1.Args[0] 28468 if v_1_0.Op != OpAMD64SBBLcarrymask { 28469 break 28470 } 28471 v_1_0_0 := v_1_0.Args[0] 28472 if v_1_0_0.Op != OpAMD64CMPLconst { 28473 break 28474 } 28475 if v_1_0_0.AuxInt != 16 { 28476 break 28477 } 28478 v_1_0_0_0 := v_1_0_0.Args[0] 28479 if v_1_0_0_0.Op != OpAMD64NEGL { 28480 break 28481 } 28482 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 28483 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 28484 break 28485 } 28486 if v_1_0_0_0_0.AuxInt != -16 { 28487 break 28488 } 28489 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 28490 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 28491 break 28492 } 28493 if v_1_0_0_0_0_0.AuxInt != 15 { 28494 break 28495 } 28496 if y != v_1_0_0_0_0_0.Args[0] { 28497 break 28498 } 28499 v_1_1 := v_1.Args[1] 28500 if v_1_1.Op != OpAMD64SHRW { 28501 break 28502 } 28503 _ = v_1_1.Args[1] 28504 if x != v_1_1.Args[0] { 28505 break 28506 } 28507 v_1_1_1 := v_1_1.Args[1] 28508 if v_1_1_1.Op != OpAMD64NEGL { 28509 break 28510 } 28511 v_1_1_1_0 := v_1_1_1.Args[0] 28512 if 
v_1_1_1_0.Op != OpAMD64ADDLconst { 28513 break 28514 } 28515 if v_1_1_1_0.AuxInt != -16 { 28516 break 28517 } 28518 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 28519 if v_1_1_1_0_0.Op != OpAMD64ANDLconst { 28520 break 28521 } 28522 if v_1_1_1_0_0.AuxInt != 15 { 28523 break 28524 } 28525 if y != v_1_1_1_0_0.Args[0] { 28526 break 28527 } 28528 if !(v.Type.Size() == 2) { 28529 break 28530 } 28531 v.reset(OpAMD64ROLW) 28532 v.AddArg(x) 28533 v.AddArg(y) 28534 return true 28535 } 28536 // match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15]))) 28537 // cond: v.Type.Size() == 2 28538 // result: (ROLW x y) 28539 for { 28540 _ = v.Args[1] 28541 v_0 := v.Args[0] 28542 if v_0.Op != OpAMD64ANDL { 28543 break 28544 } 28545 _ = v_0.Args[1] 28546 v_0_0 := v_0.Args[0] 28547 if v_0_0.Op != OpAMD64SHRW { 28548 break 28549 } 28550 _ = v_0_0.Args[1] 28551 x := v_0_0.Args[0] 28552 v_0_0_1 := v_0_0.Args[1] 28553 if v_0_0_1.Op != OpAMD64NEGL { 28554 break 28555 } 28556 v_0_0_1_0 := v_0_0_1.Args[0] 28557 if v_0_0_1_0.Op != OpAMD64ADDLconst { 28558 break 28559 } 28560 if v_0_0_1_0.AuxInt != -16 { 28561 break 28562 } 28563 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 28564 if v_0_0_1_0_0.Op != OpAMD64ANDLconst { 28565 break 28566 } 28567 if v_0_0_1_0_0.AuxInt != 15 { 28568 break 28569 } 28570 y := v_0_0_1_0_0.Args[0] 28571 v_0_1 := v_0.Args[1] 28572 if v_0_1.Op != OpAMD64SBBLcarrymask { 28573 break 28574 } 28575 v_0_1_0 := v_0_1.Args[0] 28576 if v_0_1_0.Op != OpAMD64CMPLconst { 28577 break 28578 } 28579 if v_0_1_0.AuxInt != 16 { 28580 break 28581 } 28582 v_0_1_0_0 := v_0_1_0.Args[0] 28583 if v_0_1_0_0.Op != OpAMD64NEGL { 28584 break 28585 } 28586 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 28587 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 28588 break 28589 } 28590 if v_0_1_0_0_0.AuxInt != -16 { 28591 break 28592 } 28593 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 28594 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 28595 break 28596 } 28597 if v_0_1_0_0_0_0.AuxInt != 15 { 28598 break 28599 } 28600 if y != v_0_1_0_0_0_0.Args[0] { 28601 break 28602 } 28603 v_1 := v.Args[1] 28604 if v_1.Op != OpAMD64SHLL { 28605 break 28606 } 28607 _ = v_1.Args[1] 28608 if x != v_1.Args[0] { 28609 break 28610 } 28611 v_1_1 := v_1.Args[1] 28612 if v_1_1.Op != OpAMD64ANDLconst { 28613 break 28614 } 28615 if v_1_1.AuxInt != 15 { 28616 break 28617 } 28618 if y != v_1_1.Args[0] { 28619 break 28620 } 28621 if !(v.Type.Size() == 2) { 28622 break 28623 } 28624 v.reset(OpAMD64ROLW) 28625 v.AddArg(x) 28626 v.AddArg(y) 28627 return true 28628 } 28629 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15]))) 28630 // cond: v.Type.Size() == 2 28631 // result: (ROLW x y) 28632 for { 28633 _ = v.Args[1] 28634 v_0 := v.Args[0] 28635 if v_0.Op != OpAMD64ANDL { 28636 break 28637 } 28638 _ = v_0.Args[1] 28639 v_0_0 := v_0.Args[0] 28640 if v_0_0.Op != OpAMD64SBBLcarrymask { 28641 break 28642 } 28643 v_0_0_0 := v_0_0.Args[0] 28644 if v_0_0_0.Op != OpAMD64CMPLconst { 28645 break 28646 } 28647 if v_0_0_0.AuxInt != 16 { 28648 break 28649 } 28650 v_0_0_0_0 := v_0_0_0.Args[0] 28651 if v_0_0_0_0.Op != OpAMD64NEGL { 28652 break 28653 } 28654 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 28655 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 28656 break 28657 } 28658 if v_0_0_0_0_0.AuxInt != -16 { 28659 break 28660 } 28661 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 28662 if v_0_0_0_0_0_0.Op != 
OpAMD64ANDLconst { 28663 break 28664 } 28665 if v_0_0_0_0_0_0.AuxInt != 15 { 28666 break 28667 } 28668 y := v_0_0_0_0_0_0.Args[0] 28669 v_0_1 := v_0.Args[1] 28670 if v_0_1.Op != OpAMD64SHRW { 28671 break 28672 } 28673 _ = v_0_1.Args[1] 28674 x := v_0_1.Args[0] 28675 v_0_1_1 := v_0_1.Args[1] 28676 if v_0_1_1.Op != OpAMD64NEGL { 28677 break 28678 } 28679 v_0_1_1_0 := v_0_1_1.Args[0] 28680 if v_0_1_1_0.Op != OpAMD64ADDLconst { 28681 break 28682 } 28683 if v_0_1_1_0.AuxInt != -16 { 28684 break 28685 } 28686 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 28687 if v_0_1_1_0_0.Op != OpAMD64ANDLconst { 28688 break 28689 } 28690 if v_0_1_1_0_0.AuxInt != 15 { 28691 break 28692 } 28693 if y != v_0_1_1_0_0.Args[0] { 28694 break 28695 } 28696 v_1 := v.Args[1] 28697 if v_1.Op != OpAMD64SHLL { 28698 break 28699 } 28700 _ = v_1.Args[1] 28701 if x != v_1.Args[0] { 28702 break 28703 } 28704 v_1_1 := v_1.Args[1] 28705 if v_1_1.Op != OpAMD64ANDLconst { 28706 break 28707 } 28708 if v_1_1.AuxInt != 15 { 28709 break 28710 } 28711 if y != v_1_1.Args[0] { 28712 break 28713 } 28714 if !(v.Type.Size() == 2) { 28715 break 28716 } 28717 v.reset(OpAMD64ROLW) 28718 v.AddArg(x) 28719 v.AddArg(y) 28720 return true 28721 } 28722 // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) 28723 // cond: v.Type.Size() == 2 28724 // result: (RORW x y) 28725 for { 28726 _ = v.Args[1] 28727 v_0 := v.Args[0] 28728 if v_0.Op != OpAMD64SHRW { 28729 break 28730 } 28731 _ = v_0.Args[1] 28732 x := v_0.Args[0] 28733 v_0_1 := v_0.Args[1] 28734 if v_0_1.Op != OpAMD64ANDQconst { 28735 break 28736 } 28737 if v_0_1.AuxInt != 15 { 28738 break 28739 } 28740 y := v_0_1.Args[0] 28741 v_1 := v.Args[1] 28742 if v_1.Op != OpAMD64SHLL { 28743 break 28744 } 28745 _ = v_1.Args[1] 28746 if x != v_1.Args[0] { 28747 break 28748 } 28749 v_1_1 := v_1.Args[1] 28750 if v_1_1.Op != OpAMD64NEGQ { 28751 break 28752 } 28753 v_1_1_0 := v_1_1.Args[0] 28754 if v_1_1_0.Op != OpAMD64ADDQconst { 28755 break 28756 } 28757 if v_1_1_0.AuxInt != -16 { 28758 break 28759 } 28760 v_1_1_0_0 := v_1_1_0.Args[0] 28761 if v_1_1_0_0.Op != OpAMD64ANDQconst { 28762 break 28763 } 28764 if v_1_1_0_0.AuxInt != 15 { 28765 break 28766 } 28767 if y != v_1_1_0_0.Args[0] { 28768 break 28769 } 28770 if !(v.Type.Size() == 2) { 28771 break 28772 } 28773 v.reset(OpAMD64RORW) 28774 v.AddArg(x) 28775 v.AddArg(y) 28776 return true 28777 } 28778 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15]))) 28779 // cond: v.Type.Size() == 2 28780 // result: (RORW x y) 28781 for { 28782 _ = v.Args[1] 28783 v_0 := v.Args[0] 28784 if v_0.Op != OpAMD64SHLL { 28785 break 28786 } 28787 _ = v_0.Args[1] 28788 x := v_0.Args[0] 28789 v_0_1 := v_0.Args[1] 28790 if v_0_1.Op != OpAMD64NEGQ { 28791 break 28792 } 28793 v_0_1_0 := v_0_1.Args[0] 28794 if v_0_1_0.Op != OpAMD64ADDQconst { 28795 break 28796 } 28797 if v_0_1_0.AuxInt != -16 { 28798 break 28799 } 28800 v_0_1_0_0 := v_0_1_0.Args[0] 28801 if v_0_1_0_0.Op != OpAMD64ANDQconst { 28802 break 28803 } 28804 if v_0_1_0_0.AuxInt != 15 { 28805 break 28806 } 28807 y := v_0_1_0_0.Args[0] 28808 v_1 := v.Args[1] 28809 if v_1.Op != OpAMD64SHRW { 28810 break 28811 } 28812 _ = v_1.Args[1] 28813 if x != v_1.Args[0] { 28814 break 28815 } 28816 v_1_1 := v_1.Args[1] 28817 if v_1_1.Op != OpAMD64ANDQconst { 28818 break 28819 } 28820 if v_1_1.AuxInt != 15 { 28821 break 28822 } 28823 if y != v_1_1.Args[0] { 28824 break 28825 } 28826 if !(v.Type.Size() == 2) { 28827 break 28828 } 28829 v.reset(OpAMD64RORW) 28830 
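// Editor's note: the ROLW and RORW cases are the 16-bit analogues of the
// 32-bit rotate rules: the masks and bounds become [15], [-16] and [16],
// and each carries the extra condition v.Type.Size() == 2 because the
// shifts run on the full 32-bit ALU and the OR is only a rotate when just
// the low 16 bits of the result are observed. Assumed source idiom
// (illustrative, not taken from this file):
//
//	func rol16(x uint16, k uint) uint16 {
//		return x<<(k&15) | x>>((16-k)&15)
//	}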
v.AddArg(x) 28831 v.AddArg(y) 28832 return true 28833 } 28834 // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) 28835 // cond: v.Type.Size() == 2 28836 // result: (RORW x y) 28837 for { 28838 _ = v.Args[1] 28839 v_0 := v.Args[0] 28840 if v_0.Op != OpAMD64SHRW { 28841 break 28842 } 28843 _ = v_0.Args[1] 28844 x := v_0.Args[0] 28845 v_0_1 := v_0.Args[1] 28846 if v_0_1.Op != OpAMD64ANDLconst { 28847 break 28848 } 28849 if v_0_1.AuxInt != 15 { 28850 break 28851 } 28852 y := v_0_1.Args[0] 28853 v_1 := v.Args[1] 28854 if v_1.Op != OpAMD64SHLL { 28855 break 28856 } 28857 _ = v_1.Args[1] 28858 if x != v_1.Args[0] { 28859 break 28860 } 28861 v_1_1 := v_1.Args[1] 28862 if v_1_1.Op != OpAMD64NEGL { 28863 break 28864 } 28865 v_1_1_0 := v_1_1.Args[0] 28866 if v_1_1_0.Op != OpAMD64ADDLconst { 28867 break 28868 } 28869 if v_1_1_0.AuxInt != -16 { 28870 break 28871 } 28872 v_1_1_0_0 := v_1_1_0.Args[0] 28873 if v_1_1_0_0.Op != OpAMD64ANDLconst { 28874 break 28875 } 28876 if v_1_1_0_0.AuxInt != 15 { 28877 break 28878 } 28879 if y != v_1_1_0_0.Args[0] { 28880 break 28881 } 28882 if !(v.Type.Size() == 2) { 28883 break 28884 } 28885 v.reset(OpAMD64RORW) 28886 v.AddArg(x) 28887 v.AddArg(y) 28888 return true 28889 } 28890 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15]))) 28891 // cond: v.Type.Size() == 2 28892 // result: (RORW x y) 28893 for { 28894 _ = v.Args[1] 28895 v_0 := v.Args[0] 28896 if v_0.Op != OpAMD64SHLL { 28897 break 28898 } 28899 _ = v_0.Args[1] 28900 x := v_0.Args[0] 28901 v_0_1 := v_0.Args[1] 28902 if v_0_1.Op != OpAMD64NEGL { 28903 break 28904 } 28905 v_0_1_0 := v_0_1.Args[0] 28906 if v_0_1_0.Op != OpAMD64ADDLconst { 28907 break 28908 } 28909 if v_0_1_0.AuxInt != -16 { 28910 break 28911 } 28912 v_0_1_0_0 := v_0_1_0.Args[0] 28913 if v_0_1_0_0.Op != OpAMD64ANDLconst { 28914 break 28915 } 28916 if v_0_1_0_0.AuxInt != 15 { 28917 break 28918 } 28919 y := v_0_1_0_0.Args[0] 28920 v_1 := v.Args[1] 28921 if v_1.Op != OpAMD64SHRW { 28922 break 28923 } 28924 _ = v_1.Args[1] 28925 if x != v_1.Args[0] { 28926 break 28927 } 28928 v_1_1 := v_1.Args[1] 28929 if v_1_1.Op != OpAMD64ANDLconst { 28930 break 28931 } 28932 if v_1_1.AuxInt != 15 { 28933 break 28934 } 28935 if y != v_1_1.Args[0] { 28936 break 28937 } 28938 if !(v.Type.Size() == 2) { 28939 break 28940 } 28941 v.reset(OpAMD64RORW) 28942 v.AddArg(x) 28943 v.AddArg(y) 28944 return true 28945 } 28946 return false 28947 } 28948 func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool { 28949 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) 28950 // cond: v.Type.Size() == 1 28951 // result: (ROLB x y) 28952 for { 28953 _ = v.Args[1] 28954 v_0 := v.Args[0] 28955 if v_0.Op != OpAMD64SHLL { 28956 break 28957 } 28958 _ = v_0.Args[1] 28959 x := v_0.Args[0] 28960 v_0_1 := v_0.Args[1] 28961 if v_0_1.Op != OpAMD64ANDQconst { 28962 break 28963 } 28964 if v_0_1.AuxInt != 7 { 28965 break 28966 } 28967 y := v_0_1.Args[0] 28968 v_1 := v.Args[1] 28969 if v_1.Op != OpAMD64ANDL { 28970 break 28971 } 28972 _ = v_1.Args[1] 28973 v_1_0 := v_1.Args[0] 28974 if v_1_0.Op != OpAMD64SHRB { 28975 break 28976 } 28977 _ = v_1_0.Args[1] 28978 if x != v_1_0.Args[0] { 28979 break 28980 } 28981 v_1_0_1 := v_1_0.Args[1] 28982 if v_1_0_1.Op != OpAMD64NEGQ { 28983 break 28984 } 28985 v_1_0_1_0 := v_1_0_1.Args[0] 28986 if v_1_0_1_0.Op != OpAMD64ADDQconst { 28987 break 28988 } 
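// Editor's note: rewriteValueAMD64_OpAMD64ORL_40 carries the same scheme
// down to bytes: constants [7], [-8] and [8], condition v.Type.Size() == 1,
// result ROLB. Assumed source idiom (a sketch for orientation, not code
// from this package):
//
//	func rol8(x uint8, k uint) uint8 {
//		return x<<(k&7) | x>>((8-k)&7)
//	}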
28989 if v_1_0_1_0.AuxInt != -8 { 28990 break 28991 } 28992 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 28993 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 28994 break 28995 } 28996 if v_1_0_1_0_0.AuxInt != 7 { 28997 break 28998 } 28999 if y != v_1_0_1_0_0.Args[0] { 29000 break 29001 } 29002 v_1_1 := v_1.Args[1] 29003 if v_1_1.Op != OpAMD64SBBLcarrymask { 29004 break 29005 } 29006 v_1_1_0 := v_1_1.Args[0] 29007 if v_1_1_0.Op != OpAMD64CMPQconst { 29008 break 29009 } 29010 if v_1_1_0.AuxInt != 8 { 29011 break 29012 } 29013 v_1_1_0_0 := v_1_1_0.Args[0] 29014 if v_1_1_0_0.Op != OpAMD64NEGQ { 29015 break 29016 } 29017 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 29018 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 29019 break 29020 } 29021 if v_1_1_0_0_0.AuxInt != -8 { 29022 break 29023 } 29024 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 29025 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 29026 break 29027 } 29028 if v_1_1_0_0_0_0.AuxInt != 7 { 29029 break 29030 } 29031 if y != v_1_1_0_0_0_0.Args[0] { 29032 break 29033 } 29034 if !(v.Type.Size() == 1) { 29035 break 29036 } 29037 v.reset(OpAMD64ROLB) 29038 v.AddArg(x) 29039 v.AddArg(y) 29040 return true 29041 } 29042 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))) 29043 // cond: v.Type.Size() == 1 29044 // result: (ROLB x y) 29045 for { 29046 _ = v.Args[1] 29047 v_0 := v.Args[0] 29048 if v_0.Op != OpAMD64SHLL { 29049 break 29050 } 29051 _ = v_0.Args[1] 29052 x := v_0.Args[0] 29053 v_0_1 := v_0.Args[1] 29054 if v_0_1.Op != OpAMD64ANDQconst { 29055 break 29056 } 29057 if v_0_1.AuxInt != 7 { 29058 break 29059 } 29060 y := v_0_1.Args[0] 29061 v_1 := v.Args[1] 29062 if v_1.Op != OpAMD64ANDL { 29063 break 29064 } 29065 _ = v_1.Args[1] 29066 v_1_0 := v_1.Args[0] 29067 if v_1_0.Op != OpAMD64SBBLcarrymask { 29068 break 29069 } 29070 v_1_0_0 := v_1_0.Args[0] 29071 if v_1_0_0.Op != OpAMD64CMPQconst { 29072 break 29073 } 29074 if v_1_0_0.AuxInt != 8 { 29075 break 29076 } 29077 v_1_0_0_0 := v_1_0_0.Args[0] 29078 if v_1_0_0_0.Op != OpAMD64NEGQ { 29079 break 29080 } 29081 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 29082 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 29083 break 29084 } 29085 if v_1_0_0_0_0.AuxInt != -8 { 29086 break 29087 } 29088 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 29089 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 29090 break 29091 } 29092 if v_1_0_0_0_0_0.AuxInt != 7 { 29093 break 29094 } 29095 if y != v_1_0_0_0_0_0.Args[0] { 29096 break 29097 } 29098 v_1_1 := v_1.Args[1] 29099 if v_1_1.Op != OpAMD64SHRB { 29100 break 29101 } 29102 _ = v_1_1.Args[1] 29103 if x != v_1_1.Args[0] { 29104 break 29105 } 29106 v_1_1_1 := v_1_1.Args[1] 29107 if v_1_1_1.Op != OpAMD64NEGQ { 29108 break 29109 } 29110 v_1_1_1_0 := v_1_1_1.Args[0] 29111 if v_1_1_1_0.Op != OpAMD64ADDQconst { 29112 break 29113 } 29114 if v_1_1_1_0.AuxInt != -8 { 29115 break 29116 } 29117 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 29118 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 29119 break 29120 } 29121 if v_1_1_1_0_0.AuxInt != 7 { 29122 break 29123 } 29124 if y != v_1_1_1_0_0.Args[0] { 29125 break 29126 } 29127 if !(v.Type.Size() == 1) { 29128 break 29129 } 29130 v.reset(OpAMD64ROLB) 29131 v.AddArg(x) 29132 v.AddArg(y) 29133 return true 29134 } 29135 // match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7]))) 29136 // cond: v.Type.Size() == 1 29137 // result: (ROLB x y) 29138 for { 29139 _ = v.Args[1] 29140 v_0 := 
v.Args[0] 29141 if v_0.Op != OpAMD64ANDL { 29142 break 29143 } 29144 _ = v_0.Args[1] 29145 v_0_0 := v_0.Args[0] 29146 if v_0_0.Op != OpAMD64SHRB { 29147 break 29148 } 29149 _ = v_0_0.Args[1] 29150 x := v_0_0.Args[0] 29151 v_0_0_1 := v_0_0.Args[1] 29152 if v_0_0_1.Op != OpAMD64NEGQ { 29153 break 29154 } 29155 v_0_0_1_0 := v_0_0_1.Args[0] 29156 if v_0_0_1_0.Op != OpAMD64ADDQconst { 29157 break 29158 } 29159 if v_0_0_1_0.AuxInt != -8 { 29160 break 29161 } 29162 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 29163 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 29164 break 29165 } 29166 if v_0_0_1_0_0.AuxInt != 7 { 29167 break 29168 } 29169 y := v_0_0_1_0_0.Args[0] 29170 v_0_1 := v_0.Args[1] 29171 if v_0_1.Op != OpAMD64SBBLcarrymask { 29172 break 29173 } 29174 v_0_1_0 := v_0_1.Args[0] 29175 if v_0_1_0.Op != OpAMD64CMPQconst { 29176 break 29177 } 29178 if v_0_1_0.AuxInt != 8 { 29179 break 29180 } 29181 v_0_1_0_0 := v_0_1_0.Args[0] 29182 if v_0_1_0_0.Op != OpAMD64NEGQ { 29183 break 29184 } 29185 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 29186 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 29187 break 29188 } 29189 if v_0_1_0_0_0.AuxInt != -8 { 29190 break 29191 } 29192 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 29193 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 29194 break 29195 } 29196 if v_0_1_0_0_0_0.AuxInt != 7 { 29197 break 29198 } 29199 if y != v_0_1_0_0_0_0.Args[0] { 29200 break 29201 } 29202 v_1 := v.Args[1] 29203 if v_1.Op != OpAMD64SHLL { 29204 break 29205 } 29206 _ = v_1.Args[1] 29207 if x != v_1.Args[0] { 29208 break 29209 } 29210 v_1_1 := v_1.Args[1] 29211 if v_1_1.Op != OpAMD64ANDQconst { 29212 break 29213 } 29214 if v_1_1.AuxInt != 7 { 29215 break 29216 } 29217 if y != v_1_1.Args[0] { 29218 break 29219 } 29220 if !(v.Type.Size() == 1) { 29221 break 29222 } 29223 v.reset(OpAMD64ROLB) 29224 v.AddArg(x) 29225 v.AddArg(y) 29226 return true 29227 } 29228 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7]))) 29229 // cond: v.Type.Size() == 1 29230 // result: (ROLB x y) 29231 for { 29232 _ = v.Args[1] 29233 v_0 := v.Args[0] 29234 if v_0.Op != OpAMD64ANDL { 29235 break 29236 } 29237 _ = v_0.Args[1] 29238 v_0_0 := v_0.Args[0] 29239 if v_0_0.Op != OpAMD64SBBLcarrymask { 29240 break 29241 } 29242 v_0_0_0 := v_0_0.Args[0] 29243 if v_0_0_0.Op != OpAMD64CMPQconst { 29244 break 29245 } 29246 if v_0_0_0.AuxInt != 8 { 29247 break 29248 } 29249 v_0_0_0_0 := v_0_0_0.Args[0] 29250 if v_0_0_0_0.Op != OpAMD64NEGQ { 29251 break 29252 } 29253 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 29254 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 29255 break 29256 } 29257 if v_0_0_0_0_0.AuxInt != -8 { 29258 break 29259 } 29260 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 29261 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 29262 break 29263 } 29264 if v_0_0_0_0_0_0.AuxInt != 7 { 29265 break 29266 } 29267 y := v_0_0_0_0_0_0.Args[0] 29268 v_0_1 := v_0.Args[1] 29269 if v_0_1.Op != OpAMD64SHRB { 29270 break 29271 } 29272 _ = v_0_1.Args[1] 29273 x := v_0_1.Args[0] 29274 v_0_1_1 := v_0_1.Args[1] 29275 if v_0_1_1.Op != OpAMD64NEGQ { 29276 break 29277 } 29278 v_0_1_1_0 := v_0_1_1.Args[0] 29279 if v_0_1_1_0.Op != OpAMD64ADDQconst { 29280 break 29281 } 29282 if v_0_1_1_0.AuxInt != -8 { 29283 break 29284 } 29285 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 29286 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 29287 break 29288 } 29289 if v_0_1_1_0_0.AuxInt != 7 { 29290 break 29291 } 29292 if y != v_0_1_1_0_0.Args[0] { 29293 break 29294 } 29295 v_1 := v.Args[1] 29296 if v_1.Op != OpAMD64SHLL { 
29297 break 29298 } 29299 _ = v_1.Args[1] 29300 if x != v_1.Args[0] { 29301 break 29302 } 29303 v_1_1 := v_1.Args[1] 29304 if v_1_1.Op != OpAMD64ANDQconst { 29305 break 29306 } 29307 if v_1_1.AuxInt != 7 { 29308 break 29309 } 29310 if y != v_1_1.Args[0] { 29311 break 29312 } 29313 if !(v.Type.Size() == 1) { 29314 break 29315 } 29316 v.reset(OpAMD64ROLB) 29317 v.AddArg(x) 29318 v.AddArg(y) 29319 return true 29320 } 29321 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) 29322 // cond: v.Type.Size() == 1 29323 // result: (ROLB x y) 29324 for { 29325 _ = v.Args[1] 29326 v_0 := v.Args[0] 29327 if v_0.Op != OpAMD64SHLL { 29328 break 29329 } 29330 _ = v_0.Args[1] 29331 x := v_0.Args[0] 29332 v_0_1 := v_0.Args[1] 29333 if v_0_1.Op != OpAMD64ANDLconst { 29334 break 29335 } 29336 if v_0_1.AuxInt != 7 { 29337 break 29338 } 29339 y := v_0_1.Args[0] 29340 v_1 := v.Args[1] 29341 if v_1.Op != OpAMD64ANDL { 29342 break 29343 } 29344 _ = v_1.Args[1] 29345 v_1_0 := v_1.Args[0] 29346 if v_1_0.Op != OpAMD64SHRB { 29347 break 29348 } 29349 _ = v_1_0.Args[1] 29350 if x != v_1_0.Args[0] { 29351 break 29352 } 29353 v_1_0_1 := v_1_0.Args[1] 29354 if v_1_0_1.Op != OpAMD64NEGL { 29355 break 29356 } 29357 v_1_0_1_0 := v_1_0_1.Args[0] 29358 if v_1_0_1_0.Op != OpAMD64ADDLconst { 29359 break 29360 } 29361 if v_1_0_1_0.AuxInt != -8 { 29362 break 29363 } 29364 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 29365 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 29366 break 29367 } 29368 if v_1_0_1_0_0.AuxInt != 7 { 29369 break 29370 } 29371 if y != v_1_0_1_0_0.Args[0] { 29372 break 29373 } 29374 v_1_1 := v_1.Args[1] 29375 if v_1_1.Op != OpAMD64SBBLcarrymask { 29376 break 29377 } 29378 v_1_1_0 := v_1_1.Args[0] 29379 if v_1_1_0.Op != OpAMD64CMPLconst { 29380 break 29381 } 29382 if v_1_1_0.AuxInt != 8 { 29383 break 29384 } 29385 v_1_1_0_0 := v_1_1_0.Args[0] 29386 if v_1_1_0_0.Op != OpAMD64NEGL { 29387 break 29388 } 29389 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 29390 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 29391 break 29392 } 29393 if v_1_1_0_0_0.AuxInt != -8 { 29394 break 29395 } 29396 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 29397 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 29398 break 29399 } 29400 if v_1_1_0_0_0_0.AuxInt != 7 { 29401 break 29402 } 29403 if y != v_1_1_0_0_0_0.Args[0] { 29404 break 29405 } 29406 if !(v.Type.Size() == 1) { 29407 break 29408 } 29409 v.reset(OpAMD64ROLB) 29410 v.AddArg(x) 29411 v.AddArg(y) 29412 return true 29413 } 29414 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))) 29415 // cond: v.Type.Size() == 1 29416 // result: (ROLB x y) 29417 for { 29418 _ = v.Args[1] 29419 v_0 := v.Args[0] 29420 if v_0.Op != OpAMD64SHLL { 29421 break 29422 } 29423 _ = v_0.Args[1] 29424 x := v_0.Args[0] 29425 v_0_1 := v_0.Args[1] 29426 if v_0_1.Op != OpAMD64ANDLconst { 29427 break 29428 } 29429 if v_0_1.AuxInt != 7 { 29430 break 29431 } 29432 y := v_0_1.Args[0] 29433 v_1 := v.Args[1] 29434 if v_1.Op != OpAMD64ANDL { 29435 break 29436 } 29437 _ = v_1.Args[1] 29438 v_1_0 := v_1.Args[0] 29439 if v_1_0.Op != OpAMD64SBBLcarrymask { 29440 break 29441 } 29442 v_1_0_0 := v_1_0.Args[0] 29443 if v_1_0_0.Op != OpAMD64CMPLconst { 29444 break 29445 } 29446 if v_1_0_0.AuxInt != 8 { 29447 break 29448 } 29449 v_1_0_0_0 := v_1_0_0.Args[0] 29450 if v_1_0_0_0.Op != OpAMD64NEGL { 29451 break 29452 } 29453 
v_1_0_0_0_0 := v_1_0_0_0.Args[0] 29454 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 29455 break 29456 } 29457 if v_1_0_0_0_0.AuxInt != -8 { 29458 break 29459 } 29460 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 29461 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 29462 break 29463 } 29464 if v_1_0_0_0_0_0.AuxInt != 7 { 29465 break 29466 } 29467 if y != v_1_0_0_0_0_0.Args[0] { 29468 break 29469 } 29470 v_1_1 := v_1.Args[1] 29471 if v_1_1.Op != OpAMD64SHRB { 29472 break 29473 } 29474 _ = v_1_1.Args[1] 29475 if x != v_1_1.Args[0] { 29476 break 29477 } 29478 v_1_1_1 := v_1_1.Args[1] 29479 if v_1_1_1.Op != OpAMD64NEGL { 29480 break 29481 } 29482 v_1_1_1_0 := v_1_1_1.Args[0] 29483 if v_1_1_1_0.Op != OpAMD64ADDLconst { 29484 break 29485 } 29486 if v_1_1_1_0.AuxInt != -8 { 29487 break 29488 } 29489 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 29490 if v_1_1_1_0_0.Op != OpAMD64ANDLconst { 29491 break 29492 } 29493 if v_1_1_1_0_0.AuxInt != 7 { 29494 break 29495 } 29496 if y != v_1_1_1_0_0.Args[0] { 29497 break 29498 } 29499 if !(v.Type.Size() == 1) { 29500 break 29501 } 29502 v.reset(OpAMD64ROLB) 29503 v.AddArg(x) 29504 v.AddArg(y) 29505 return true 29506 } 29507 // match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7]))) 29508 // cond: v.Type.Size() == 1 29509 // result: (ROLB x y) 29510 for { 29511 _ = v.Args[1] 29512 v_0 := v.Args[0] 29513 if v_0.Op != OpAMD64ANDL { 29514 break 29515 } 29516 _ = v_0.Args[1] 29517 v_0_0 := v_0.Args[0] 29518 if v_0_0.Op != OpAMD64SHRB { 29519 break 29520 } 29521 _ = v_0_0.Args[1] 29522 x := v_0_0.Args[0] 29523 v_0_0_1 := v_0_0.Args[1] 29524 if v_0_0_1.Op != OpAMD64NEGL { 29525 break 29526 } 29527 v_0_0_1_0 := v_0_0_1.Args[0] 29528 if v_0_0_1_0.Op != OpAMD64ADDLconst { 29529 break 29530 } 29531 if v_0_0_1_0.AuxInt != -8 { 29532 break 29533 } 29534 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 29535 if v_0_0_1_0_0.Op != OpAMD64ANDLconst { 29536 break 29537 } 29538 if v_0_0_1_0_0.AuxInt != 7 { 29539 break 29540 } 29541 y := v_0_0_1_0_0.Args[0] 29542 v_0_1 := v_0.Args[1] 29543 if v_0_1.Op != OpAMD64SBBLcarrymask { 29544 break 29545 } 29546 v_0_1_0 := v_0_1.Args[0] 29547 if v_0_1_0.Op != OpAMD64CMPLconst { 29548 break 29549 } 29550 if v_0_1_0.AuxInt != 8 { 29551 break 29552 } 29553 v_0_1_0_0 := v_0_1_0.Args[0] 29554 if v_0_1_0_0.Op != OpAMD64NEGL { 29555 break 29556 } 29557 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 29558 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 29559 break 29560 } 29561 if v_0_1_0_0_0.AuxInt != -8 { 29562 break 29563 } 29564 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 29565 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 29566 break 29567 } 29568 if v_0_1_0_0_0_0.AuxInt != 7 { 29569 break 29570 } 29571 if y != v_0_1_0_0_0_0.Args[0] { 29572 break 29573 } 29574 v_1 := v.Args[1] 29575 if v_1.Op != OpAMD64SHLL { 29576 break 29577 } 29578 _ = v_1.Args[1] 29579 if x != v_1.Args[0] { 29580 break 29581 } 29582 v_1_1 := v_1.Args[1] 29583 if v_1_1.Op != OpAMD64ANDLconst { 29584 break 29585 } 29586 if v_1_1.AuxInt != 7 { 29587 break 29588 } 29589 if y != v_1_1.Args[0] { 29590 break 29591 } 29592 if !(v.Type.Size() == 1) { 29593 break 29594 } 29595 v.reset(OpAMD64ROLB) 29596 v.AddArg(x) 29597 v.AddArg(y) 29598 return true 29599 } 29600 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7]))) 29601 // cond: v.Type.Size() == 1 29602 // result: (ROLB x y) 29603 for { 29604 _ = v.Args[1] 29605 
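// Editor's note: the RORB cases that follow match a simpler two-term shape,
// (ORL (SHRB x (ANDconst y [7])) (SHLL x (NEG ...))), with no
// ANDL/SBBLcarrymask guard, presumably because when y&7 == 0 the left
// shift count is 8, and shifting a byte left by 8 in a 32-bit register
// already clears the observed low byte, so no mask is needed. Assumed
// source idiom (illustrative):
//
//	func ror8(x uint8, k uint) uint8 {
//		return x>>(k&7) | x<<((8-k)&7)
//	}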
v_0 := v.Args[0] 29606 if v_0.Op != OpAMD64ANDL { 29607 break 29608 } 29609 _ = v_0.Args[1] 29610 v_0_0 := v_0.Args[0] 29611 if v_0_0.Op != OpAMD64SBBLcarrymask { 29612 break 29613 } 29614 v_0_0_0 := v_0_0.Args[0] 29615 if v_0_0_0.Op != OpAMD64CMPLconst { 29616 break 29617 } 29618 if v_0_0_0.AuxInt != 8 { 29619 break 29620 } 29621 v_0_0_0_0 := v_0_0_0.Args[0] 29622 if v_0_0_0_0.Op != OpAMD64NEGL { 29623 break 29624 } 29625 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 29626 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 29627 break 29628 } 29629 if v_0_0_0_0_0.AuxInt != -8 { 29630 break 29631 } 29632 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 29633 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 29634 break 29635 } 29636 if v_0_0_0_0_0_0.AuxInt != 7 { 29637 break 29638 } 29639 y := v_0_0_0_0_0_0.Args[0] 29640 v_0_1 := v_0.Args[1] 29641 if v_0_1.Op != OpAMD64SHRB { 29642 break 29643 } 29644 _ = v_0_1.Args[1] 29645 x := v_0_1.Args[0] 29646 v_0_1_1 := v_0_1.Args[1] 29647 if v_0_1_1.Op != OpAMD64NEGL { 29648 break 29649 } 29650 v_0_1_1_0 := v_0_1_1.Args[0] 29651 if v_0_1_1_0.Op != OpAMD64ADDLconst { 29652 break 29653 } 29654 if v_0_1_1_0.AuxInt != -8 { 29655 break 29656 } 29657 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 29658 if v_0_1_1_0_0.Op != OpAMD64ANDLconst { 29659 break 29660 } 29661 if v_0_1_1_0_0.AuxInt != 7 { 29662 break 29663 } 29664 if y != v_0_1_1_0_0.Args[0] { 29665 break 29666 } 29667 v_1 := v.Args[1] 29668 if v_1.Op != OpAMD64SHLL { 29669 break 29670 } 29671 _ = v_1.Args[1] 29672 if x != v_1.Args[0] { 29673 break 29674 } 29675 v_1_1 := v_1.Args[1] 29676 if v_1_1.Op != OpAMD64ANDLconst { 29677 break 29678 } 29679 if v_1_1.AuxInt != 7 { 29680 break 29681 } 29682 if y != v_1_1.Args[0] { 29683 break 29684 } 29685 if !(v.Type.Size() == 1) { 29686 break 29687 } 29688 v.reset(OpAMD64ROLB) 29689 v.AddArg(x) 29690 v.AddArg(y) 29691 return true 29692 } 29693 // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) 29694 // cond: v.Type.Size() == 1 29695 // result: (RORB x y) 29696 for { 29697 _ = v.Args[1] 29698 v_0 := v.Args[0] 29699 if v_0.Op != OpAMD64SHRB { 29700 break 29701 } 29702 _ = v_0.Args[1] 29703 x := v_0.Args[0] 29704 v_0_1 := v_0.Args[1] 29705 if v_0_1.Op != OpAMD64ANDQconst { 29706 break 29707 } 29708 if v_0_1.AuxInt != 7 { 29709 break 29710 } 29711 y := v_0_1.Args[0] 29712 v_1 := v.Args[1] 29713 if v_1.Op != OpAMD64SHLL { 29714 break 29715 } 29716 _ = v_1.Args[1] 29717 if x != v_1.Args[0] { 29718 break 29719 } 29720 v_1_1 := v_1.Args[1] 29721 if v_1_1.Op != OpAMD64NEGQ { 29722 break 29723 } 29724 v_1_1_0 := v_1_1.Args[0] 29725 if v_1_1_0.Op != OpAMD64ADDQconst { 29726 break 29727 } 29728 if v_1_1_0.AuxInt != -8 { 29729 break 29730 } 29731 v_1_1_0_0 := v_1_1_0.Args[0] 29732 if v_1_1_0_0.Op != OpAMD64ANDQconst { 29733 break 29734 } 29735 if v_1_1_0_0.AuxInt != 7 { 29736 break 29737 } 29738 if y != v_1_1_0_0.Args[0] { 29739 break 29740 } 29741 if !(v.Type.Size() == 1) { 29742 break 29743 } 29744 v.reset(OpAMD64RORB) 29745 v.AddArg(x) 29746 v.AddArg(y) 29747 return true 29748 } 29749 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7]))) 29750 // cond: v.Type.Size() == 1 29751 // result: (RORB x y) 29752 for { 29753 _ = v.Args[1] 29754 v_0 := v.Args[0] 29755 if v_0.Op != OpAMD64SHLL { 29756 break 29757 } 29758 _ = v_0.Args[1] 29759 x := v_0.Args[0] 29760 v_0_1 := v_0.Args[1] 29761 if v_0_1.Op != OpAMD64NEGQ { 29762 break 29763 } 29764 v_0_1_0 := v_0_1.Args[0] 29765 if v_0_1_0.Op != OpAMD64ADDQconst { 29766 break 29767 } 29768 if 
v_0_1_0.AuxInt != -8 { 29769 break 29770 } 29771 v_0_1_0_0 := v_0_1_0.Args[0] 29772 if v_0_1_0_0.Op != OpAMD64ANDQconst { 29773 break 29774 } 29775 if v_0_1_0_0.AuxInt != 7 { 29776 break 29777 } 29778 y := v_0_1_0_0.Args[0] 29779 v_1 := v.Args[1] 29780 if v_1.Op != OpAMD64SHRB { 29781 break 29782 } 29783 _ = v_1.Args[1] 29784 if x != v_1.Args[0] { 29785 break 29786 } 29787 v_1_1 := v_1.Args[1] 29788 if v_1_1.Op != OpAMD64ANDQconst { 29789 break 29790 } 29791 if v_1_1.AuxInt != 7 { 29792 break 29793 } 29794 if y != v_1_1.Args[0] { 29795 break 29796 } 29797 if !(v.Type.Size() == 1) { 29798 break 29799 } 29800 v.reset(OpAMD64RORB) 29801 v.AddArg(x) 29802 v.AddArg(y) 29803 return true 29804 } 29805 return false 29806 } 29807 func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { 29808 b := v.Block 29809 _ = b 29810 typ := &b.Func.Config.Types 29811 _ = typ 29812 // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) 29813 // cond: v.Type.Size() == 1 29814 // result: (RORB x y) 29815 for { 29816 _ = v.Args[1] 29817 v_0 := v.Args[0] 29818 if v_0.Op != OpAMD64SHRB { 29819 break 29820 } 29821 _ = v_0.Args[1] 29822 x := v_0.Args[0] 29823 v_0_1 := v_0.Args[1] 29824 if v_0_1.Op != OpAMD64ANDLconst { 29825 break 29826 } 29827 if v_0_1.AuxInt != 7 { 29828 break 29829 } 29830 y := v_0_1.Args[0] 29831 v_1 := v.Args[1] 29832 if v_1.Op != OpAMD64SHLL { 29833 break 29834 } 29835 _ = v_1.Args[1] 29836 if x != v_1.Args[0] { 29837 break 29838 } 29839 v_1_1 := v_1.Args[1] 29840 if v_1_1.Op != OpAMD64NEGL { 29841 break 29842 } 29843 v_1_1_0 := v_1_1.Args[0] 29844 if v_1_1_0.Op != OpAMD64ADDLconst { 29845 break 29846 } 29847 if v_1_1_0.AuxInt != -8 { 29848 break 29849 } 29850 v_1_1_0_0 := v_1_1_0.Args[0] 29851 if v_1_1_0_0.Op != OpAMD64ANDLconst { 29852 break 29853 } 29854 if v_1_1_0_0.AuxInt != 7 { 29855 break 29856 } 29857 if y != v_1_1_0_0.Args[0] { 29858 break 29859 } 29860 if !(v.Type.Size() == 1) { 29861 break 29862 } 29863 v.reset(OpAMD64RORB) 29864 v.AddArg(x) 29865 v.AddArg(y) 29866 return true 29867 } 29868 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7]))) 29869 // cond: v.Type.Size() == 1 29870 // result: (RORB x y) 29871 for { 29872 _ = v.Args[1] 29873 v_0 := v.Args[0] 29874 if v_0.Op != OpAMD64SHLL { 29875 break 29876 } 29877 _ = v_0.Args[1] 29878 x := v_0.Args[0] 29879 v_0_1 := v_0.Args[1] 29880 if v_0_1.Op != OpAMD64NEGL { 29881 break 29882 } 29883 v_0_1_0 := v_0_1.Args[0] 29884 if v_0_1_0.Op != OpAMD64ADDLconst { 29885 break 29886 } 29887 if v_0_1_0.AuxInt != -8 { 29888 break 29889 } 29890 v_0_1_0_0 := v_0_1_0.Args[0] 29891 if v_0_1_0_0.Op != OpAMD64ANDLconst { 29892 break 29893 } 29894 if v_0_1_0_0.AuxInt != 7 { 29895 break 29896 } 29897 y := v_0_1_0_0.Args[0] 29898 v_1 := v.Args[1] 29899 if v_1.Op != OpAMD64SHRB { 29900 break 29901 } 29902 _ = v_1.Args[1] 29903 if x != v_1.Args[0] { 29904 break 29905 } 29906 v_1_1 := v_1.Args[1] 29907 if v_1_1.Op != OpAMD64ANDLconst { 29908 break 29909 } 29910 if v_1_1.AuxInt != 7 { 29911 break 29912 } 29913 if y != v_1_1.Args[0] { 29914 break 29915 } 29916 if !(v.Type.Size() == 1) { 29917 break 29918 } 29919 v.reset(OpAMD64RORB) 29920 v.AddArg(x) 29921 v.AddArg(y) 29922 return true 29923 } 29924 // match: (ORL x x) 29925 // cond: 29926 // result: x 29927 for { 29928 _ = v.Args[1] 29929 x := v.Args[0] 29930 if x != v.Args[1] { 29931 break 29932 } 29933 v.reset(OpCopy) 29934 v.Type = x.Type 29935 v.AddArg(x) 29936 return true 29937 } 29938 // match: (ORL x0:(MOVBload [i0] {s} 
p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) 29939 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29940 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 29941 for { 29942 _ = v.Args[1] 29943 x0 := v.Args[0] 29944 if x0.Op != OpAMD64MOVBload { 29945 break 29946 } 29947 i0 := x0.AuxInt 29948 s := x0.Aux 29949 _ = x0.Args[1] 29950 p := x0.Args[0] 29951 mem := x0.Args[1] 29952 sh := v.Args[1] 29953 if sh.Op != OpAMD64SHLLconst { 29954 break 29955 } 29956 if sh.AuxInt != 8 { 29957 break 29958 } 29959 x1 := sh.Args[0] 29960 if x1.Op != OpAMD64MOVBload { 29961 break 29962 } 29963 i1 := x1.AuxInt 29964 if x1.Aux != s { 29965 break 29966 } 29967 _ = x1.Args[1] 29968 if p != x1.Args[0] { 29969 break 29970 } 29971 if mem != x1.Args[1] { 29972 break 29973 } 29974 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29975 break 29976 } 29977 b = mergePoint(b, x0, x1) 29978 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 29979 v.reset(OpCopy) 29980 v.AddArg(v0) 29981 v0.AuxInt = i0 29982 v0.Aux = s 29983 v0.AddArg(p) 29984 v0.AddArg(mem) 29985 return true 29986 } 29987 // match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 29988 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29989 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 29990 for { 29991 _ = v.Args[1] 29992 sh := v.Args[0] 29993 if sh.Op != OpAMD64SHLLconst { 29994 break 29995 } 29996 if sh.AuxInt != 8 { 29997 break 29998 } 29999 x1 := sh.Args[0] 30000 if x1.Op != OpAMD64MOVBload { 30001 break 30002 } 30003 i1 := x1.AuxInt 30004 s := x1.Aux 30005 _ = x1.Args[1] 30006 p := x1.Args[0] 30007 mem := x1.Args[1] 30008 x0 := v.Args[1] 30009 if x0.Op != OpAMD64MOVBload { 30010 break 30011 } 30012 i0 := x0.AuxInt 30013 if x0.Aux != s { 30014 break 30015 } 30016 _ = x0.Args[1] 30017 if p != x0.Args[0] { 30018 break 30019 } 30020 if mem != x0.Args[1] { 30021 break 30022 } 30023 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30024 break 30025 } 30026 b = mergePoint(b, x0, x1) 30027 v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 30028 v.reset(OpCopy) 30029 v.AddArg(v0) 30030 v0.AuxInt = i0 30031 v0.Aux = s 30032 v0.AddArg(p) 30033 v0.AddArg(mem) 30034 return true 30035 } 30036 // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) 30037 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30038 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 30039 for { 30040 _ = v.Args[1] 30041 x0 := v.Args[0] 30042 if x0.Op != OpAMD64MOVWload { 30043 break 30044 } 30045 i0 := x0.AuxInt 30046 s := x0.Aux 30047 _ = x0.Args[1] 30048 p := x0.Args[0] 30049 mem := x0.Args[1] 30050 sh := v.Args[1] 30051 if sh.Op != OpAMD64SHLLconst { 30052 break 30053 } 30054 if sh.AuxInt != 16 { 30055 break 30056 } 30057 x1 := sh.Args[0] 30058 if x1.Op != OpAMD64MOVWload { 30059 break 30060 } 30061 i1 := x1.AuxInt 30062 if x1.Aux != s { 30063 break 30064 } 30065 _ = x1.Args[1] 30066 if p != x1.Args[0] { 30067 break 30068 } 30069 if mem != x1.Args[1] { 30070 break 30071 } 30072 if !(i1 == i0+2 && x0.Uses 
== 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30073 break 30074 } 30075 b = mergePoint(b, x0, x1) 30076 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 30077 v.reset(OpCopy) 30078 v.AddArg(v0) 30079 v0.AuxInt = i0 30080 v0.Aux = s 30081 v0.AddArg(p) 30082 v0.AddArg(mem) 30083 return true 30084 } 30085 // match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 30086 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30087 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 30088 for { 30089 _ = v.Args[1] 30090 sh := v.Args[0] 30091 if sh.Op != OpAMD64SHLLconst { 30092 break 30093 } 30094 if sh.AuxInt != 16 { 30095 break 30096 } 30097 x1 := sh.Args[0] 30098 if x1.Op != OpAMD64MOVWload { 30099 break 30100 } 30101 i1 := x1.AuxInt 30102 s := x1.Aux 30103 _ = x1.Args[1] 30104 p := x1.Args[0] 30105 mem := x1.Args[1] 30106 x0 := v.Args[1] 30107 if x0.Op != OpAMD64MOVWload { 30108 break 30109 } 30110 i0 := x0.AuxInt 30111 if x0.Aux != s { 30112 break 30113 } 30114 _ = x0.Args[1] 30115 if p != x0.Args[0] { 30116 break 30117 } 30118 if mem != x0.Args[1] { 30119 break 30120 } 30121 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30122 break 30123 } 30124 b = mergePoint(b, x0, x1) 30125 v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 30126 v.reset(OpCopy) 30127 v.AddArg(v0) 30128 v0.AuxInt = i0 30129 v0.Aux = s 30130 v0.AddArg(p) 30131 v0.AddArg(mem) 30132 return true 30133 } 30134 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 30135 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30136 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30137 for { 30138 _ = v.Args[1] 30139 s1 := v.Args[0] 30140 if s1.Op != OpAMD64SHLLconst { 30141 break 30142 } 30143 j1 := s1.AuxInt 30144 x1 := s1.Args[0] 30145 if x1.Op != OpAMD64MOVBload { 30146 break 30147 } 30148 i1 := x1.AuxInt 30149 s := x1.Aux 30150 _ = x1.Args[1] 30151 p := x1.Args[0] 30152 mem := x1.Args[1] 30153 or := v.Args[1] 30154 if or.Op != OpAMD64ORL { 30155 break 30156 } 30157 _ = or.Args[1] 30158 s0 := or.Args[0] 30159 if s0.Op != OpAMD64SHLLconst { 30160 break 30161 } 30162 j0 := s0.AuxInt 30163 x0 := s0.Args[0] 30164 if x0.Op != OpAMD64MOVBload { 30165 break 30166 } 30167 i0 := x0.AuxInt 30168 if x0.Aux != s { 30169 break 30170 } 30171 _ = x0.Args[1] 30172 if p != x0.Args[0] { 30173 break 30174 } 30175 if mem != x0.Args[1] { 30176 break 30177 } 30178 y := or.Args[1] 30179 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30180 break 30181 } 30182 b = mergePoint(b, x0, x1) 30183 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 30184 v.reset(OpCopy) 30185 v.AddArg(v0) 30186 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 30187 v1.AuxInt = j0 30188 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 30189 v2.AuxInt = i0 30190 v2.Aux = s 30191 
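// Editor's note: from the (ORL x x) -> x case earlier in this function
// onward, these ORL cases switch from rotate recognition to load combining:
// an ORL of two adjacent narrow loads, one shifted left by the other's
// width, is replaced by a single wider load at the lower offset, which is
// sound because AMD64 is little-endian. mergePoint(b, x0, x1) selects a
// block dominating both loads, and the Uses == 1 plus clobber() conditions
// ensure the original narrow loads are dead afterwards. Illustrative Go
// source that such rules reduce to one wide load on amd64:
//
//	func le32(b []byte) uint32 {
//		_ = b[3] // one bounds check up front
//		return uint32(b[0]) | uint32(b[1])<<8 |
//			uint32(b[2])<<16 | uint32(b[3])<<24
//	}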
v2.AddArg(p) 30192 v2.AddArg(mem) 30193 v1.AddArg(v2) 30194 v0.AddArg(v1) 30195 v0.AddArg(y) 30196 return true 30197 } 30198 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) 30199 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30200 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30201 for { 30202 _ = v.Args[1] 30203 s1 := v.Args[0] 30204 if s1.Op != OpAMD64SHLLconst { 30205 break 30206 } 30207 j1 := s1.AuxInt 30208 x1 := s1.Args[0] 30209 if x1.Op != OpAMD64MOVBload { 30210 break 30211 } 30212 i1 := x1.AuxInt 30213 s := x1.Aux 30214 _ = x1.Args[1] 30215 p := x1.Args[0] 30216 mem := x1.Args[1] 30217 or := v.Args[1] 30218 if or.Op != OpAMD64ORL { 30219 break 30220 } 30221 _ = or.Args[1] 30222 y := or.Args[0] 30223 s0 := or.Args[1] 30224 if s0.Op != OpAMD64SHLLconst { 30225 break 30226 } 30227 j0 := s0.AuxInt 30228 x0 := s0.Args[0] 30229 if x0.Op != OpAMD64MOVBload { 30230 break 30231 } 30232 i0 := x0.AuxInt 30233 if x0.Aux != s { 30234 break 30235 } 30236 _ = x0.Args[1] 30237 if p != x0.Args[0] { 30238 break 30239 } 30240 if mem != x0.Args[1] { 30241 break 30242 } 30243 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30244 break 30245 } 30246 b = mergePoint(b, x0, x1) 30247 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 30248 v.reset(OpCopy) 30249 v.AddArg(v0) 30250 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 30251 v1.AuxInt = j0 30252 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 30253 v2.AuxInt = i0 30254 v2.Aux = s 30255 v2.AddArg(p) 30256 v2.AddArg(mem) 30257 v1.AddArg(v2) 30258 v0.AddArg(v1) 30259 v0.AddArg(y) 30260 return true 30261 } 30262 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 30263 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30264 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30265 for { 30266 _ = v.Args[1] 30267 or := v.Args[0] 30268 if or.Op != OpAMD64ORL { 30269 break 30270 } 30271 _ = or.Args[1] 30272 s0 := or.Args[0] 30273 if s0.Op != OpAMD64SHLLconst { 30274 break 30275 } 30276 j0 := s0.AuxInt 30277 x0 := s0.Args[0] 30278 if x0.Op != OpAMD64MOVBload { 30279 break 30280 } 30281 i0 := x0.AuxInt 30282 s := x0.Aux 30283 _ = x0.Args[1] 30284 p := x0.Args[0] 30285 mem := x0.Args[1] 30286 y := or.Args[1] 30287 s1 := v.Args[1] 30288 if s1.Op != OpAMD64SHLLconst { 30289 break 30290 } 30291 j1 := s1.AuxInt 30292 x1 := s1.Args[0] 30293 if x1.Op != OpAMD64MOVBload { 30294 break 30295 } 30296 i1 := x1.AuxInt 30297 if x1.Aux != s { 30298 break 30299 } 30300 _ = x1.Args[1] 30301 if p != x1.Args[0] { 30302 break 30303 } 30304 if mem != x1.Args[1] { 30305 break 30306 } 30307 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) 
&& clobber(s0) && clobber(s1) && clobber(or)) { 30308 break 30309 } 30310 b = mergePoint(b, x0, x1) 30311 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 30312 v.reset(OpCopy) 30313 v.AddArg(v0) 30314 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 30315 v1.AuxInt = j0 30316 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 30317 v2.AuxInt = i0 30318 v2.Aux = s 30319 v2.AddArg(p) 30320 v2.AddArg(mem) 30321 v1.AddArg(v2) 30322 v0.AddArg(v1) 30323 v0.AddArg(y) 30324 return true 30325 } 30326 return false 30327 } 30328 func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool { 30329 b := v.Block 30330 _ = b 30331 typ := &b.Func.Config.Types 30332 _ = typ 30333 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 30334 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30335 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30336 for { 30337 _ = v.Args[1] 30338 or := v.Args[0] 30339 if or.Op != OpAMD64ORL { 30340 break 30341 } 30342 _ = or.Args[1] 30343 y := or.Args[0] 30344 s0 := or.Args[1] 30345 if s0.Op != OpAMD64SHLLconst { 30346 break 30347 } 30348 j0 := s0.AuxInt 30349 x0 := s0.Args[0] 30350 if x0.Op != OpAMD64MOVBload { 30351 break 30352 } 30353 i0 := x0.AuxInt 30354 s := x0.Aux 30355 _ = x0.Args[1] 30356 p := x0.Args[0] 30357 mem := x0.Args[1] 30358 s1 := v.Args[1] 30359 if s1.Op != OpAMD64SHLLconst { 30360 break 30361 } 30362 j1 := s1.AuxInt 30363 x1 := s1.Args[0] 30364 if x1.Op != OpAMD64MOVBload { 30365 break 30366 } 30367 i1 := x1.AuxInt 30368 if x1.Aux != s { 30369 break 30370 } 30371 _ = x1.Args[1] 30372 if p != x1.Args[0] { 30373 break 30374 } 30375 if mem != x1.Args[1] { 30376 break 30377 } 30378 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30379 break 30380 } 30381 b = mergePoint(b, x0, x1) 30382 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 30383 v.reset(OpCopy) 30384 v.AddArg(v0) 30385 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 30386 v1.AuxInt = j0 30387 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 30388 v2.AuxInt = i0 30389 v2.Aux = s 30390 v2.AddArg(p) 30391 v2.AddArg(mem) 30392 v1.AddArg(v2) 30393 v0.AddArg(v1) 30394 v0.AddArg(y) 30395 return true 30396 } 30397 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 30398 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30399 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30400 for { 30401 _ = v.Args[1] 30402 x0 := v.Args[0] 30403 if x0.Op != OpAMD64MOVBloadidx1 { 30404 break 30405 } 30406 i0 := x0.AuxInt 30407 s := x0.Aux 30408 _ = x0.Args[2] 30409 p := x0.Args[0] 30410 idx := x0.Args[1] 30411 mem := x0.Args[2] 30412 sh := v.Args[1] 30413 if sh.Op != OpAMD64SHLLconst { 30414 break 30415 } 30416 if sh.AuxInt != 8 { 30417 break 30418 } 30419 x1 := sh.Args[0] 30420 if x1.Op != OpAMD64MOVBloadidx1 { 30421 break 30422 } 30423 i1 := x1.AuxInt 30424 if x1.Aux != s { 30425 break 30426 } 30427 _ = x1.Args[2] 30428 if p != x1.Args[0] { 30429 break 
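// Note: the ORL_60..ORL_90 helpers below continue the little-endian
// load-combining rules for indexed addressing. As an illustrative sketch
// (not a generated rule; b and i are hypothetical names), Go source like
//
//	u := uint16(b[i]) | uint16(b[i+1])<<8
//
// matches (ORL x0:(MOVBloadidx1 ...) sh:(SHLLconst [8] x1:(MOVBloadidx1 ...)))
// and is rewritten to a single MOVWloadidx1 when both loads have one use,
// share the same base, index, and memory, and sit at adjacent offsets.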
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
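// Note: ORL and the loadidx1 addressing ops are commutative, so the
// generated matcher enumerates every operand order explicitly; the rules
// in ORL_70 below are permutations of one logical rewrite, sketched here
// in rule notation (illustrative, mirroring the match comments):
//
//	(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem)
//	     sh:(SHLLconst [16] x1:(MOVWloadidx1 [i0+2] {s} p idx mem)))
//	-> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)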
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
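// Note: the (ORL s1:... or:(ORL s0:... y)) shapes handled below cover
// partially merged OR trees: two adjacent byte loads are fused into one
// word load while the unrelated term y is preserved. Sketch with
// hypothetical shift amounts:
//
//	(ORL (SHLLconst [24] byte3) (ORL (SHLLconst [16] byte2) y))
//	-> (ORL (SHLLconst [16] word23) y)
//
// The j0 % 16 == 0 condition keeps the merged 16-bit word on a word
// boundary within the register, so later rules can widen it further.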
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
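// Note: besides the little-endian merges, ORL_90 below also recognizes
// byte-reversed pairs. As a hedged sketch (b is a hypothetical byte
// slice), a big-endian read such as
//
//	u := uint16(b[1]) | uint16(b[0])<<8
//
// matches (ORL x1:(MOVBload [i0+1] ...) sh:(SHLLconst [8] x0:(MOVBload [i0] ...)))
// and becomes (ROLWconst [8] (MOVWload [i0] ...)), one word load plus a
// byte swap; two such swapped word loads combine further into BSWAPL.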
32281 break 32282 } 32283 i1 := x1.AuxInt 32284 if x1.Aux != s { 32285 break 32286 } 32287 _ = x1.Args[2] 32288 if idx != x1.Args[0] { 32289 break 32290 } 32291 if p != x1.Args[1] { 32292 break 32293 } 32294 if mem != x1.Args[2] { 32295 break 32296 } 32297 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32298 break 32299 } 32300 b = mergePoint(b, x0, x1) 32301 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32302 v.reset(OpCopy) 32303 v.AddArg(v0) 32304 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32305 v1.AuxInt = j0 32306 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32307 v2.AuxInt = i0 32308 v2.Aux = s 32309 v2.AddArg(p) 32310 v2.AddArg(idx) 32311 v2.AddArg(mem) 32312 v1.AddArg(v2) 32313 v0.AddArg(v1) 32314 v0.AddArg(y) 32315 return true 32316 } 32317 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 32318 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32319 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 32320 for { 32321 _ = v.Args[1] 32322 or := v.Args[0] 32323 if or.Op != OpAMD64ORL { 32324 break 32325 } 32326 _ = or.Args[1] 32327 y := or.Args[0] 32328 s0 := or.Args[1] 32329 if s0.Op != OpAMD64SHLLconst { 32330 break 32331 } 32332 j0 := s0.AuxInt 32333 x0 := s0.Args[0] 32334 if x0.Op != OpAMD64MOVBloadidx1 { 32335 break 32336 } 32337 i0 := x0.AuxInt 32338 s := x0.Aux 32339 _ = x0.Args[2] 32340 idx := x0.Args[0] 32341 p := x0.Args[1] 32342 mem := x0.Args[2] 32343 s1 := v.Args[1] 32344 if s1.Op != OpAMD64SHLLconst { 32345 break 32346 } 32347 j1 := s1.AuxInt 32348 x1 := s1.Args[0] 32349 if x1.Op != OpAMD64MOVBloadidx1 { 32350 break 32351 } 32352 i1 := x1.AuxInt 32353 if x1.Aux != s { 32354 break 32355 } 32356 _ = x1.Args[2] 32357 if idx != x1.Args[0] { 32358 break 32359 } 32360 if p != x1.Args[1] { 32361 break 32362 } 32363 if mem != x1.Args[2] { 32364 break 32365 } 32366 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32367 break 32368 } 32369 b = mergePoint(b, x0, x1) 32370 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32371 v.reset(OpCopy) 32372 v.AddArg(v0) 32373 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32374 v1.AuxInt = j0 32375 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32376 v2.AuxInt = i0 32377 v2.Aux = s 32378 v2.AddArg(p) 32379 v2.AddArg(idx) 32380 v2.AddArg(mem) 32381 v1.AddArg(v2) 32382 v0.AddArg(v1) 32383 v0.AddArg(y) 32384 return true 32385 } 32386 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 32387 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 32388 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 32389 for { 32390 _ = v.Args[1] 32391 x1 := v.Args[0] 32392 if x1.Op != OpAMD64MOVBload { 32393 break 32394 } 32395 i1 := x1.AuxInt 32396 s := x1.Aux 32397 _ = 
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
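	// Editor's note (not generated): the two rules above recognize a big-endian
	// 2-byte load, mem[i0]<<8 | mem[i0+1]. On amd64 a plain MOVWload yields the
	// little-endian value mem[i0] | mem[i0+1]<<8, and ROLWconst [8] on a 16-bit
	// value swaps its two bytes, so the pair of byte loads becomes one word
	// load plus a byte swap. E.g. for mem = {0x12, 0x34} at i0: the matched
	// pattern computes 0x1234, MOVWload gives 0x3412, and ROLWconst [8] turns
	// that back into 0x1234.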
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
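// Editor's note (not generated): the last rules of ..._90 extend the same idea
// to 32 bits. Two byte-swapped word loads at offsets i0 and i0+2, combined with
// SHLLconst [16] and ORL, form a big-endian 4-byte load, which is rewritten to
// a single MOVLload followed by BSWAPL. The @mergePoint(b,x0,x1) prefix in each
// result places the new load in a block where both original loads are
// available, since the two halves of the match may sit in different blocks.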
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
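	// Editor's note (not generated): MOVBloadidx1 computes its address as p+idx,
	// so the pointer and index operands commute. Rather than canonicalizing the
	// operand order first, this generation of the rewriter enumerates every
	// p/idx order on both loads and both ORL argument orders, which is why the
	// eight preceding rules differ only in where p and idx appear.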
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
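	// Editor's sketch (not generated; illustration only): the byte-order
	// identities these load-combining rules rely on, written as plain Go for a
	// little-endian machine such as amd64:
	//
	//	func loadLE16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } // one MOVWload
	//	func loadBE16(b []byte) uint16 { return uint16(b[0])<<8 | uint16(b[1]) } // ROLWconst [8] of a MOVWload
	//	func loadBE32(b []byte) uint32 {
	//		return uint32(loadBE16(b))<<16 | uint32(loadBE16(b[2:])) // BSWAPL of a MOVLload
	//	}
	//
	// The BSWAPL rules above match exactly the loadBE32 shape: the word at the
	// lower offset i0 is the one shifted left by 16.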
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
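// Editor's note (not generated): ..._120 continues the same j1 == j0-8 merge
// for the remaining operand orders. Two helpers carry every cond above:
// mergePoint(b, x0, x1) picks a block where both loads are available (nil
// means no such block exists, so the rule does not fire), and clobber
// invalidates a matched value so dead-code elimination can remove it; clobber
// always reports true, which lets it be chained inside the boolean condition.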
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst
{ 34585 break 34586 } 34587 j1 := s1.AuxInt 34588 x1 := s1.Args[0] 34589 if x1.Op != OpAMD64MOVBloadidx1 { 34590 break 34591 } 34592 i1 := x1.AuxInt 34593 s := x1.Aux 34594 _ = x1.Args[2] 34595 idx := x1.Args[0] 34596 p := x1.Args[1] 34597 mem := x1.Args[2] 34598 y := or.Args[1] 34599 s0 := v.Args[1] 34600 if s0.Op != OpAMD64SHLLconst { 34601 break 34602 } 34603 j0 := s0.AuxInt 34604 x0 := s0.Args[0] 34605 if x0.Op != OpAMD64MOVBloadidx1 { 34606 break 34607 } 34608 i0 := x0.AuxInt 34609 if x0.Aux != s { 34610 break 34611 } 34612 _ = x0.Args[2] 34613 if p != x0.Args[0] { 34614 break 34615 } 34616 if idx != x0.Args[1] { 34617 break 34618 } 34619 if mem != x0.Args[2] { 34620 break 34621 } 34622 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34623 break 34624 } 34625 b = mergePoint(b, x0, x1) 34626 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34627 v.reset(OpCopy) 34628 v.AddArg(v0) 34629 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34630 v1.AuxInt = j1 34631 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34632 v2.AuxInt = 8 34633 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34634 v3.AuxInt = i0 34635 v3.Aux = s 34636 v3.AddArg(p) 34637 v3.AddArg(idx) 34638 v3.AddArg(mem) 34639 v2.AddArg(v3) 34640 v1.AddArg(v2) 34641 v0.AddArg(v1) 34642 v0.AddArg(y) 34643 return true 34644 } 34645 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 34646 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34647 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34648 for { 34649 _ = v.Args[1] 34650 or := v.Args[0] 34651 if or.Op != OpAMD64ORL { 34652 break 34653 } 34654 _ = or.Args[1] 34655 y := or.Args[0] 34656 s1 := or.Args[1] 34657 if s1.Op != OpAMD64SHLLconst { 34658 break 34659 } 34660 j1 := s1.AuxInt 34661 x1 := s1.Args[0] 34662 if x1.Op != OpAMD64MOVBloadidx1 { 34663 break 34664 } 34665 i1 := x1.AuxInt 34666 s := x1.Aux 34667 _ = x1.Args[2] 34668 p := x1.Args[0] 34669 idx := x1.Args[1] 34670 mem := x1.Args[2] 34671 s0 := v.Args[1] 34672 if s0.Op != OpAMD64SHLLconst { 34673 break 34674 } 34675 j0 := s0.AuxInt 34676 x0 := s0.Args[0] 34677 if x0.Op != OpAMD64MOVBloadidx1 { 34678 break 34679 } 34680 i0 := x0.AuxInt 34681 if x0.Aux != s { 34682 break 34683 } 34684 _ = x0.Args[2] 34685 if p != x0.Args[0] { 34686 break 34687 } 34688 if idx != x0.Args[1] { 34689 break 34690 } 34691 if mem != x0.Args[2] { 34692 break 34693 } 34694 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34695 break 34696 } 34697 b = mergePoint(b, x0, x1) 34698 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34699 v.reset(OpCopy) 34700 v.AddArg(v0) 34701 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34702 v1.AuxInt = j1 34703 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34704 v2.AuxInt = 8 34705 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34706 v3.AuxInt = i0 34707 
v3.Aux = s 34708 v3.AddArg(p) 34709 v3.AddArg(idx) 34710 v3.AddArg(mem) 34711 v2.AddArg(v3) 34712 v1.AddArg(v2) 34713 v0.AddArg(v1) 34714 v0.AddArg(y) 34715 return true 34716 } 34717 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 34718 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34719 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34720 for { 34721 _ = v.Args[1] 34722 or := v.Args[0] 34723 if or.Op != OpAMD64ORL { 34724 break 34725 } 34726 _ = or.Args[1] 34727 y := or.Args[0] 34728 s1 := or.Args[1] 34729 if s1.Op != OpAMD64SHLLconst { 34730 break 34731 } 34732 j1 := s1.AuxInt 34733 x1 := s1.Args[0] 34734 if x1.Op != OpAMD64MOVBloadidx1 { 34735 break 34736 } 34737 i1 := x1.AuxInt 34738 s := x1.Aux 34739 _ = x1.Args[2] 34740 idx := x1.Args[0] 34741 p := x1.Args[1] 34742 mem := x1.Args[2] 34743 s0 := v.Args[1] 34744 if s0.Op != OpAMD64SHLLconst { 34745 break 34746 } 34747 j0 := s0.AuxInt 34748 x0 := s0.Args[0] 34749 if x0.Op != OpAMD64MOVBloadidx1 { 34750 break 34751 } 34752 i0 := x0.AuxInt 34753 if x0.Aux != s { 34754 break 34755 } 34756 _ = x0.Args[2] 34757 if p != x0.Args[0] { 34758 break 34759 } 34760 if idx != x0.Args[1] { 34761 break 34762 } 34763 if mem != x0.Args[2] { 34764 break 34765 } 34766 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34767 break 34768 } 34769 b = mergePoint(b, x0, x1) 34770 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34771 v.reset(OpCopy) 34772 v.AddArg(v0) 34773 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34774 v1.AuxInt = j1 34775 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34776 v2.AuxInt = 8 34777 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34778 v3.AuxInt = i0 34779 v3.Aux = s 34780 v3.AddArg(p) 34781 v3.AddArg(idx) 34782 v3.AddArg(mem) 34783 v2.AddArg(v3) 34784 v1.AddArg(v2) 34785 v0.AddArg(v1) 34786 v0.AddArg(y) 34787 return true 34788 } 34789 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 34790 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34791 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34792 for { 34793 _ = v.Args[1] 34794 or := v.Args[0] 34795 if or.Op != OpAMD64ORL { 34796 break 34797 } 34798 _ = or.Args[1] 34799 s1 := or.Args[0] 34800 if s1.Op != OpAMD64SHLLconst { 34801 break 34802 } 34803 j1 := s1.AuxInt 34804 x1 := s1.Args[0] 34805 if x1.Op != OpAMD64MOVBloadidx1 { 34806 break 34807 } 34808 i1 := x1.AuxInt 34809 s := x1.Aux 34810 _ = x1.Args[2] 34811 p := x1.Args[0] 34812 idx := x1.Args[1] 34813 mem := x1.Args[2] 34814 y := or.Args[1] 34815 s0 := v.Args[1] 34816 if s0.Op != OpAMD64SHLLconst { 34817 break 34818 } 34819 j0 := s0.AuxInt 34820 x0 := s0.Args[0] 34821 if x0.Op != 
OpAMD64MOVBloadidx1 { 34822 break 34823 } 34824 i0 := x0.AuxInt 34825 if x0.Aux != s { 34826 break 34827 } 34828 _ = x0.Args[2] 34829 if idx != x0.Args[0] { 34830 break 34831 } 34832 if p != x0.Args[1] { 34833 break 34834 } 34835 if mem != x0.Args[2] { 34836 break 34837 } 34838 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34839 break 34840 } 34841 b = mergePoint(b, x0, x1) 34842 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34843 v.reset(OpCopy) 34844 v.AddArg(v0) 34845 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34846 v1.AuxInt = j1 34847 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34848 v2.AuxInt = 8 34849 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34850 v3.AuxInt = i0 34851 v3.Aux = s 34852 v3.AddArg(p) 34853 v3.AddArg(idx) 34854 v3.AddArg(mem) 34855 v2.AddArg(v3) 34856 v1.AddArg(v2) 34857 v0.AddArg(v1) 34858 v0.AddArg(y) 34859 return true 34860 } 34861 return false 34862 } 34863 func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { 34864 b := v.Block 34865 _ = b 34866 typ := &b.Func.Config.Types 34867 _ = typ 34868 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 34869 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34870 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34871 for { 34872 _ = v.Args[1] 34873 or := v.Args[0] 34874 if or.Op != OpAMD64ORL { 34875 break 34876 } 34877 _ = or.Args[1] 34878 s1 := or.Args[0] 34879 if s1.Op != OpAMD64SHLLconst { 34880 break 34881 } 34882 j1 := s1.AuxInt 34883 x1 := s1.Args[0] 34884 if x1.Op != OpAMD64MOVBloadidx1 { 34885 break 34886 } 34887 i1 := x1.AuxInt 34888 s := x1.Aux 34889 _ = x1.Args[2] 34890 idx := x1.Args[0] 34891 p := x1.Args[1] 34892 mem := x1.Args[2] 34893 y := or.Args[1] 34894 s0 := v.Args[1] 34895 if s0.Op != OpAMD64SHLLconst { 34896 break 34897 } 34898 j0 := s0.AuxInt 34899 x0 := s0.Args[0] 34900 if x0.Op != OpAMD64MOVBloadidx1 { 34901 break 34902 } 34903 i0 := x0.AuxInt 34904 if x0.Aux != s { 34905 break 34906 } 34907 _ = x0.Args[2] 34908 if idx != x0.Args[0] { 34909 break 34910 } 34911 if p != x0.Args[1] { 34912 break 34913 } 34914 if mem != x0.Args[2] { 34915 break 34916 } 34917 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34918 break 34919 } 34920 b = mergePoint(b, x0, x1) 34921 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34922 v.reset(OpCopy) 34923 v.AddArg(v0) 34924 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34925 v1.AuxInt = j1 34926 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34927 v2.AuxInt = 8 34928 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34929 v3.AuxInt = i0 34930 v3.Aux = s 34931 v3.AddArg(p) 34932 v3.AddArg(idx) 34933 v3.AddArg(mem) 34934 v2.AddArg(v3) 34935 v1.AddArg(v2) 34936 v0.AddArg(v1) 34937 v0.AddArg(y) 34938 return true 34939 } 34940 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx 
mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 34941 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34942 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34943 for { 34944 _ = v.Args[1] 34945 or := v.Args[0] 34946 if or.Op != OpAMD64ORL { 34947 break 34948 } 34949 _ = or.Args[1] 34950 y := or.Args[0] 34951 s1 := or.Args[1] 34952 if s1.Op != OpAMD64SHLLconst { 34953 break 34954 } 34955 j1 := s1.AuxInt 34956 x1 := s1.Args[0] 34957 if x1.Op != OpAMD64MOVBloadidx1 { 34958 break 34959 } 34960 i1 := x1.AuxInt 34961 s := x1.Aux 34962 _ = x1.Args[2] 34963 p := x1.Args[0] 34964 idx := x1.Args[1] 34965 mem := x1.Args[2] 34966 s0 := v.Args[1] 34967 if s0.Op != OpAMD64SHLLconst { 34968 break 34969 } 34970 j0 := s0.AuxInt 34971 x0 := s0.Args[0] 34972 if x0.Op != OpAMD64MOVBloadidx1 { 34973 break 34974 } 34975 i0 := x0.AuxInt 34976 if x0.Aux != s { 34977 break 34978 } 34979 _ = x0.Args[2] 34980 if idx != x0.Args[0] { 34981 break 34982 } 34983 if p != x0.Args[1] { 34984 break 34985 } 34986 if mem != x0.Args[2] { 34987 break 34988 } 34989 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34990 break 34991 } 34992 b = mergePoint(b, x0, x1) 34993 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34994 v.reset(OpCopy) 34995 v.AddArg(v0) 34996 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34997 v1.AuxInt = j1 34998 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34999 v2.AuxInt = 8 35000 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 35001 v3.AuxInt = i0 35002 v3.Aux = s 35003 v3.AddArg(p) 35004 v3.AddArg(idx) 35005 v3.AddArg(mem) 35006 v2.AddArg(v3) 35007 v1.AddArg(v2) 35008 v0.AddArg(v1) 35009 v0.AddArg(y) 35010 return true 35011 } 35012 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 35013 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35014 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 35015 for { 35016 _ = v.Args[1] 35017 or := v.Args[0] 35018 if or.Op != OpAMD64ORL { 35019 break 35020 } 35021 _ = or.Args[1] 35022 y := or.Args[0] 35023 s1 := or.Args[1] 35024 if s1.Op != OpAMD64SHLLconst { 35025 break 35026 } 35027 j1 := s1.AuxInt 35028 x1 := s1.Args[0] 35029 if x1.Op != OpAMD64MOVBloadidx1 { 35030 break 35031 } 35032 i1 := x1.AuxInt 35033 s := x1.Aux 35034 _ = x1.Args[2] 35035 idx := x1.Args[0] 35036 p := x1.Args[1] 35037 mem := x1.Args[2] 35038 s0 := v.Args[1] 35039 if s0.Op != OpAMD64SHLLconst { 35040 break 35041 } 35042 j0 := s0.AuxInt 35043 x0 := s0.Args[0] 35044 if x0.Op != OpAMD64MOVBloadidx1 { 35045 break 35046 } 35047 i0 := x0.AuxInt 35048 if x0.Aux != s { 35049 break 35050 } 35051 _ = x0.Args[2] 35052 if idx != x0.Args[0] { 35053 break 35054 } 35055 if p != x0.Args[1] { 35056 break 35057 } 35058 if mem != x0.Args[2] { 35059 break 35060 } 
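// NOTE (editorial annotation, not generator output): the long run of
// near-identical cases around this point exists because ORL is commutative
// and the two index operands of MOVBloadidx1 are interchangeable, so
// gen/AMD64.rules expands one logical rule into every operand ordering.
// Each case folds two byte loads at adjacent offsets (i1 == i0+1) that are
// combined in big-endian order (j1 == j0-8) into a single 16-bit load plus
// a byte swap (ROLWconst [8]). A hedged sketch of Go source whose lowered
// SSA feeds this family (the function is illustrative, not from the
// compiler):
//
//	func load32be(b []byte, i int) uint32 {
//		// each adjacent byte pair merges into MOVWloadidx1 + ROLW $8
//		return uint32(b[i])<<24 | uint32(b[i+1])<<16 |
//			uint32(b[i+2])<<8 | uint32(b[i+3])
//	}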
35061 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35062 break 35063 } 35064 b = mergePoint(b, x0, x1) 35065 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 35066 v.reset(OpCopy) 35067 v.AddArg(v0) 35068 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 35069 v1.AuxInt = j1 35070 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 35071 v2.AuxInt = 8 35072 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 35073 v3.AuxInt = i0 35074 v3.Aux = s 35075 v3.AddArg(p) 35076 v3.AddArg(idx) 35077 v3.AddArg(mem) 35078 v2.AddArg(v3) 35079 v1.AddArg(v2) 35080 v0.AddArg(v1) 35081 v0.AddArg(y) 35082 return true 35083 } 35084 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 35085 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 35086 // result: (ORLload x [off] {sym} ptr mem) 35087 for { 35088 _ = v.Args[1] 35089 x := v.Args[0] 35090 l := v.Args[1] 35091 if l.Op != OpAMD64MOVLload { 35092 break 35093 } 35094 off := l.AuxInt 35095 sym := l.Aux 35096 _ = l.Args[1] 35097 ptr := l.Args[0] 35098 mem := l.Args[1] 35099 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 35100 break 35101 } 35102 v.reset(OpAMD64ORLload) 35103 v.AuxInt = off 35104 v.Aux = sym 35105 v.AddArg(x) 35106 v.AddArg(ptr) 35107 v.AddArg(mem) 35108 return true 35109 } 35110 // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) 35111 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 35112 // result: (ORLload x [off] {sym} ptr mem) 35113 for { 35114 _ = v.Args[1] 35115 l := v.Args[0] 35116 if l.Op != OpAMD64MOVLload { 35117 break 35118 } 35119 off := l.AuxInt 35120 sym := l.Aux 35121 _ = l.Args[1] 35122 ptr := l.Args[0] 35123 mem := l.Args[1] 35124 x := v.Args[1] 35125 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 35126 break 35127 } 35128 v.reset(OpAMD64ORLload) 35129 v.AuxInt = off 35130 v.Aux = sym 35131 v.AddArg(x) 35132 v.AddArg(ptr) 35133 v.AddArg(mem) 35134 return true 35135 } 35136 return false 35137 } 35138 func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { 35139 b := v.Block 35140 _ = b 35141 config := b.Func.Config 35142 _ = config 35143 // match: (ORLconst [c] x) 35144 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 35145 // result: (BTSLconst [log2uint32(c)] x) 35146 for { 35147 c := v.AuxInt 35148 x := v.Args[0] 35149 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 35150 break 35151 } 35152 v.reset(OpAMD64BTSLconst) 35153 v.AuxInt = log2uint32(c) 35154 v.AddArg(x) 35155 return true 35156 } 35157 // match: (ORLconst [c] (ORLconst [d] x)) 35158 // cond: 35159 // result: (ORLconst [c | d] x) 35160 for { 35161 c := v.AuxInt 35162 v_0 := v.Args[0] 35163 if v_0.Op != OpAMD64ORLconst { 35164 break 35165 } 35166 d := v_0.AuxInt 35167 x := v_0.Args[0] 35168 v.reset(OpAMD64ORLconst) 35169 v.AuxInt = c | d 35170 v.AddArg(x) 35171 return true 35172 } 35173 // match: (ORLconst [c] (BTSLconst [d] x)) 35174 // cond: 35175 // result: (ORLconst [c | 1<<uint32(d)] x) 35176 for { 35177 c := v.AuxInt 35178 v_0 := v.Args[0] 35179 if v_0.Op != OpAMD64BTSLconst { 35180 break 35181 } 35182 d := v_0.AuxInt 35183 x := v_0.Args[0] 35184 v.reset(OpAMD64ORLconst) 35185 v.AuxInt = c | 1<<uint32(d) 35186 v.AddArg(x) 35187 return true 35188 } 35189 // match: (ORLconst [c] x) 35190 // cond: int32(c)==0 35191 // result: x 35192 for { 35193 c := v.AuxInt 35194 x := v.Args[0] 35195 if !(int32(c) == 0) { 35196 break 
35197 } 35198 v.reset(OpCopy) 35199 v.Type = x.Type 35200 v.AddArg(x) 35201 return true 35202 } 35203 // match: (ORLconst [c] _) 35204 // cond: int32(c)==-1 35205 // result: (MOVLconst [-1]) 35206 for { 35207 c := v.AuxInt 35208 if !(int32(c) == -1) { 35209 break 35210 } 35211 v.reset(OpAMD64MOVLconst) 35212 v.AuxInt = -1 35213 return true 35214 } 35215 // match: (ORLconst [c] (MOVLconst [d])) 35216 // cond: 35217 // result: (MOVLconst [c|d]) 35218 for { 35219 c := v.AuxInt 35220 v_0 := v.Args[0] 35221 if v_0.Op != OpAMD64MOVLconst { 35222 break 35223 } 35224 d := v_0.AuxInt 35225 v.reset(OpAMD64MOVLconst) 35226 v.AuxInt = c | d 35227 return true 35228 } 35229 return false 35230 } 35231 func rewriteValueAMD64_OpAMD64ORLconstmodify_0(v *Value) bool { 35232 // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 35233 // cond: ValAndOff(valoff1).canAdd(off2) 35234 // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 35235 for { 35236 valoff1 := v.AuxInt 35237 sym := v.Aux 35238 _ = v.Args[1] 35239 v_0 := v.Args[0] 35240 if v_0.Op != OpAMD64ADDQconst { 35241 break 35242 } 35243 off2 := v_0.AuxInt 35244 base := v_0.Args[0] 35245 mem := v.Args[1] 35246 if !(ValAndOff(valoff1).canAdd(off2)) { 35247 break 35248 } 35249 v.reset(OpAMD64ORLconstmodify) 35250 v.AuxInt = ValAndOff(valoff1).add(off2) 35251 v.Aux = sym 35252 v.AddArg(base) 35253 v.AddArg(mem) 35254 return true 35255 } 35256 // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 35257 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 35258 // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 35259 for { 35260 valoff1 := v.AuxInt 35261 sym1 := v.Aux 35262 _ = v.Args[1] 35263 v_0 := v.Args[0] 35264 if v_0.Op != OpAMD64LEAQ { 35265 break 35266 } 35267 off2 := v_0.AuxInt 35268 sym2 := v_0.Aux 35269 base := v_0.Args[0] 35270 mem := v.Args[1] 35271 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 35272 break 35273 } 35274 v.reset(OpAMD64ORLconstmodify) 35275 v.AuxInt = ValAndOff(valoff1).add(off2) 35276 v.Aux = mergeSym(sym1, sym2) 35277 v.AddArg(base) 35278 v.AddArg(mem) 35279 return true 35280 } 35281 return false 35282 } 35283 func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool { 35284 b := v.Block 35285 _ = b 35286 typ := &b.Func.Config.Types 35287 _ = typ 35288 // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem) 35289 // cond: is32Bit(off1+off2) 35290 // result: (ORLload [off1+off2] {sym} val base mem) 35291 for { 35292 off1 := v.AuxInt 35293 sym := v.Aux 35294 _ = v.Args[2] 35295 val := v.Args[0] 35296 v_1 := v.Args[1] 35297 if v_1.Op != OpAMD64ADDQconst { 35298 break 35299 } 35300 off2 := v_1.AuxInt 35301 base := v_1.Args[0] 35302 mem := v.Args[2] 35303 if !(is32Bit(off1 + off2)) { 35304 break 35305 } 35306 v.reset(OpAMD64ORLload) 35307 v.AuxInt = off1 + off2 35308 v.Aux = sym 35309 v.AddArg(val) 35310 v.AddArg(base) 35311 v.AddArg(mem) 35312 return true 35313 } 35314 // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 35315 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 35316 // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 35317 for { 35318 off1 := v.AuxInt 35319 sym1 := v.Aux 35320 _ = v.Args[2] 35321 val := v.Args[0] 35322 v_1 := v.Args[1] 35323 if v_1.Op != OpAMD64LEAQ { 35324 break 35325 } 35326 off2 := v_1.AuxInt 35327 sym2 := v_1.Aux 35328 base := v_1.Args[0] 35329 mem := v.Args[2] 35330 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 
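// NOTE (editorial annotation): this rule, like the ADDQconst rule above it
// and the ORLconstmodify rules before that, folds an address computation
// into the instruction's [off] {sym} addressing mode. is32Bit (or
// ValAndOff.canAdd in the constmodify forms) checks that the combined
// displacement still fits the signed 32-bit field, and canMergeSym refuses
// to combine two distinct symbols. Roughly, assuming a package-level array
// g, code such as
//
//	var g [8]uint32
//
//	func set() { g[3] |= 0x7f } // the LEAQ {g} folds into one ORLconstmodify
//
// ends up with the element offset carried in the aux fields.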
35331 break 35332 } 35333 v.reset(OpAMD64ORLload) 35334 v.AuxInt = off1 + off2 35335 v.Aux = mergeSym(sym1, sym2) 35336 v.AddArg(val) 35337 v.AddArg(base) 35338 v.AddArg(mem) 35339 return true 35340 } 35341 // match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 35342 // cond: 35343 // result: ( ORL x (MOVLf2i y)) 35344 for { 35345 off := v.AuxInt 35346 sym := v.Aux 35347 _ = v.Args[2] 35348 x := v.Args[0] 35349 ptr := v.Args[1] 35350 v_2 := v.Args[2] 35351 if v_2.Op != OpAMD64MOVSSstore { 35352 break 35353 } 35354 if v_2.AuxInt != off { 35355 break 35356 } 35357 if v_2.Aux != sym { 35358 break 35359 } 35360 _ = v_2.Args[2] 35361 if ptr != v_2.Args[0] { 35362 break 35363 } 35364 y := v_2.Args[1] 35365 v.reset(OpAMD64ORL) 35366 v.AddArg(x) 35367 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) 35368 v0.AddArg(y) 35369 v.AddArg(v0) 35370 return true 35371 } 35372 return false 35373 } 35374 func rewriteValueAMD64_OpAMD64ORLmodify_0(v *Value) bool { 35375 // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 35376 // cond: is32Bit(off1+off2) 35377 // result: (ORLmodify [off1+off2] {sym} base val mem) 35378 for { 35379 off1 := v.AuxInt 35380 sym := v.Aux 35381 _ = v.Args[2] 35382 v_0 := v.Args[0] 35383 if v_0.Op != OpAMD64ADDQconst { 35384 break 35385 } 35386 off2 := v_0.AuxInt 35387 base := v_0.Args[0] 35388 val := v.Args[1] 35389 mem := v.Args[2] 35390 if !(is32Bit(off1 + off2)) { 35391 break 35392 } 35393 v.reset(OpAMD64ORLmodify) 35394 v.AuxInt = off1 + off2 35395 v.Aux = sym 35396 v.AddArg(base) 35397 v.AddArg(val) 35398 v.AddArg(mem) 35399 return true 35400 } 35401 // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 35402 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 35403 // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 35404 for { 35405 off1 := v.AuxInt 35406 sym1 := v.Aux 35407 _ = v.Args[2] 35408 v_0 := v.Args[0] 35409 if v_0.Op != OpAMD64LEAQ { 35410 break 35411 } 35412 off2 := v_0.AuxInt 35413 sym2 := v_0.Aux 35414 base := v_0.Args[0] 35415 val := v.Args[1] 35416 mem := v.Args[2] 35417 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 35418 break 35419 } 35420 v.reset(OpAMD64ORLmodify) 35421 v.AuxInt = off1 + off2 35422 v.Aux = mergeSym(sym1, sym2) 35423 v.AddArg(base) 35424 v.AddArg(val) 35425 v.AddArg(mem) 35426 return true 35427 } 35428 return false 35429 } 35430 func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { 35431 b := v.Block 35432 _ = b 35433 config := b.Func.Config 35434 _ = config 35435 // match: (ORQ (SHLQ (MOVQconst [1]) y) x) 35436 // cond: !config.nacl 35437 // result: (BTSQ x y) 35438 for { 35439 _ = v.Args[1] 35440 v_0 := v.Args[0] 35441 if v_0.Op != OpAMD64SHLQ { 35442 break 35443 } 35444 _ = v_0.Args[1] 35445 v_0_0 := v_0.Args[0] 35446 if v_0_0.Op != OpAMD64MOVQconst { 35447 break 35448 } 35449 if v_0_0.AuxInt != 1 { 35450 break 35451 } 35452 y := v_0.Args[1] 35453 x := v.Args[1] 35454 if !(!config.nacl) { 35455 break 35456 } 35457 v.reset(OpAMD64BTSQ) 35458 v.AddArg(x) 35459 v.AddArg(y) 35460 return true 35461 } 35462 // match: (ORQ x (SHLQ (MOVQconst [1]) y)) 35463 // cond: !config.nacl 35464 // result: (BTSQ x y) 35465 for { 35466 _ = v.Args[1] 35467 x := v.Args[0] 35468 v_1 := v.Args[1] 35469 if v_1.Op != OpAMD64SHLQ { 35470 break 35471 } 35472 _ = v_1.Args[1] 35473 v_1_0 := v_1.Args[0] 35474 if v_1_0.Op != OpAMD64MOVQconst { 35475 break 35476 } 35477 if v_1_0.AuxInt != 1 { 35478 break 35479 } 35480 y := v_1.Args[1] 35481 if !(!config.nacl) { 35482 break 35483 } 35484 
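// NOTE (editorial annotation): this rule and its neighbors recognize
// bit-set idioms: x | (1 << y) becomes BTSQ x y, and x | c for a constant
// power of two becomes BTSQconst [log2(c)] x. The uint64(c) >= 128 guard
// leaves small constants to the shorter OR-with-immediate encoding, and
// !config.nacl keeps the BT-family instructions off NaCl. A hedged sketch
// of source that triggers both forms:
//
//	func setBits(x uint64, n uint) uint64 {
//		x |= 1 << (n & 63) // register form: BTSQ
//		x |= 1 << 40       // constant form: BTSQconst $40
//		return x
//	}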
v.reset(OpAMD64BTSQ) 35485 v.AddArg(x) 35486 v.AddArg(y) 35487 return true 35488 } 35489 // match: (ORQ (MOVQconst [c]) x) 35490 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 35491 // result: (BTSQconst [log2(c)] x) 35492 for { 35493 _ = v.Args[1] 35494 v_0 := v.Args[0] 35495 if v_0.Op != OpAMD64MOVQconst { 35496 break 35497 } 35498 c := v_0.AuxInt 35499 x := v.Args[1] 35500 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 35501 break 35502 } 35503 v.reset(OpAMD64BTSQconst) 35504 v.AuxInt = log2(c) 35505 v.AddArg(x) 35506 return true 35507 } 35508 // match: (ORQ x (MOVQconst [c])) 35509 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 35510 // result: (BTSQconst [log2(c)] x) 35511 for { 35512 _ = v.Args[1] 35513 x := v.Args[0] 35514 v_1 := v.Args[1] 35515 if v_1.Op != OpAMD64MOVQconst { 35516 break 35517 } 35518 c := v_1.AuxInt 35519 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 35520 break 35521 } 35522 v.reset(OpAMD64BTSQconst) 35523 v.AuxInt = log2(c) 35524 v.AddArg(x) 35525 return true 35526 } 35527 // match: (ORQ x (MOVQconst [c])) 35528 // cond: is32Bit(c) 35529 // result: (ORQconst [c] x) 35530 for { 35531 _ = v.Args[1] 35532 x := v.Args[0] 35533 v_1 := v.Args[1] 35534 if v_1.Op != OpAMD64MOVQconst { 35535 break 35536 } 35537 c := v_1.AuxInt 35538 if !(is32Bit(c)) { 35539 break 35540 } 35541 v.reset(OpAMD64ORQconst) 35542 v.AuxInt = c 35543 v.AddArg(x) 35544 return true 35545 } 35546 // match: (ORQ (MOVQconst [c]) x) 35547 // cond: is32Bit(c) 35548 // result: (ORQconst [c] x) 35549 for { 35550 _ = v.Args[1] 35551 v_0 := v.Args[0] 35552 if v_0.Op != OpAMD64MOVQconst { 35553 break 35554 } 35555 c := v_0.AuxInt 35556 x := v.Args[1] 35557 if !(is32Bit(c)) { 35558 break 35559 } 35560 v.reset(OpAMD64ORQconst) 35561 v.AuxInt = c 35562 v.AddArg(x) 35563 return true 35564 } 35565 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) 35566 // cond: d==64-c 35567 // result: (ROLQconst x [c]) 35568 for { 35569 _ = v.Args[1] 35570 v_0 := v.Args[0] 35571 if v_0.Op != OpAMD64SHLQconst { 35572 break 35573 } 35574 c := v_0.AuxInt 35575 x := v_0.Args[0] 35576 v_1 := v.Args[1] 35577 if v_1.Op != OpAMD64SHRQconst { 35578 break 35579 } 35580 d := v_1.AuxInt 35581 if x != v_1.Args[0] { 35582 break 35583 } 35584 if !(d == 64-c) { 35585 break 35586 } 35587 v.reset(OpAMD64ROLQconst) 35588 v.AuxInt = c 35589 v.AddArg(x) 35590 return true 35591 } 35592 // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) 35593 // cond: d==64-c 35594 // result: (ROLQconst x [c]) 35595 for { 35596 _ = v.Args[1] 35597 v_0 := v.Args[0] 35598 if v_0.Op != OpAMD64SHRQconst { 35599 break 35600 } 35601 d := v_0.AuxInt 35602 x := v_0.Args[0] 35603 v_1 := v.Args[1] 35604 if v_1.Op != OpAMD64SHLQconst { 35605 break 35606 } 35607 c := v_1.AuxInt 35608 if x != v_1.Args[0] { 35609 break 35610 } 35611 if !(d == 64-c) { 35612 break 35613 } 35614 v.reset(OpAMD64ROLQconst) 35615 v.AuxInt = c 35616 v.AddArg(x) 35617 return true 35618 } 35619 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 35620 // cond: 35621 // result: (ROLQ x y) 35622 for { 35623 _ = v.Args[1] 35624 v_0 := v.Args[0] 35625 if v_0.Op != OpAMD64SHLQ { 35626 break 35627 } 35628 _ = v_0.Args[1] 35629 x := v_0.Args[0] 35630 y := v_0.Args[1] 35631 v_1 := v.Args[1] 35632 if v_1.Op != OpAMD64ANDQ { 35633 break 35634 } 35635 _ = v_1.Args[1] 35636 v_1_0 := v_1.Args[0] 35637 if v_1_0.Op != OpAMD64SHRQ { 35638 break 35639 } 35640 _ = 
v_1_0.Args[1] 35641 if x != v_1_0.Args[0] { 35642 break 35643 } 35644 v_1_0_1 := v_1_0.Args[1] 35645 if v_1_0_1.Op != OpAMD64NEGQ { 35646 break 35647 } 35648 if y != v_1_0_1.Args[0] { 35649 break 35650 } 35651 v_1_1 := v_1.Args[1] 35652 if v_1_1.Op != OpAMD64SBBQcarrymask { 35653 break 35654 } 35655 v_1_1_0 := v_1_1.Args[0] 35656 if v_1_1_0.Op != OpAMD64CMPQconst { 35657 break 35658 } 35659 if v_1_1_0.AuxInt != 64 { 35660 break 35661 } 35662 v_1_1_0_0 := v_1_1_0.Args[0] 35663 if v_1_1_0_0.Op != OpAMD64NEGQ { 35664 break 35665 } 35666 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 35667 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 35668 break 35669 } 35670 if v_1_1_0_0_0.AuxInt != -64 { 35671 break 35672 } 35673 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 35674 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 35675 break 35676 } 35677 if v_1_1_0_0_0_0.AuxInt != 63 { 35678 break 35679 } 35680 if y != v_1_1_0_0_0_0.Args[0] { 35681 break 35682 } 35683 v.reset(OpAMD64ROLQ) 35684 v.AddArg(x) 35685 v.AddArg(y) 35686 return true 35687 } 35688 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y)))) 35689 // cond: 35690 // result: (ROLQ x y) 35691 for { 35692 _ = v.Args[1] 35693 v_0 := v.Args[0] 35694 if v_0.Op != OpAMD64SHLQ { 35695 break 35696 } 35697 _ = v_0.Args[1] 35698 x := v_0.Args[0] 35699 y := v_0.Args[1] 35700 v_1 := v.Args[1] 35701 if v_1.Op != OpAMD64ANDQ { 35702 break 35703 } 35704 _ = v_1.Args[1] 35705 v_1_0 := v_1.Args[0] 35706 if v_1_0.Op != OpAMD64SBBQcarrymask { 35707 break 35708 } 35709 v_1_0_0 := v_1_0.Args[0] 35710 if v_1_0_0.Op != OpAMD64CMPQconst { 35711 break 35712 } 35713 if v_1_0_0.AuxInt != 64 { 35714 break 35715 } 35716 v_1_0_0_0 := v_1_0_0.Args[0] 35717 if v_1_0_0_0.Op != OpAMD64NEGQ { 35718 break 35719 } 35720 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 35721 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 35722 break 35723 } 35724 if v_1_0_0_0_0.AuxInt != -64 { 35725 break 35726 } 35727 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 35728 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 35729 break 35730 } 35731 if v_1_0_0_0_0_0.AuxInt != 63 { 35732 break 35733 } 35734 if y != v_1_0_0_0_0_0.Args[0] { 35735 break 35736 } 35737 v_1_1 := v_1.Args[1] 35738 if v_1_1.Op != OpAMD64SHRQ { 35739 break 35740 } 35741 _ = v_1_1.Args[1] 35742 if x != v_1_1.Args[0] { 35743 break 35744 } 35745 v_1_1_1 := v_1_1.Args[1] 35746 if v_1_1_1.Op != OpAMD64NEGQ { 35747 break 35748 } 35749 if y != v_1_1_1.Args[0] { 35750 break 35751 } 35752 v.reset(OpAMD64ROLQ) 35753 v.AddArg(x) 35754 v.AddArg(y) 35755 return true 35756 } 35757 return false 35758 } 35759 func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { 35760 // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y)) 35761 // cond: 35762 // result: (ROLQ x y) 35763 for { 35764 _ = v.Args[1] 35765 v_0 := v.Args[0] 35766 if v_0.Op != OpAMD64ANDQ { 35767 break 35768 } 35769 _ = v_0.Args[1] 35770 v_0_0 := v_0.Args[0] 35771 if v_0_0.Op != OpAMD64SHRQ { 35772 break 35773 } 35774 _ = v_0_0.Args[1] 35775 x := v_0_0.Args[0] 35776 v_0_0_1 := v_0_0.Args[1] 35777 if v_0_0_1.Op != OpAMD64NEGQ { 35778 break 35779 } 35780 y := v_0_0_1.Args[0] 35781 v_0_1 := v_0.Args[1] 35782 if v_0_1.Op != OpAMD64SBBQcarrymask { 35783 break 35784 } 35785 v_0_1_0 := v_0_1.Args[0] 35786 if v_0_1_0.Op != OpAMD64CMPQconst { 35787 break 35788 } 35789 if v_0_1_0.AuxInt != 64 { 35790 break 35791 } 35792 v_0_1_0_0 := v_0_1_0.Args[0] 35793 if v_0_1_0_0.Op != OpAMD64NEGQ { 35794 break 35795 } 35796 
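// NOTE (editorial annotation): the SBBQcarrymask/CMPQconst subtree matched
// here is the lowered bounds mask required by Go's shift semantics: it is
// all ones when 64-(y&63) is below 64 (that is, when y&63 != 0) and zero
// otherwise, so the right-shift term drops out exactly when the rotate
// count is a multiple of 64. The whole masked expression is the standard
// rotate idiom, collapsed to a single ROLQ. A hedged sketch of source that
// lowers to this shape (the same body as math/bits.RotateLeft64):
//
//	func rotl64(x uint64, k uint) uint64 {
//		s := k & 63
//		return x<<s | x>>(64-s) // rewritten to one ROLQ
//	}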
v_0_1_0_0_0 := v_0_1_0_0.Args[0] 35797 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 35798 break 35799 } 35800 if v_0_1_0_0_0.AuxInt != -64 { 35801 break 35802 } 35803 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 35804 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 35805 break 35806 } 35807 if v_0_1_0_0_0_0.AuxInt != 63 { 35808 break 35809 } 35810 if y != v_0_1_0_0_0_0.Args[0] { 35811 break 35812 } 35813 v_1 := v.Args[1] 35814 if v_1.Op != OpAMD64SHLQ { 35815 break 35816 } 35817 _ = v_1.Args[1] 35818 if x != v_1.Args[0] { 35819 break 35820 } 35821 if y != v_1.Args[1] { 35822 break 35823 } 35824 v.reset(OpAMD64ROLQ) 35825 v.AddArg(x) 35826 v.AddArg(y) 35827 return true 35828 } 35829 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y)) 35830 // cond: 35831 // result: (ROLQ x y) 35832 for { 35833 _ = v.Args[1] 35834 v_0 := v.Args[0] 35835 if v_0.Op != OpAMD64ANDQ { 35836 break 35837 } 35838 _ = v_0.Args[1] 35839 v_0_0 := v_0.Args[0] 35840 if v_0_0.Op != OpAMD64SBBQcarrymask { 35841 break 35842 } 35843 v_0_0_0 := v_0_0.Args[0] 35844 if v_0_0_0.Op != OpAMD64CMPQconst { 35845 break 35846 } 35847 if v_0_0_0.AuxInt != 64 { 35848 break 35849 } 35850 v_0_0_0_0 := v_0_0_0.Args[0] 35851 if v_0_0_0_0.Op != OpAMD64NEGQ { 35852 break 35853 } 35854 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 35855 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 35856 break 35857 } 35858 if v_0_0_0_0_0.AuxInt != -64 { 35859 break 35860 } 35861 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 35862 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 35863 break 35864 } 35865 if v_0_0_0_0_0_0.AuxInt != 63 { 35866 break 35867 } 35868 y := v_0_0_0_0_0_0.Args[0] 35869 v_0_1 := v_0.Args[1] 35870 if v_0_1.Op != OpAMD64SHRQ { 35871 break 35872 } 35873 _ = v_0_1.Args[1] 35874 x := v_0_1.Args[0] 35875 v_0_1_1 := v_0_1.Args[1] 35876 if v_0_1_1.Op != OpAMD64NEGQ { 35877 break 35878 } 35879 if y != v_0_1_1.Args[0] { 35880 break 35881 } 35882 v_1 := v.Args[1] 35883 if v_1.Op != OpAMD64SHLQ { 35884 break 35885 } 35886 _ = v_1.Args[1] 35887 if x != v_1.Args[0] { 35888 break 35889 } 35890 if y != v_1.Args[1] { 35891 break 35892 } 35893 v.reset(OpAMD64ROLQ) 35894 v.AddArg(x) 35895 v.AddArg(y) 35896 return true 35897 } 35898 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 35899 // cond: 35900 // result: (ROLQ x y) 35901 for { 35902 _ = v.Args[1] 35903 v_0 := v.Args[0] 35904 if v_0.Op != OpAMD64SHLQ { 35905 break 35906 } 35907 _ = v_0.Args[1] 35908 x := v_0.Args[0] 35909 y := v_0.Args[1] 35910 v_1 := v.Args[1] 35911 if v_1.Op != OpAMD64ANDQ { 35912 break 35913 } 35914 _ = v_1.Args[1] 35915 v_1_0 := v_1.Args[0] 35916 if v_1_0.Op != OpAMD64SHRQ { 35917 break 35918 } 35919 _ = v_1_0.Args[1] 35920 if x != v_1_0.Args[0] { 35921 break 35922 } 35923 v_1_0_1 := v_1_0.Args[1] 35924 if v_1_0_1.Op != OpAMD64NEGL { 35925 break 35926 } 35927 if y != v_1_0_1.Args[0] { 35928 break 35929 } 35930 v_1_1 := v_1.Args[1] 35931 if v_1_1.Op != OpAMD64SBBQcarrymask { 35932 break 35933 } 35934 v_1_1_0 := v_1_1.Args[0] 35935 if v_1_1_0.Op != OpAMD64CMPLconst { 35936 break 35937 } 35938 if v_1_1_0.AuxInt != 64 { 35939 break 35940 } 35941 v_1_1_0_0 := v_1_1_0.Args[0] 35942 if v_1_1_0_0.Op != OpAMD64NEGL { 35943 break 35944 } 35945 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 35946 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 35947 break 35948 } 35949 if v_1_1_0_0_0.AuxInt != -64 { 35950 break 35951 } 35952 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 35953 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 
35954 break 35955 } 35956 if v_1_1_0_0_0_0.AuxInt != 63 { 35957 break 35958 } 35959 if y != v_1_1_0_0_0_0.Args[0] { 35960 break 35961 } 35962 v.reset(OpAMD64ROLQ) 35963 v.AddArg(x) 35964 v.AddArg(y) 35965 return true 35966 } 35967 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y)))) 35968 // cond: 35969 // result: (ROLQ x y) 35970 for { 35971 _ = v.Args[1] 35972 v_0 := v.Args[0] 35973 if v_0.Op != OpAMD64SHLQ { 35974 break 35975 } 35976 _ = v_0.Args[1] 35977 x := v_0.Args[0] 35978 y := v_0.Args[1] 35979 v_1 := v.Args[1] 35980 if v_1.Op != OpAMD64ANDQ { 35981 break 35982 } 35983 _ = v_1.Args[1] 35984 v_1_0 := v_1.Args[0] 35985 if v_1_0.Op != OpAMD64SBBQcarrymask { 35986 break 35987 } 35988 v_1_0_0 := v_1_0.Args[0] 35989 if v_1_0_0.Op != OpAMD64CMPLconst { 35990 break 35991 } 35992 if v_1_0_0.AuxInt != 64 { 35993 break 35994 } 35995 v_1_0_0_0 := v_1_0_0.Args[0] 35996 if v_1_0_0_0.Op != OpAMD64NEGL { 35997 break 35998 } 35999 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 36000 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 36001 break 36002 } 36003 if v_1_0_0_0_0.AuxInt != -64 { 36004 break 36005 } 36006 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 36007 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 36008 break 36009 } 36010 if v_1_0_0_0_0_0.AuxInt != 63 { 36011 break 36012 } 36013 if y != v_1_0_0_0_0_0.Args[0] { 36014 break 36015 } 36016 v_1_1 := v_1.Args[1] 36017 if v_1_1.Op != OpAMD64SHRQ { 36018 break 36019 } 36020 _ = v_1_1.Args[1] 36021 if x != v_1_1.Args[0] { 36022 break 36023 } 36024 v_1_1_1 := v_1_1.Args[1] 36025 if v_1_1_1.Op != OpAMD64NEGL { 36026 break 36027 } 36028 if y != v_1_1_1.Args[0] { 36029 break 36030 } 36031 v.reset(OpAMD64ROLQ) 36032 v.AddArg(x) 36033 v.AddArg(y) 36034 return true 36035 } 36036 // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y)) 36037 // cond: 36038 // result: (ROLQ x y) 36039 for { 36040 _ = v.Args[1] 36041 v_0 := v.Args[0] 36042 if v_0.Op != OpAMD64ANDQ { 36043 break 36044 } 36045 _ = v_0.Args[1] 36046 v_0_0 := v_0.Args[0] 36047 if v_0_0.Op != OpAMD64SHRQ { 36048 break 36049 } 36050 _ = v_0_0.Args[1] 36051 x := v_0_0.Args[0] 36052 v_0_0_1 := v_0_0.Args[1] 36053 if v_0_0_1.Op != OpAMD64NEGL { 36054 break 36055 } 36056 y := v_0_0_1.Args[0] 36057 v_0_1 := v_0.Args[1] 36058 if v_0_1.Op != OpAMD64SBBQcarrymask { 36059 break 36060 } 36061 v_0_1_0 := v_0_1.Args[0] 36062 if v_0_1_0.Op != OpAMD64CMPLconst { 36063 break 36064 } 36065 if v_0_1_0.AuxInt != 64 { 36066 break 36067 } 36068 v_0_1_0_0 := v_0_1_0.Args[0] 36069 if v_0_1_0_0.Op != OpAMD64NEGL { 36070 break 36071 } 36072 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 36073 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 36074 break 36075 } 36076 if v_0_1_0_0_0.AuxInt != -64 { 36077 break 36078 } 36079 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 36080 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 36081 break 36082 } 36083 if v_0_1_0_0_0_0.AuxInt != 63 { 36084 break 36085 } 36086 if y != v_0_1_0_0_0_0.Args[0] { 36087 break 36088 } 36089 v_1 := v.Args[1] 36090 if v_1.Op != OpAMD64SHLQ { 36091 break 36092 } 36093 _ = v_1.Args[1] 36094 if x != v_1.Args[0] { 36095 break 36096 } 36097 if y != v_1.Args[1] { 36098 break 36099 } 36100 v.reset(OpAMD64ROLQ) 36101 v.AddArg(x) 36102 v.AddArg(y) 36103 return true 36104 } 36105 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y)) 36106 // cond: 36107 // result: (ROLQ x y) 36108 for { 36109 _ = v.Args[1] 36110 
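// NOTE (editorial annotation): the rotate patterns in this stretch repeat
// the Q-form rules with NEGL/ADDLconst/ANDLconst/CMPLconst in their place:
// the same ROLQ (and, further down, RORQ) recognition when the shift count
// was computed in a 32-bit type. Only the width of the count arithmetic
// changes; the rotate itself is still 64 bits wide, e.g.:
//
//	func rotl64by32(x uint64, k uint32) uint64 {
//		s := k & 63
//		return x<<s | x>>(64-s)
//	}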
v_0 := v.Args[0] 36111 if v_0.Op != OpAMD64ANDQ { 36112 break 36113 } 36114 _ = v_0.Args[1] 36115 v_0_0 := v_0.Args[0] 36116 if v_0_0.Op != OpAMD64SBBQcarrymask { 36117 break 36118 } 36119 v_0_0_0 := v_0_0.Args[0] 36120 if v_0_0_0.Op != OpAMD64CMPLconst { 36121 break 36122 } 36123 if v_0_0_0.AuxInt != 64 { 36124 break 36125 } 36126 v_0_0_0_0 := v_0_0_0.Args[0] 36127 if v_0_0_0_0.Op != OpAMD64NEGL { 36128 break 36129 } 36130 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 36131 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 36132 break 36133 } 36134 if v_0_0_0_0_0.AuxInt != -64 { 36135 break 36136 } 36137 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 36138 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 36139 break 36140 } 36141 if v_0_0_0_0_0_0.AuxInt != 63 { 36142 break 36143 } 36144 y := v_0_0_0_0_0_0.Args[0] 36145 v_0_1 := v_0.Args[1] 36146 if v_0_1.Op != OpAMD64SHRQ { 36147 break 36148 } 36149 _ = v_0_1.Args[1] 36150 x := v_0_1.Args[0] 36151 v_0_1_1 := v_0_1.Args[1] 36152 if v_0_1_1.Op != OpAMD64NEGL { 36153 break 36154 } 36155 if y != v_0_1_1.Args[0] { 36156 break 36157 } 36158 v_1 := v.Args[1] 36159 if v_1.Op != OpAMD64SHLQ { 36160 break 36161 } 36162 _ = v_1.Args[1] 36163 if x != v_1.Args[0] { 36164 break 36165 } 36166 if y != v_1.Args[1] { 36167 break 36168 } 36169 v.reset(OpAMD64ROLQ) 36170 v.AddArg(x) 36171 v.AddArg(y) 36172 return true 36173 } 36174 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 36175 // cond: 36176 // result: (RORQ x y) 36177 for { 36178 _ = v.Args[1] 36179 v_0 := v.Args[0] 36180 if v_0.Op != OpAMD64SHRQ { 36181 break 36182 } 36183 _ = v_0.Args[1] 36184 x := v_0.Args[0] 36185 y := v_0.Args[1] 36186 v_1 := v.Args[1] 36187 if v_1.Op != OpAMD64ANDQ { 36188 break 36189 } 36190 _ = v_1.Args[1] 36191 v_1_0 := v_1.Args[0] 36192 if v_1_0.Op != OpAMD64SHLQ { 36193 break 36194 } 36195 _ = v_1_0.Args[1] 36196 if x != v_1_0.Args[0] { 36197 break 36198 } 36199 v_1_0_1 := v_1_0.Args[1] 36200 if v_1_0_1.Op != OpAMD64NEGQ { 36201 break 36202 } 36203 if y != v_1_0_1.Args[0] { 36204 break 36205 } 36206 v_1_1 := v_1.Args[1] 36207 if v_1_1.Op != OpAMD64SBBQcarrymask { 36208 break 36209 } 36210 v_1_1_0 := v_1_1.Args[0] 36211 if v_1_1_0.Op != OpAMD64CMPQconst { 36212 break 36213 } 36214 if v_1_1_0.AuxInt != 64 { 36215 break 36216 } 36217 v_1_1_0_0 := v_1_1_0.Args[0] 36218 if v_1_1_0_0.Op != OpAMD64NEGQ { 36219 break 36220 } 36221 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 36222 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 36223 break 36224 } 36225 if v_1_1_0_0_0.AuxInt != -64 { 36226 break 36227 } 36228 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 36229 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 36230 break 36231 } 36232 if v_1_1_0_0_0_0.AuxInt != 63 { 36233 break 36234 } 36235 if y != v_1_1_0_0_0_0.Args[0] { 36236 break 36237 } 36238 v.reset(OpAMD64RORQ) 36239 v.AddArg(x) 36240 v.AddArg(y) 36241 return true 36242 } 36243 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y)))) 36244 // cond: 36245 // result: (RORQ x y) 36246 for { 36247 _ = v.Args[1] 36248 v_0 := v.Args[0] 36249 if v_0.Op != OpAMD64SHRQ { 36250 break 36251 } 36252 _ = v_0.Args[1] 36253 x := v_0.Args[0] 36254 y := v_0.Args[1] 36255 v_1 := v.Args[1] 36256 if v_1.Op != OpAMD64ANDQ { 36257 break 36258 } 36259 _ = v_1.Args[1] 36260 v_1_0 := v_1.Args[0] 36261 if v_1_0.Op != OpAMD64SBBQcarrymask { 36262 break 36263 } 36264 v_1_0_0 := v_1_0.Args[0] 36265 if v_1_0_0.Op != OpAMD64CMPQconst { 36266 break 36267 } 36268 if 
v_1_0_0.AuxInt != 64 { 36269 break 36270 } 36271 v_1_0_0_0 := v_1_0_0.Args[0] 36272 if v_1_0_0_0.Op != OpAMD64NEGQ { 36273 break 36274 } 36275 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 36276 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 36277 break 36278 } 36279 if v_1_0_0_0_0.AuxInt != -64 { 36280 break 36281 } 36282 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 36283 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 36284 break 36285 } 36286 if v_1_0_0_0_0_0.AuxInt != 63 { 36287 break 36288 } 36289 if y != v_1_0_0_0_0_0.Args[0] { 36290 break 36291 } 36292 v_1_1 := v_1.Args[1] 36293 if v_1_1.Op != OpAMD64SHLQ { 36294 break 36295 } 36296 _ = v_1_1.Args[1] 36297 if x != v_1_1.Args[0] { 36298 break 36299 } 36300 v_1_1_1 := v_1_1.Args[1] 36301 if v_1_1_1.Op != OpAMD64NEGQ { 36302 break 36303 } 36304 if y != v_1_1_1.Args[0] { 36305 break 36306 } 36307 v.reset(OpAMD64RORQ) 36308 v.AddArg(x) 36309 v.AddArg(y) 36310 return true 36311 } 36312 // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y)) 36313 // cond: 36314 // result: (RORQ x y) 36315 for { 36316 _ = v.Args[1] 36317 v_0 := v.Args[0] 36318 if v_0.Op != OpAMD64ANDQ { 36319 break 36320 } 36321 _ = v_0.Args[1] 36322 v_0_0 := v_0.Args[0] 36323 if v_0_0.Op != OpAMD64SHLQ { 36324 break 36325 } 36326 _ = v_0_0.Args[1] 36327 x := v_0_0.Args[0] 36328 v_0_0_1 := v_0_0.Args[1] 36329 if v_0_0_1.Op != OpAMD64NEGQ { 36330 break 36331 } 36332 y := v_0_0_1.Args[0] 36333 v_0_1 := v_0.Args[1] 36334 if v_0_1.Op != OpAMD64SBBQcarrymask { 36335 break 36336 } 36337 v_0_1_0 := v_0_1.Args[0] 36338 if v_0_1_0.Op != OpAMD64CMPQconst { 36339 break 36340 } 36341 if v_0_1_0.AuxInt != 64 { 36342 break 36343 } 36344 v_0_1_0_0 := v_0_1_0.Args[0] 36345 if v_0_1_0_0.Op != OpAMD64NEGQ { 36346 break 36347 } 36348 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 36349 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 36350 break 36351 } 36352 if v_0_1_0_0_0.AuxInt != -64 { 36353 break 36354 } 36355 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 36356 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 36357 break 36358 } 36359 if v_0_1_0_0_0_0.AuxInt != 63 { 36360 break 36361 } 36362 if y != v_0_1_0_0_0_0.Args[0] { 36363 break 36364 } 36365 v_1 := v.Args[1] 36366 if v_1.Op != OpAMD64SHRQ { 36367 break 36368 } 36369 _ = v_1.Args[1] 36370 if x != v_1.Args[0] { 36371 break 36372 } 36373 if y != v_1.Args[1] { 36374 break 36375 } 36376 v.reset(OpAMD64RORQ) 36377 v.AddArg(x) 36378 v.AddArg(y) 36379 return true 36380 } 36381 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y)) 36382 // cond: 36383 // result: (RORQ x y) 36384 for { 36385 _ = v.Args[1] 36386 v_0 := v.Args[0] 36387 if v_0.Op != OpAMD64ANDQ { 36388 break 36389 } 36390 _ = v_0.Args[1] 36391 v_0_0 := v_0.Args[0] 36392 if v_0_0.Op != OpAMD64SBBQcarrymask { 36393 break 36394 } 36395 v_0_0_0 := v_0_0.Args[0] 36396 if v_0_0_0.Op != OpAMD64CMPQconst { 36397 break 36398 } 36399 if v_0_0_0.AuxInt != 64 { 36400 break 36401 } 36402 v_0_0_0_0 := v_0_0_0.Args[0] 36403 if v_0_0_0_0.Op != OpAMD64NEGQ { 36404 break 36405 } 36406 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 36407 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 36408 break 36409 } 36410 if v_0_0_0_0_0.AuxInt != -64 { 36411 break 36412 } 36413 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 36414 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 36415 break 36416 } 36417 if v_0_0_0_0_0_0.AuxInt != 63 { 36418 break 36419 } 36420 y := v_0_0_0_0_0_0.Args[0] 36421 v_0_1 := v_0.Args[1] 36422 if v_0_1.Op != OpAMD64SHLQ { 36423 break 36424 } 
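// NOTE (editorial annotation): this batch mirrors the ROLQ rules for right
// rotation: (SHRQ x y) plus the masked (SHLQ x (NEGQ y)) term collapses to
// a single RORQ, again with one case per commuted operand order. A hedged
// sketch of the matching source idiom:
//
//	func rotr64(x uint64, k uint) uint64 {
//		s := k & 63
//		return x>>s | x<<(64-s) // rewritten to one RORQ
//	}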
36425 _ = v_0_1.Args[1] 36426 x := v_0_1.Args[0] 36427 v_0_1_1 := v_0_1.Args[1] 36428 if v_0_1_1.Op != OpAMD64NEGQ { 36429 break 36430 } 36431 if y != v_0_1_1.Args[0] { 36432 break 36433 } 36434 v_1 := v.Args[1] 36435 if v_1.Op != OpAMD64SHRQ { 36436 break 36437 } 36438 _ = v_1.Args[1] 36439 if x != v_1.Args[0] { 36440 break 36441 } 36442 if y != v_1.Args[1] { 36443 break 36444 } 36445 v.reset(OpAMD64RORQ) 36446 v.AddArg(x) 36447 v.AddArg(y) 36448 return true 36449 } 36450 return false 36451 } 36452 func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { 36453 b := v.Block 36454 _ = b 36455 typ := &b.Func.Config.Types 36456 _ = typ 36457 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 36458 // cond: 36459 // result: (RORQ x y) 36460 for { 36461 _ = v.Args[1] 36462 v_0 := v.Args[0] 36463 if v_0.Op != OpAMD64SHRQ { 36464 break 36465 } 36466 _ = v_0.Args[1] 36467 x := v_0.Args[0] 36468 y := v_0.Args[1] 36469 v_1 := v.Args[1] 36470 if v_1.Op != OpAMD64ANDQ { 36471 break 36472 } 36473 _ = v_1.Args[1] 36474 v_1_0 := v_1.Args[0] 36475 if v_1_0.Op != OpAMD64SHLQ { 36476 break 36477 } 36478 _ = v_1_0.Args[1] 36479 if x != v_1_0.Args[0] { 36480 break 36481 } 36482 v_1_0_1 := v_1_0.Args[1] 36483 if v_1_0_1.Op != OpAMD64NEGL { 36484 break 36485 } 36486 if y != v_1_0_1.Args[0] { 36487 break 36488 } 36489 v_1_1 := v_1.Args[1] 36490 if v_1_1.Op != OpAMD64SBBQcarrymask { 36491 break 36492 } 36493 v_1_1_0 := v_1_1.Args[0] 36494 if v_1_1_0.Op != OpAMD64CMPLconst { 36495 break 36496 } 36497 if v_1_1_0.AuxInt != 64 { 36498 break 36499 } 36500 v_1_1_0_0 := v_1_1_0.Args[0] 36501 if v_1_1_0_0.Op != OpAMD64NEGL { 36502 break 36503 } 36504 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 36505 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 36506 break 36507 } 36508 if v_1_1_0_0_0.AuxInt != -64 { 36509 break 36510 } 36511 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 36512 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 36513 break 36514 } 36515 if v_1_1_0_0_0_0.AuxInt != 63 { 36516 break 36517 } 36518 if y != v_1_1_0_0_0_0.Args[0] { 36519 break 36520 } 36521 v.reset(OpAMD64RORQ) 36522 v.AddArg(x) 36523 v.AddArg(y) 36524 return true 36525 } 36526 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y)))) 36527 // cond: 36528 // result: (RORQ x y) 36529 for { 36530 _ = v.Args[1] 36531 v_0 := v.Args[0] 36532 if v_0.Op != OpAMD64SHRQ { 36533 break 36534 } 36535 _ = v_0.Args[1] 36536 x := v_0.Args[0] 36537 y := v_0.Args[1] 36538 v_1 := v.Args[1] 36539 if v_1.Op != OpAMD64ANDQ { 36540 break 36541 } 36542 _ = v_1.Args[1] 36543 v_1_0 := v_1.Args[0] 36544 if v_1_0.Op != OpAMD64SBBQcarrymask { 36545 break 36546 } 36547 v_1_0_0 := v_1_0.Args[0] 36548 if v_1_0_0.Op != OpAMD64CMPLconst { 36549 break 36550 } 36551 if v_1_0_0.AuxInt != 64 { 36552 break 36553 } 36554 v_1_0_0_0 := v_1_0_0.Args[0] 36555 if v_1_0_0_0.Op != OpAMD64NEGL { 36556 break 36557 } 36558 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 36559 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 36560 break 36561 } 36562 if v_1_0_0_0_0.AuxInt != -64 { 36563 break 36564 } 36565 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 36566 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 36567 break 36568 } 36569 if v_1_0_0_0_0_0.AuxInt != 63 { 36570 break 36571 } 36572 if y != v_1_0_0_0_0_0.Args[0] { 36573 break 36574 } 36575 v_1_1 := v_1.Args[1] 36576 if v_1_1.Op != OpAMD64SHLQ { 36577 break 36578 } 36579 _ = v_1_1.Args[1] 36580 if x != v_1_1.Args[0] { 36581 break 36582 } 36583 v_1_1_1 := v_1_1.Args[1] 
36584 if v_1_1_1.Op != OpAMD64NEGL { 36585 break 36586 } 36587 if y != v_1_1_1.Args[0] { 36588 break 36589 } 36590 v.reset(OpAMD64RORQ) 36591 v.AddArg(x) 36592 v.AddArg(y) 36593 return true 36594 } 36595 // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y)) 36596 // cond: 36597 // result: (RORQ x y) 36598 for { 36599 _ = v.Args[1] 36600 v_0 := v.Args[0] 36601 if v_0.Op != OpAMD64ANDQ { 36602 break 36603 } 36604 _ = v_0.Args[1] 36605 v_0_0 := v_0.Args[0] 36606 if v_0_0.Op != OpAMD64SHLQ { 36607 break 36608 } 36609 _ = v_0_0.Args[1] 36610 x := v_0_0.Args[0] 36611 v_0_0_1 := v_0_0.Args[1] 36612 if v_0_0_1.Op != OpAMD64NEGL { 36613 break 36614 } 36615 y := v_0_0_1.Args[0] 36616 v_0_1 := v_0.Args[1] 36617 if v_0_1.Op != OpAMD64SBBQcarrymask { 36618 break 36619 } 36620 v_0_1_0 := v_0_1.Args[0] 36621 if v_0_1_0.Op != OpAMD64CMPLconst { 36622 break 36623 } 36624 if v_0_1_0.AuxInt != 64 { 36625 break 36626 } 36627 v_0_1_0_0 := v_0_1_0.Args[0] 36628 if v_0_1_0_0.Op != OpAMD64NEGL { 36629 break 36630 } 36631 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 36632 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 36633 break 36634 } 36635 if v_0_1_0_0_0.AuxInt != -64 { 36636 break 36637 } 36638 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 36639 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 36640 break 36641 } 36642 if v_0_1_0_0_0_0.AuxInt != 63 { 36643 break 36644 } 36645 if y != v_0_1_0_0_0_0.Args[0] { 36646 break 36647 } 36648 v_1 := v.Args[1] 36649 if v_1.Op != OpAMD64SHRQ { 36650 break 36651 } 36652 _ = v_1.Args[1] 36653 if x != v_1.Args[0] { 36654 break 36655 } 36656 if y != v_1.Args[1] { 36657 break 36658 } 36659 v.reset(OpAMD64RORQ) 36660 v.AddArg(x) 36661 v.AddArg(y) 36662 return true 36663 } 36664 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y)) 36665 // cond: 36666 // result: (RORQ x y) 36667 for { 36668 _ = v.Args[1] 36669 v_0 := v.Args[0] 36670 if v_0.Op != OpAMD64ANDQ { 36671 break 36672 } 36673 _ = v_0.Args[1] 36674 v_0_0 := v_0.Args[0] 36675 if v_0_0.Op != OpAMD64SBBQcarrymask { 36676 break 36677 } 36678 v_0_0_0 := v_0_0.Args[0] 36679 if v_0_0_0.Op != OpAMD64CMPLconst { 36680 break 36681 } 36682 if v_0_0_0.AuxInt != 64 { 36683 break 36684 } 36685 v_0_0_0_0 := v_0_0_0.Args[0] 36686 if v_0_0_0_0.Op != OpAMD64NEGL { 36687 break 36688 } 36689 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 36690 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 36691 break 36692 } 36693 if v_0_0_0_0_0.AuxInt != -64 { 36694 break 36695 } 36696 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 36697 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 36698 break 36699 } 36700 if v_0_0_0_0_0_0.AuxInt != 63 { 36701 break 36702 } 36703 y := v_0_0_0_0_0_0.Args[0] 36704 v_0_1 := v_0.Args[1] 36705 if v_0_1.Op != OpAMD64SHLQ { 36706 break 36707 } 36708 _ = v_0_1.Args[1] 36709 x := v_0_1.Args[0] 36710 v_0_1_1 := v_0_1.Args[1] 36711 if v_0_1_1.Op != OpAMD64NEGL { 36712 break 36713 } 36714 if y != v_0_1_1.Args[0] { 36715 break 36716 } 36717 v_1 := v.Args[1] 36718 if v_1.Op != OpAMD64SHRQ { 36719 break 36720 } 36721 _ = v_1.Args[1] 36722 if x != v_1.Args[0] { 36723 break 36724 } 36725 if y != v_1.Args[1] { 36726 break 36727 } 36728 v.reset(OpAMD64RORQ) 36729 v.AddArg(x) 36730 v.AddArg(y) 36731 return true 36732 } 36733 // match: (ORQ x x) 36734 // cond: 36735 // result: x 36736 for { 36737 _ = v.Args[1] 36738 x := v.Args[0] 36739 if x != v.Args[1] { 36740 break 36741 } 36742 v.reset(OpCopy) 36743 v.Type = x.Type 36744 v.AddArg(x) 36745 return true 36746 
} 36747 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 36748 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36749 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 36750 for { 36751 _ = v.Args[1] 36752 x0 := v.Args[0] 36753 if x0.Op != OpAMD64MOVBload { 36754 break 36755 } 36756 i0 := x0.AuxInt 36757 s := x0.Aux 36758 _ = x0.Args[1] 36759 p := x0.Args[0] 36760 mem := x0.Args[1] 36761 sh := v.Args[1] 36762 if sh.Op != OpAMD64SHLQconst { 36763 break 36764 } 36765 if sh.AuxInt != 8 { 36766 break 36767 } 36768 x1 := sh.Args[0] 36769 if x1.Op != OpAMD64MOVBload { 36770 break 36771 } 36772 i1 := x1.AuxInt 36773 if x1.Aux != s { 36774 break 36775 } 36776 _ = x1.Args[1] 36777 if p != x1.Args[0] { 36778 break 36779 } 36780 if mem != x1.Args[1] { 36781 break 36782 } 36783 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36784 break 36785 } 36786 b = mergePoint(b, x0, x1) 36787 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 36788 v.reset(OpCopy) 36789 v.AddArg(v0) 36790 v0.AuxInt = i0 36791 v0.Aux = s 36792 v0.AddArg(p) 36793 v0.AddArg(mem) 36794 return true 36795 } 36796 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 36797 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36798 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 36799 for { 36800 _ = v.Args[1] 36801 sh := v.Args[0] 36802 if sh.Op != OpAMD64SHLQconst { 36803 break 36804 } 36805 if sh.AuxInt != 8 { 36806 break 36807 } 36808 x1 := sh.Args[0] 36809 if x1.Op != OpAMD64MOVBload { 36810 break 36811 } 36812 i1 := x1.AuxInt 36813 s := x1.Aux 36814 _ = x1.Args[1] 36815 p := x1.Args[0] 36816 mem := x1.Args[1] 36817 x0 := v.Args[1] 36818 if x0.Op != OpAMD64MOVBload { 36819 break 36820 } 36821 i0 := x0.AuxInt 36822 if x0.Aux != s { 36823 break 36824 } 36825 _ = x0.Args[1] 36826 if p != x0.Args[0] { 36827 break 36828 } 36829 if mem != x0.Args[1] { 36830 break 36831 } 36832 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36833 break 36834 } 36835 b = mergePoint(b, x0, x1) 36836 v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 36837 v.reset(OpCopy) 36838 v.AddArg(v0) 36839 v0.AuxInt = i0 36840 v0.Aux = s 36841 v0.AddArg(p) 36842 v0.AddArg(mem) 36843 return true 36844 } 36845 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) 36846 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36847 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 36848 for { 36849 _ = v.Args[1] 36850 x0 := v.Args[0] 36851 if x0.Op != OpAMD64MOVWload { 36852 break 36853 } 36854 i0 := x0.AuxInt 36855 s := x0.Aux 36856 _ = x0.Args[1] 36857 p := x0.Args[0] 36858 mem := x0.Args[1] 36859 sh := v.Args[1] 36860 if sh.Op != OpAMD64SHLQconst { 36861 break 36862 } 36863 if sh.AuxInt != 16 { 36864 break 36865 } 36866 x1 := sh.Args[0] 36867 if x1.Op != OpAMD64MOVWload { 36868 break 36869 } 36870 i1 := x1.AuxInt 36871 if x1.Aux != s { 36872 break 36873 } 36874 _ = x1.Args[1] 36875 if p != x1.Args[0] { 36876 break 36877 } 36878 if mem != x1.Args[1] { 36879 
break 36880 } 36881 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36882 break 36883 } 36884 b = mergePoint(b, x0, x1) 36885 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 36886 v.reset(OpCopy) 36887 v.AddArg(v0) 36888 v0.AuxInt = i0 36889 v0.Aux = s 36890 v0.AddArg(p) 36891 v0.AddArg(mem) 36892 return true 36893 } 36894 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 36895 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36896 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 36897 for { 36898 _ = v.Args[1] 36899 sh := v.Args[0] 36900 if sh.Op != OpAMD64SHLQconst { 36901 break 36902 } 36903 if sh.AuxInt != 16 { 36904 break 36905 } 36906 x1 := sh.Args[0] 36907 if x1.Op != OpAMD64MOVWload { 36908 break 36909 } 36910 i1 := x1.AuxInt 36911 s := x1.Aux 36912 _ = x1.Args[1] 36913 p := x1.Args[0] 36914 mem := x1.Args[1] 36915 x0 := v.Args[1] 36916 if x0.Op != OpAMD64MOVWload { 36917 break 36918 } 36919 i0 := x0.AuxInt 36920 if x0.Aux != s { 36921 break 36922 } 36923 _ = x0.Args[1] 36924 if p != x0.Args[0] { 36925 break 36926 } 36927 if mem != x0.Args[1] { 36928 break 36929 } 36930 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36931 break 36932 } 36933 b = mergePoint(b, x0, x1) 36934 v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 36935 v.reset(OpCopy) 36936 v.AddArg(v0) 36937 v0.AuxInt = i0 36938 v0.Aux = s 36939 v0.AddArg(p) 36940 v0.AddArg(mem) 36941 return true 36942 } 36943 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) 36944 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36945 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 36946 for { 36947 _ = v.Args[1] 36948 x0 := v.Args[0] 36949 if x0.Op != OpAMD64MOVLload { 36950 break 36951 } 36952 i0 := x0.AuxInt 36953 s := x0.Aux 36954 _ = x0.Args[1] 36955 p := x0.Args[0] 36956 mem := x0.Args[1] 36957 sh := v.Args[1] 36958 if sh.Op != OpAMD64SHLQconst { 36959 break 36960 } 36961 if sh.AuxInt != 32 { 36962 break 36963 } 36964 x1 := sh.Args[0] 36965 if x1.Op != OpAMD64MOVLload { 36966 break 36967 } 36968 i1 := x1.AuxInt 36969 if x1.Aux != s { 36970 break 36971 } 36972 _ = x1.Args[1] 36973 if p != x1.Args[0] { 36974 break 36975 } 36976 if mem != x1.Args[1] { 36977 break 36978 } 36979 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36980 break 36981 } 36982 b = mergePoint(b, x0, x1) 36983 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) 36984 v.reset(OpCopy) 36985 v.AddArg(v0) 36986 v0.AuxInt = i0 36987 v0.Aux = s 36988 v0.AddArg(p) 36989 v0.AddArg(mem) 36990 return true 36991 } 36992 return false 36993 } 36994 func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool { 36995 b := v.Block 36996 _ = b 36997 typ := &b.Func.Config.Types 36998 _ = typ 36999 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) 37000 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37001 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 
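// This rule and the unswapped one before it widen two adjacent 32-bit loads
// into a single MOVQload. Together with the 8->16 and 16->32 cases, this
// lets a byte-by-byte little-endian uint64 read collapse, step by step, into
// one 8-byte load (a sketch of the triggering source):
//
//	func le64(b []byte) uint64 {
//		lo := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
//		hi := uint64(b[4]) | uint64(b[5])<<8 | uint64(b[6])<<16 | uint64(b[7])<<24
//		return lo | hi<<32 // merges stepwise down to one MOVQload
//	}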
37002 for { 37003 _ = v.Args[1] 37004 sh := v.Args[0] 37005 if sh.Op != OpAMD64SHLQconst { 37006 break 37007 } 37008 if sh.AuxInt != 32 { 37009 break 37010 } 37011 x1 := sh.Args[0] 37012 if x1.Op != OpAMD64MOVLload { 37013 break 37014 } 37015 i1 := x1.AuxInt 37016 s := x1.Aux 37017 _ = x1.Args[1] 37018 p := x1.Args[0] 37019 mem := x1.Args[1] 37020 x0 := v.Args[1] 37021 if x0.Op != OpAMD64MOVLload { 37022 break 37023 } 37024 i0 := x0.AuxInt 37025 if x0.Aux != s { 37026 break 37027 } 37028 _ = x0.Args[1] 37029 if p != x0.Args[0] { 37030 break 37031 } 37032 if mem != x0.Args[1] { 37033 break 37034 } 37035 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37036 break 37037 } 37038 b = mergePoint(b, x0, x1) 37039 v0 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) 37040 v.reset(OpCopy) 37041 v.AddArg(v0) 37042 v0.AuxInt = i0 37043 v0.Aux = s 37044 v0.AddArg(p) 37045 v0.AddArg(mem) 37046 return true 37047 } 37048 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 37049 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37050 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 37051 for { 37052 _ = v.Args[1] 37053 s1 := v.Args[0] 37054 if s1.Op != OpAMD64SHLQconst { 37055 break 37056 } 37057 j1 := s1.AuxInt 37058 x1 := s1.Args[0] 37059 if x1.Op != OpAMD64MOVBload { 37060 break 37061 } 37062 i1 := x1.AuxInt 37063 s := x1.Aux 37064 _ = x1.Args[1] 37065 p := x1.Args[0] 37066 mem := x1.Args[1] 37067 or := v.Args[1] 37068 if or.Op != OpAMD64ORQ { 37069 break 37070 } 37071 _ = or.Args[1] 37072 s0 := or.Args[0] 37073 if s0.Op != OpAMD64SHLQconst { 37074 break 37075 } 37076 j0 := s0.AuxInt 37077 x0 := s0.Args[0] 37078 if x0.Op != OpAMD64MOVBload { 37079 break 37080 } 37081 i0 := x0.AuxInt 37082 if x0.Aux != s { 37083 break 37084 } 37085 _ = x0.Args[1] 37086 if p != x0.Args[0] { 37087 break 37088 } 37089 if mem != x0.Args[1] { 37090 break 37091 } 37092 y := or.Args[1] 37093 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37094 break 37095 } 37096 b = mergePoint(b, x0, x1) 37097 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 37098 v.reset(OpCopy) 37099 v.AddArg(v0) 37100 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 37101 v1.AuxInt = j0 37102 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 37103 v2.AuxInt = i0 37104 v2.Aux = s 37105 v2.AddArg(p) 37106 v2.AddArg(mem) 37107 v1.AddArg(v2) 37108 v0.AddArg(v1) 37109 v0.AddArg(y) 37110 return true 37111 } 37112 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) 37113 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37114 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 37115 for { 37116 _ = v.Args[1] 37117 s1 := v.Args[0] 37118 if s1.Op != OpAMD64SHLQconst { 37119 
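// The rules in this group handle partially built values: two shifted byte
// loads sitting inside a larger ORQ tree (the extra operand y) merge into a
// single shifted MOVWload, leaving y untouched. j0 % 16 == 0 keeps the pair
// aligned to a 16-bit lane and j1 == j0+8 keeps the two bytes adjacent
// within it. For example, in
//
//	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
//
// the pairs (b[0],b[1]) at lane j0=0 and (b[2],b[3]) at lane j0=16 each
// merge into a MOVWload first; a later pass then merges the two lanes.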
break 37120 } 37121 j1 := s1.AuxInt 37122 x1 := s1.Args[0] 37123 if x1.Op != OpAMD64MOVBload { 37124 break 37125 } 37126 i1 := x1.AuxInt 37127 s := x1.Aux 37128 _ = x1.Args[1] 37129 p := x1.Args[0] 37130 mem := x1.Args[1] 37131 or := v.Args[1] 37132 if or.Op != OpAMD64ORQ { 37133 break 37134 } 37135 _ = or.Args[1] 37136 y := or.Args[0] 37137 s0 := or.Args[1] 37138 if s0.Op != OpAMD64SHLQconst { 37139 break 37140 } 37141 j0 := s0.AuxInt 37142 x0 := s0.Args[0] 37143 if x0.Op != OpAMD64MOVBload { 37144 break 37145 } 37146 i0 := x0.AuxInt 37147 if x0.Aux != s { 37148 break 37149 } 37150 _ = x0.Args[1] 37151 if p != x0.Args[0] { 37152 break 37153 } 37154 if mem != x0.Args[1] { 37155 break 37156 } 37157 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37158 break 37159 } 37160 b = mergePoint(b, x0, x1) 37161 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 37162 v.reset(OpCopy) 37163 v.AddArg(v0) 37164 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 37165 v1.AuxInt = j0 37166 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 37167 v2.AuxInt = i0 37168 v2.Aux = s 37169 v2.AddArg(p) 37170 v2.AddArg(mem) 37171 v1.AddArg(v2) 37172 v0.AddArg(v1) 37173 v0.AddArg(y) 37174 return true 37175 } 37176 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 37177 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37178 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 37179 for { 37180 _ = v.Args[1] 37181 or := v.Args[0] 37182 if or.Op != OpAMD64ORQ { 37183 break 37184 } 37185 _ = or.Args[1] 37186 s0 := or.Args[0] 37187 if s0.Op != OpAMD64SHLQconst { 37188 break 37189 } 37190 j0 := s0.AuxInt 37191 x0 := s0.Args[0] 37192 if x0.Op != OpAMD64MOVBload { 37193 break 37194 } 37195 i0 := x0.AuxInt 37196 s := x0.Aux 37197 _ = x0.Args[1] 37198 p := x0.Args[0] 37199 mem := x0.Args[1] 37200 y := or.Args[1] 37201 s1 := v.Args[1] 37202 if s1.Op != OpAMD64SHLQconst { 37203 break 37204 } 37205 j1 := s1.AuxInt 37206 x1 := s1.Args[0] 37207 if x1.Op != OpAMD64MOVBload { 37208 break 37209 } 37210 i1 := x1.AuxInt 37211 if x1.Aux != s { 37212 break 37213 } 37214 _ = x1.Args[1] 37215 if p != x1.Args[0] { 37216 break 37217 } 37218 if mem != x1.Args[1] { 37219 break 37220 } 37221 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37222 break 37223 } 37224 b = mergePoint(b, x0, x1) 37225 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 37226 v.reset(OpCopy) 37227 v.AddArg(v0) 37228 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 37229 v1.AuxInt = j0 37230 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 37231 v2.AuxInt = i0 37232 v2.Aux = s 37233 v2.AddArg(p) 37234 v2.AddArg(mem) 37235 v1.AddArg(v2) 37236 v0.AddArg(v1) 37237 v0.AddArg(y) 37238 return true 37239 } 37240 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 37241 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 
1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37242 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 37243 for { 37244 _ = v.Args[1] 37245 or := v.Args[0] 37246 if or.Op != OpAMD64ORQ { 37247 break 37248 } 37249 _ = or.Args[1] 37250 y := or.Args[0] 37251 s0 := or.Args[1] 37252 if s0.Op != OpAMD64SHLQconst { 37253 break 37254 } 37255 j0 := s0.AuxInt 37256 x0 := s0.Args[0] 37257 if x0.Op != OpAMD64MOVBload { 37258 break 37259 } 37260 i0 := x0.AuxInt 37261 s := x0.Aux 37262 _ = x0.Args[1] 37263 p := x0.Args[0] 37264 mem := x0.Args[1] 37265 s1 := v.Args[1] 37266 if s1.Op != OpAMD64SHLQconst { 37267 break 37268 } 37269 j1 := s1.AuxInt 37270 x1 := s1.Args[0] 37271 if x1.Op != OpAMD64MOVBload { 37272 break 37273 } 37274 i1 := x1.AuxInt 37275 if x1.Aux != s { 37276 break 37277 } 37278 _ = x1.Args[1] 37279 if p != x1.Args[0] { 37280 break 37281 } 37282 if mem != x1.Args[1] { 37283 break 37284 } 37285 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37286 break 37287 } 37288 b = mergePoint(b, x0, x1) 37289 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 37290 v.reset(OpCopy) 37291 v.AddArg(v0) 37292 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 37293 v1.AuxInt = j0 37294 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 37295 v2.AuxInt = i0 37296 v2.Aux = s 37297 v2.AddArg(p) 37298 v2.AddArg(mem) 37299 v1.AddArg(v2) 37300 v0.AddArg(v1) 37301 v0.AddArg(y) 37302 return true 37303 } 37304 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) 37305 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37306 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 37307 for { 37308 _ = v.Args[1] 37309 s1 := v.Args[0] 37310 if s1.Op != OpAMD64SHLQconst { 37311 break 37312 } 37313 j1 := s1.AuxInt 37314 x1 := s1.Args[0] 37315 if x1.Op != OpAMD64MOVWload { 37316 break 37317 } 37318 i1 := x1.AuxInt 37319 s := x1.Aux 37320 _ = x1.Args[1] 37321 p := x1.Args[0] 37322 mem := x1.Args[1] 37323 or := v.Args[1] 37324 if or.Op != OpAMD64ORQ { 37325 break 37326 } 37327 _ = or.Args[1] 37328 s0 := or.Args[0] 37329 if s0.Op != OpAMD64SHLQconst { 37330 break 37331 } 37332 j0 := s0.AuxInt 37333 x0 := s0.Args[0] 37334 if x0.Op != OpAMD64MOVWload { 37335 break 37336 } 37337 i0 := x0.AuxInt 37338 if x0.Aux != s { 37339 break 37340 } 37341 _ = x0.Args[1] 37342 if p != x0.Args[0] { 37343 break 37344 } 37345 if mem != x0.Args[1] { 37346 break 37347 } 37348 y := or.Args[1] 37349 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37350 break 37351 } 37352 b = mergePoint(b, x0, x1) 37353 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 37354 v.reset(OpCopy) 37355 v.AddArg(v0) 37356 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 37357 v1.AuxInt = j0 37358 v2 := b.NewValue0(x0.Pos, 
OpAMD64MOVLload, typ.UInt32) 37359 v2.AuxInt = i0 37360 v2.Aux = s 37361 v2.AddArg(p) 37362 v2.AddArg(mem) 37363 v1.AddArg(v2) 37364 v0.AddArg(v1) 37365 v0.AddArg(y) 37366 return true 37367 } 37368 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) 37369 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37370 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 37371 for { 37372 _ = v.Args[1] 37373 s1 := v.Args[0] 37374 if s1.Op != OpAMD64SHLQconst { 37375 break 37376 } 37377 j1 := s1.AuxInt 37378 x1 := s1.Args[0] 37379 if x1.Op != OpAMD64MOVWload { 37380 break 37381 } 37382 i1 := x1.AuxInt 37383 s := x1.Aux 37384 _ = x1.Args[1] 37385 p := x1.Args[0] 37386 mem := x1.Args[1] 37387 or := v.Args[1] 37388 if or.Op != OpAMD64ORQ { 37389 break 37390 } 37391 _ = or.Args[1] 37392 y := or.Args[0] 37393 s0 := or.Args[1] 37394 if s0.Op != OpAMD64SHLQconst { 37395 break 37396 } 37397 j0 := s0.AuxInt 37398 x0 := s0.Args[0] 37399 if x0.Op != OpAMD64MOVWload { 37400 break 37401 } 37402 i0 := x0.AuxInt 37403 if x0.Aux != s { 37404 break 37405 } 37406 _ = x0.Args[1] 37407 if p != x0.Args[0] { 37408 break 37409 } 37410 if mem != x0.Args[1] { 37411 break 37412 } 37413 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37414 break 37415 } 37416 b = mergePoint(b, x0, x1) 37417 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 37418 v.reset(OpCopy) 37419 v.AddArg(v0) 37420 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 37421 v1.AuxInt = j0 37422 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 37423 v2.AuxInt = i0 37424 v2.Aux = s 37425 v2.AddArg(p) 37426 v2.AddArg(mem) 37427 v1.AddArg(v2) 37428 v0.AddArg(v1) 37429 v0.AddArg(y) 37430 return true 37431 } 37432 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 37433 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37434 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 37435 for { 37436 _ = v.Args[1] 37437 or := v.Args[0] 37438 if or.Op != OpAMD64ORQ { 37439 break 37440 } 37441 _ = or.Args[1] 37442 s0 := or.Args[0] 37443 if s0.Op != OpAMD64SHLQconst { 37444 break 37445 } 37446 j0 := s0.AuxInt 37447 x0 := s0.Args[0] 37448 if x0.Op != OpAMD64MOVWload { 37449 break 37450 } 37451 i0 := x0.AuxInt 37452 s := x0.Aux 37453 _ = x0.Args[1] 37454 p := x0.Args[0] 37455 mem := x0.Args[1] 37456 y := or.Args[1] 37457 s1 := v.Args[1] 37458 if s1.Op != OpAMD64SHLQconst { 37459 break 37460 } 37461 j1 := s1.AuxInt 37462 x1 := s1.Args[0] 37463 if x1.Op != OpAMD64MOVWload { 37464 break 37465 } 37466 i1 := x1.AuxInt 37467 if x1.Aux != s { 37468 break 37469 } 37470 _ = x1.Args[1] 37471 if p != x1.Args[0] { 37472 break 37473 } 37474 if mem != x1.Args[1] { 37475 break 37476 } 37477 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && 
or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37478 break 37479 } 37480 b = mergePoint(b, x0, x1) 37481 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 37482 v.reset(OpCopy) 37483 v.AddArg(v0) 37484 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 37485 v1.AuxInt = j0 37486 v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 37487 v2.AuxInt = i0 37488 v2.Aux = s 37489 v2.AddArg(p) 37490 v2.AddArg(mem) 37491 v1.AddArg(v2) 37492 v0.AddArg(v1) 37493 v0.AddArg(y) 37494 return true 37495 } 37496 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 37497 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 37498 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 37499 for { 37500 _ = v.Args[1] 37501 or := v.Args[0] 37502 if or.Op != OpAMD64ORQ { 37503 break 37504 } 37505 _ = or.Args[1] 37506 y := or.Args[0] 37507 s0 := or.Args[1] 37508 if s0.Op != OpAMD64SHLQconst { 37509 break 37510 } 37511 j0 := s0.AuxInt 37512 x0 := s0.Args[0] 37513 if x0.Op != OpAMD64MOVWload { 37514 break 37515 } 37516 i0 := x0.AuxInt 37517 s := x0.Aux 37518 _ = x0.Args[1] 37519 p := x0.Args[0] 37520 mem := x0.Args[1] 37521 s1 := v.Args[1] 37522 if s1.Op != OpAMD64SHLQconst { 37523 break 37524 } 37525 j1 := s1.AuxInt 37526 x1 := s1.Args[0] 37527 if x1.Op != OpAMD64MOVWload { 37528 break 37529 } 37530 i1 := x1.AuxInt 37531 if x1.Aux != s { 37532 break 37533 } 37534 _ = x1.Args[1] 37535 if p != x1.Args[0] { 37536 break 37537 } 37538 if mem != x1.Args[1] { 37539 break 37540 } 37541 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 37542 break 37543 } 37544 b = mergePoint(b, x0, x1) 37545 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 37546 v.reset(OpCopy) 37547 v.AddArg(v0) 37548 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 37549 v1.AuxInt = j0 37550 v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 37551 v2.AuxInt = i0 37552 v2.Aux = s 37553 v2.AddArg(p) 37554 v2.AddArg(mem) 37555 v1.AddArg(v2) 37556 v0.AddArg(v1) 37557 v0.AddArg(y) 37558 return true 37559 } 37560 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 37561 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37562 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37563 for { 37564 _ = v.Args[1] 37565 x0 := v.Args[0] 37566 if x0.Op != OpAMD64MOVBloadidx1 { 37567 break 37568 } 37569 i0 := x0.AuxInt 37570 s := x0.Aux 37571 _ = x0.Args[2] 37572 p := x0.Args[0] 37573 idx := x0.Args[1] 37574 mem := x0.Args[2] 37575 sh := v.Args[1] 37576 if sh.Op != OpAMD64SHLQconst { 37577 break 37578 } 37579 if sh.AuxInt != 8 { 37580 break 37581 } 37582 x1 := sh.Args[0] 37583 if x1.Op != OpAMD64MOVBloadidx1 { 37584 break 37585 } 37586 i1 := x1.AuxInt 37587 if x1.Aux != s { 37588 break 37589 } 37590 _ = x1.Args[2] 37591 if p != x1.Args[0] { 37592 break 37593 } 37594 if idx != x1.Args[1] { 37595 break 37596 } 37597 if mem != x1.Args[2] { 37598 
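// Starting with the MOVBloadidx1 rule in progress here, the same merges are
// repeated for the indexed load forms, which address p+idx+const. Because
// the two address operands are interchangeable, each rule appears in every
// combination of (p idx) vs (idx p) on both loads, on top of the usual ORQ
// operand swap. The triggering source is the same, just with a variable
// index:
//
//	func le16at(b []byte, i int) uint16 {
//		return uint16(b[i]) | uint16(b[i+1])<<8 // one MOVWloadidx1
//	}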
break 37599 } 37600 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37601 break 37602 } 37603 b = mergePoint(b, x0, x1) 37604 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37605 v.reset(OpCopy) 37606 v.AddArg(v0) 37607 v0.AuxInt = i0 37608 v0.Aux = s 37609 v0.AddArg(p) 37610 v0.AddArg(idx) 37611 v0.AddArg(mem) 37612 return true 37613 } 37614 return false 37615 } 37616 func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool { 37617 b := v.Block 37618 _ = b 37619 typ := &b.Func.Config.Types 37620 _ = typ 37621 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 37622 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37623 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37624 for { 37625 _ = v.Args[1] 37626 x0 := v.Args[0] 37627 if x0.Op != OpAMD64MOVBloadidx1 { 37628 break 37629 } 37630 i0 := x0.AuxInt 37631 s := x0.Aux 37632 _ = x0.Args[2] 37633 idx := x0.Args[0] 37634 p := x0.Args[1] 37635 mem := x0.Args[2] 37636 sh := v.Args[1] 37637 if sh.Op != OpAMD64SHLQconst { 37638 break 37639 } 37640 if sh.AuxInt != 8 { 37641 break 37642 } 37643 x1 := sh.Args[0] 37644 if x1.Op != OpAMD64MOVBloadidx1 { 37645 break 37646 } 37647 i1 := x1.AuxInt 37648 if x1.Aux != s { 37649 break 37650 } 37651 _ = x1.Args[2] 37652 if p != x1.Args[0] { 37653 break 37654 } 37655 if idx != x1.Args[1] { 37656 break 37657 } 37658 if mem != x1.Args[2] { 37659 break 37660 } 37661 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37662 break 37663 } 37664 b = mergePoint(b, x0, x1) 37665 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37666 v.reset(OpCopy) 37667 v.AddArg(v0) 37668 v0.AuxInt = i0 37669 v0.Aux = s 37670 v0.AddArg(p) 37671 v0.AddArg(idx) 37672 v0.AddArg(mem) 37673 return true 37674 } 37675 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 37676 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37677 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37678 for { 37679 _ = v.Args[1] 37680 x0 := v.Args[0] 37681 if x0.Op != OpAMD64MOVBloadidx1 { 37682 break 37683 } 37684 i0 := x0.AuxInt 37685 s := x0.Aux 37686 _ = x0.Args[2] 37687 p := x0.Args[0] 37688 idx := x0.Args[1] 37689 mem := x0.Args[2] 37690 sh := v.Args[1] 37691 if sh.Op != OpAMD64SHLQconst { 37692 break 37693 } 37694 if sh.AuxInt != 8 { 37695 break 37696 } 37697 x1 := sh.Args[0] 37698 if x1.Op != OpAMD64MOVBloadidx1 { 37699 break 37700 } 37701 i1 := x1.AuxInt 37702 if x1.Aux != s { 37703 break 37704 } 37705 _ = x1.Args[2] 37706 if idx != x1.Args[0] { 37707 break 37708 } 37709 if p != x1.Args[1] { 37710 break 37711 } 37712 if mem != x1.Args[2] { 37713 break 37714 } 37715 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37716 break 37717 } 37718 b = mergePoint(b, x0, x1) 37719 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37720 v.reset(OpCopy) 37721 v.AddArg(v0) 37722 v0.AuxInt = i0 37723 v0.Aux = s 37724 v0.AddArg(p) 37725 v0.AddArg(idx) 37726 v0.AddArg(mem) 37727 return true 37728 } 37729 // match: (ORQ 
x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 37730 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37731 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37732 for { 37733 _ = v.Args[1] 37734 x0 := v.Args[0] 37735 if x0.Op != OpAMD64MOVBloadidx1 { 37736 break 37737 } 37738 i0 := x0.AuxInt 37739 s := x0.Aux 37740 _ = x0.Args[2] 37741 idx := x0.Args[0] 37742 p := x0.Args[1] 37743 mem := x0.Args[2] 37744 sh := v.Args[1] 37745 if sh.Op != OpAMD64SHLQconst { 37746 break 37747 } 37748 if sh.AuxInt != 8 { 37749 break 37750 } 37751 x1 := sh.Args[0] 37752 if x1.Op != OpAMD64MOVBloadidx1 { 37753 break 37754 } 37755 i1 := x1.AuxInt 37756 if x1.Aux != s { 37757 break 37758 } 37759 _ = x1.Args[2] 37760 if idx != x1.Args[0] { 37761 break 37762 } 37763 if p != x1.Args[1] { 37764 break 37765 } 37766 if mem != x1.Args[2] { 37767 break 37768 } 37769 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37770 break 37771 } 37772 b = mergePoint(b, x0, x1) 37773 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37774 v.reset(OpCopy) 37775 v.AddArg(v0) 37776 v0.AuxInt = i0 37777 v0.Aux = s 37778 v0.AddArg(p) 37779 v0.AddArg(idx) 37780 v0.AddArg(mem) 37781 return true 37782 } 37783 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 37784 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37785 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37786 for { 37787 _ = v.Args[1] 37788 sh := v.Args[0] 37789 if sh.Op != OpAMD64SHLQconst { 37790 break 37791 } 37792 if sh.AuxInt != 8 { 37793 break 37794 } 37795 x1 := sh.Args[0] 37796 if x1.Op != OpAMD64MOVBloadidx1 { 37797 break 37798 } 37799 i1 := x1.AuxInt 37800 s := x1.Aux 37801 _ = x1.Args[2] 37802 p := x1.Args[0] 37803 idx := x1.Args[1] 37804 mem := x1.Args[2] 37805 x0 := v.Args[1] 37806 if x0.Op != OpAMD64MOVBloadidx1 { 37807 break 37808 } 37809 i0 := x0.AuxInt 37810 if x0.Aux != s { 37811 break 37812 } 37813 _ = x0.Args[2] 37814 if p != x0.Args[0] { 37815 break 37816 } 37817 if idx != x0.Args[1] { 37818 break 37819 } 37820 if mem != x0.Args[2] { 37821 break 37822 } 37823 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37824 break 37825 } 37826 b = mergePoint(b, x0, x1) 37827 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37828 v.reset(OpCopy) 37829 v.AddArg(v0) 37830 v0.AuxInt = i0 37831 v0.Aux = s 37832 v0.AddArg(p) 37833 v0.AddArg(idx) 37834 v0.AddArg(mem) 37835 return true 37836 } 37837 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 37838 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37839 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37840 for { 37841 _ = v.Args[1] 37842 sh := v.Args[0] 37843 if sh.Op != OpAMD64SHLQconst { 37844 break 37845 } 37846 if sh.AuxInt != 8 { 37847 break 37848 } 37849 x1 := sh.Args[0] 37850 if x1.Op != OpAMD64MOVBloadidx1 { 37851 break 37852 } 37853 i1 := x1.AuxInt 37854 s := x1.Aux 37855 _ = x1.Args[2] 37856 idx := 
x1.Args[0] 37857 p := x1.Args[1] 37858 mem := x1.Args[2] 37859 x0 := v.Args[1] 37860 if x0.Op != OpAMD64MOVBloadidx1 { 37861 break 37862 } 37863 i0 := x0.AuxInt 37864 if x0.Aux != s { 37865 break 37866 } 37867 _ = x0.Args[2] 37868 if p != x0.Args[0] { 37869 break 37870 } 37871 if idx != x0.Args[1] { 37872 break 37873 } 37874 if mem != x0.Args[2] { 37875 break 37876 } 37877 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37878 break 37879 } 37880 b = mergePoint(b, x0, x1) 37881 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37882 v.reset(OpCopy) 37883 v.AddArg(v0) 37884 v0.AuxInt = i0 37885 v0.Aux = s 37886 v0.AddArg(p) 37887 v0.AddArg(idx) 37888 v0.AddArg(mem) 37889 return true 37890 } 37891 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 37892 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37893 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37894 for { 37895 _ = v.Args[1] 37896 sh := v.Args[0] 37897 if sh.Op != OpAMD64SHLQconst { 37898 break 37899 } 37900 if sh.AuxInt != 8 { 37901 break 37902 } 37903 x1 := sh.Args[0] 37904 if x1.Op != OpAMD64MOVBloadidx1 { 37905 break 37906 } 37907 i1 := x1.AuxInt 37908 s := x1.Aux 37909 _ = x1.Args[2] 37910 p := x1.Args[0] 37911 idx := x1.Args[1] 37912 mem := x1.Args[2] 37913 x0 := v.Args[1] 37914 if x0.Op != OpAMD64MOVBloadidx1 { 37915 break 37916 } 37917 i0 := x0.AuxInt 37918 if x0.Aux != s { 37919 break 37920 } 37921 _ = x0.Args[2] 37922 if idx != x0.Args[0] { 37923 break 37924 } 37925 if p != x0.Args[1] { 37926 break 37927 } 37928 if mem != x0.Args[2] { 37929 break 37930 } 37931 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37932 break 37933 } 37934 b = mergePoint(b, x0, x1) 37935 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37936 v.reset(OpCopy) 37937 v.AddArg(v0) 37938 v0.AuxInt = i0 37939 v0.Aux = s 37940 v0.AddArg(p) 37941 v0.AddArg(idx) 37942 v0.AddArg(mem) 37943 return true 37944 } 37945 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 37946 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 37947 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 37948 for { 37949 _ = v.Args[1] 37950 sh := v.Args[0] 37951 if sh.Op != OpAMD64SHLQconst { 37952 break 37953 } 37954 if sh.AuxInt != 8 { 37955 break 37956 } 37957 x1 := sh.Args[0] 37958 if x1.Op != OpAMD64MOVBloadidx1 { 37959 break 37960 } 37961 i1 := x1.AuxInt 37962 s := x1.Aux 37963 _ = x1.Args[2] 37964 idx := x1.Args[0] 37965 p := x1.Args[1] 37966 mem := x1.Args[2] 37967 x0 := v.Args[1] 37968 if x0.Op != OpAMD64MOVBloadidx1 { 37969 break 37970 } 37971 i0 := x0.AuxInt 37972 if x0.Aux != s { 37973 break 37974 } 37975 _ = x0.Args[2] 37976 if idx != x0.Args[0] { 37977 break 37978 } 37979 if p != x0.Args[1] { 37980 break 37981 } 37982 if mem != x0.Args[2] { 37983 break 37984 } 37985 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 37986 break 37987 } 37988 b = mergePoint(b, x0, x1) 37989 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 37990 
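// The 8->16 indexed merge appears in eight variants in this stretch: the ORQ
// operands can be swapped, and each load's (p idx) pair can independently
// appear reversed, so the generator spells out all 2x2x2 combinations rather
// than canonicalizing. Both of these SSA shapes denote the same value and
// must both match:
//
//	(ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
//	(ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))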
v.reset(OpCopy) 37991 v.AddArg(v0) 37992 v0.AuxInt = i0 37993 v0.Aux = s 37994 v0.AddArg(p) 37995 v0.AddArg(idx) 37996 v0.AddArg(mem) 37997 return true 37998 } 37999 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 38000 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38001 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38002 for { 38003 _ = v.Args[1] 38004 x0 := v.Args[0] 38005 if x0.Op != OpAMD64MOVWloadidx1 { 38006 break 38007 } 38008 i0 := x0.AuxInt 38009 s := x0.Aux 38010 _ = x0.Args[2] 38011 p := x0.Args[0] 38012 idx := x0.Args[1] 38013 mem := x0.Args[2] 38014 sh := v.Args[1] 38015 if sh.Op != OpAMD64SHLQconst { 38016 break 38017 } 38018 if sh.AuxInt != 16 { 38019 break 38020 } 38021 x1 := sh.Args[0] 38022 if x1.Op != OpAMD64MOVWloadidx1 { 38023 break 38024 } 38025 i1 := x1.AuxInt 38026 if x1.Aux != s { 38027 break 38028 } 38029 _ = x1.Args[2] 38030 if p != x1.Args[0] { 38031 break 38032 } 38033 if idx != x1.Args[1] { 38034 break 38035 } 38036 if mem != x1.Args[2] { 38037 break 38038 } 38039 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38040 break 38041 } 38042 b = mergePoint(b, x0, x1) 38043 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38044 v.reset(OpCopy) 38045 v.AddArg(v0) 38046 v0.AuxInt = i0 38047 v0.Aux = s 38048 v0.AddArg(p) 38049 v0.AddArg(idx) 38050 v0.AddArg(mem) 38051 return true 38052 } 38053 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 38054 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38055 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38056 for { 38057 _ = v.Args[1] 38058 x0 := v.Args[0] 38059 if x0.Op != OpAMD64MOVWloadidx1 { 38060 break 38061 } 38062 i0 := x0.AuxInt 38063 s := x0.Aux 38064 _ = x0.Args[2] 38065 idx := x0.Args[0] 38066 p := x0.Args[1] 38067 mem := x0.Args[2] 38068 sh := v.Args[1] 38069 if sh.Op != OpAMD64SHLQconst { 38070 break 38071 } 38072 if sh.AuxInt != 16 { 38073 break 38074 } 38075 x1 := sh.Args[0] 38076 if x1.Op != OpAMD64MOVWloadidx1 { 38077 break 38078 } 38079 i1 := x1.AuxInt 38080 if x1.Aux != s { 38081 break 38082 } 38083 _ = x1.Args[2] 38084 if p != x1.Args[0] { 38085 break 38086 } 38087 if idx != x1.Args[1] { 38088 break 38089 } 38090 if mem != x1.Args[2] { 38091 break 38092 } 38093 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38094 break 38095 } 38096 b = mergePoint(b, x0, x1) 38097 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38098 v.reset(OpCopy) 38099 v.AddArg(v0) 38100 v0.AuxInt = i0 38101 v0.Aux = s 38102 v0.AddArg(p) 38103 v0.AddArg(idx) 38104 v0.AddArg(mem) 38105 return true 38106 } 38107 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 38108 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38109 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38110 for { 38111 _ = v.Args[1] 38112 x0 := v.Args[0] 38113 if x0.Op != OpAMD64MOVWloadidx1 { 38114 break 38115 } 38116 i0 := x0.AuxInt 38117 s := 
x0.Aux 38118 _ = x0.Args[2] 38119 p := x0.Args[0] 38120 idx := x0.Args[1] 38121 mem := x0.Args[2] 38122 sh := v.Args[1] 38123 if sh.Op != OpAMD64SHLQconst { 38124 break 38125 } 38126 if sh.AuxInt != 16 { 38127 break 38128 } 38129 x1 := sh.Args[0] 38130 if x1.Op != OpAMD64MOVWloadidx1 { 38131 break 38132 } 38133 i1 := x1.AuxInt 38134 if x1.Aux != s { 38135 break 38136 } 38137 _ = x1.Args[2] 38138 if idx != x1.Args[0] { 38139 break 38140 } 38141 if p != x1.Args[1] { 38142 break 38143 } 38144 if mem != x1.Args[2] { 38145 break 38146 } 38147 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38148 break 38149 } 38150 b = mergePoint(b, x0, x1) 38151 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38152 v.reset(OpCopy) 38153 v.AddArg(v0) 38154 v0.AuxInt = i0 38155 v0.Aux = s 38156 v0.AddArg(p) 38157 v0.AddArg(idx) 38158 v0.AddArg(mem) 38159 return true 38160 } 38161 return false 38162 } 38163 func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool { 38164 b := v.Block 38165 _ = b 38166 typ := &b.Func.Config.Types 38167 _ = typ 38168 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 38169 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38170 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38171 for { 38172 _ = v.Args[1] 38173 x0 := v.Args[0] 38174 if x0.Op != OpAMD64MOVWloadidx1 { 38175 break 38176 } 38177 i0 := x0.AuxInt 38178 s := x0.Aux 38179 _ = x0.Args[2] 38180 idx := x0.Args[0] 38181 p := x0.Args[1] 38182 mem := x0.Args[2] 38183 sh := v.Args[1] 38184 if sh.Op != OpAMD64SHLQconst { 38185 break 38186 } 38187 if sh.AuxInt != 16 { 38188 break 38189 } 38190 x1 := sh.Args[0] 38191 if x1.Op != OpAMD64MOVWloadidx1 { 38192 break 38193 } 38194 i1 := x1.AuxInt 38195 if x1.Aux != s { 38196 break 38197 } 38198 _ = x1.Args[2] 38199 if idx != x1.Args[0] { 38200 break 38201 } 38202 if p != x1.Args[1] { 38203 break 38204 } 38205 if mem != x1.Args[2] { 38206 break 38207 } 38208 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38209 break 38210 } 38211 b = mergePoint(b, x0, x1) 38212 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38213 v.reset(OpCopy) 38214 v.AddArg(v0) 38215 v0.AuxInt = i0 38216 v0.Aux = s 38217 v0.AddArg(p) 38218 v0.AddArg(idx) 38219 v0.AddArg(mem) 38220 return true 38221 } 38222 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 38223 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38224 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38225 for { 38226 _ = v.Args[1] 38227 sh := v.Args[0] 38228 if sh.Op != OpAMD64SHLQconst { 38229 break 38230 } 38231 if sh.AuxInt != 16 { 38232 break 38233 } 38234 x1 := sh.Args[0] 38235 if x1.Op != OpAMD64MOVWloadidx1 { 38236 break 38237 } 38238 i1 := x1.AuxInt 38239 s := x1.Aux 38240 _ = x1.Args[2] 38241 p := x1.Args[0] 38242 idx := x1.Args[1] 38243 mem := x1.Args[2] 38244 x0 := v.Args[1] 38245 if x0.Op != OpAMD64MOVWloadidx1 { 38246 break 38247 } 38248 i0 := x0.AuxInt 38249 if x0.Aux != s { 38250 break 38251 } 38252 _ = x0.Args[2] 38253 if p != x0.Args[0] { 38254 break 38255 } 38256 if idx != x0.Args[1] { 38257 break 
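// These rules are the 16->32 step for indexed loads: two adjacent
// MOVWloadidx1 values joined by ORQ and SHLQconst [16] become one
// MOVLloadidx1, again in every p/idx and operand order. A sketch of the
// triggering source:
//
//	func le32at(b []byte, i int) uint32 {
//		return uint32(b[i]) | uint32(b[i+1])<<8 |
//			uint32(b[i+2])<<16 | uint32(b[i+3])<<24 // ends as one MOVLloadidx1
//	}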
38258 } 38259 if mem != x0.Args[2] { 38260 break 38261 } 38262 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38263 break 38264 } 38265 b = mergePoint(b, x0, x1) 38266 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38267 v.reset(OpCopy) 38268 v.AddArg(v0) 38269 v0.AuxInt = i0 38270 v0.Aux = s 38271 v0.AddArg(p) 38272 v0.AddArg(idx) 38273 v0.AddArg(mem) 38274 return true 38275 } 38276 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 38277 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38278 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38279 for { 38280 _ = v.Args[1] 38281 sh := v.Args[0] 38282 if sh.Op != OpAMD64SHLQconst { 38283 break 38284 } 38285 if sh.AuxInt != 16 { 38286 break 38287 } 38288 x1 := sh.Args[0] 38289 if x1.Op != OpAMD64MOVWloadidx1 { 38290 break 38291 } 38292 i1 := x1.AuxInt 38293 s := x1.Aux 38294 _ = x1.Args[2] 38295 idx := x1.Args[0] 38296 p := x1.Args[1] 38297 mem := x1.Args[2] 38298 x0 := v.Args[1] 38299 if x0.Op != OpAMD64MOVWloadidx1 { 38300 break 38301 } 38302 i0 := x0.AuxInt 38303 if x0.Aux != s { 38304 break 38305 } 38306 _ = x0.Args[2] 38307 if p != x0.Args[0] { 38308 break 38309 } 38310 if idx != x0.Args[1] { 38311 break 38312 } 38313 if mem != x0.Args[2] { 38314 break 38315 } 38316 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38317 break 38318 } 38319 b = mergePoint(b, x0, x1) 38320 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38321 v.reset(OpCopy) 38322 v.AddArg(v0) 38323 v0.AuxInt = i0 38324 v0.Aux = s 38325 v0.AddArg(p) 38326 v0.AddArg(idx) 38327 v0.AddArg(mem) 38328 return true 38329 } 38330 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 38331 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38332 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38333 for { 38334 _ = v.Args[1] 38335 sh := v.Args[0] 38336 if sh.Op != OpAMD64SHLQconst { 38337 break 38338 } 38339 if sh.AuxInt != 16 { 38340 break 38341 } 38342 x1 := sh.Args[0] 38343 if x1.Op != OpAMD64MOVWloadidx1 { 38344 break 38345 } 38346 i1 := x1.AuxInt 38347 s := x1.Aux 38348 _ = x1.Args[2] 38349 p := x1.Args[0] 38350 idx := x1.Args[1] 38351 mem := x1.Args[2] 38352 x0 := v.Args[1] 38353 if x0.Op != OpAMD64MOVWloadidx1 { 38354 break 38355 } 38356 i0 := x0.AuxInt 38357 if x0.Aux != s { 38358 break 38359 } 38360 _ = x0.Args[2] 38361 if idx != x0.Args[0] { 38362 break 38363 } 38364 if p != x0.Args[1] { 38365 break 38366 } 38367 if mem != x0.Args[2] { 38368 break 38369 } 38370 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38371 break 38372 } 38373 b = mergePoint(b, x0, x1) 38374 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38375 v.reset(OpCopy) 38376 v.AddArg(v0) 38377 v0.AuxInt = i0 38378 v0.Aux = s 38379 v0.AddArg(p) 38380 v0.AddArg(idx) 38381 v0.AddArg(mem) 38382 return true 38383 } 38384 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 38385 // cond: i1 == i0+2 && x0.Uses == 1 
&& x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38386 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 38387 for { 38388 _ = v.Args[1] 38389 sh := v.Args[0] 38390 if sh.Op != OpAMD64SHLQconst { 38391 break 38392 } 38393 if sh.AuxInt != 16 { 38394 break 38395 } 38396 x1 := sh.Args[0] 38397 if x1.Op != OpAMD64MOVWloadidx1 { 38398 break 38399 } 38400 i1 := x1.AuxInt 38401 s := x1.Aux 38402 _ = x1.Args[2] 38403 idx := x1.Args[0] 38404 p := x1.Args[1] 38405 mem := x1.Args[2] 38406 x0 := v.Args[1] 38407 if x0.Op != OpAMD64MOVWloadidx1 { 38408 break 38409 } 38410 i0 := x0.AuxInt 38411 if x0.Aux != s { 38412 break 38413 } 38414 _ = x0.Args[2] 38415 if idx != x0.Args[0] { 38416 break 38417 } 38418 if p != x0.Args[1] { 38419 break 38420 } 38421 if mem != x0.Args[2] { 38422 break 38423 } 38424 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38425 break 38426 } 38427 b = mergePoint(b, x0, x1) 38428 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38429 v.reset(OpCopy) 38430 v.AddArg(v0) 38431 v0.AuxInt = i0 38432 v0.Aux = s 38433 v0.AddArg(p) 38434 v0.AddArg(idx) 38435 v0.AddArg(mem) 38436 return true 38437 } 38438 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 38439 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38440 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38441 for { 38442 _ = v.Args[1] 38443 x0 := v.Args[0] 38444 if x0.Op != OpAMD64MOVLloadidx1 { 38445 break 38446 } 38447 i0 := x0.AuxInt 38448 s := x0.Aux 38449 _ = x0.Args[2] 38450 p := x0.Args[0] 38451 idx := x0.Args[1] 38452 mem := x0.Args[2] 38453 sh := v.Args[1] 38454 if sh.Op != OpAMD64SHLQconst { 38455 break 38456 } 38457 if sh.AuxInt != 32 { 38458 break 38459 } 38460 x1 := sh.Args[0] 38461 if x1.Op != OpAMD64MOVLloadidx1 { 38462 break 38463 } 38464 i1 := x1.AuxInt 38465 if x1.Aux != s { 38466 break 38467 } 38468 _ = x1.Args[2] 38469 if p != x1.Args[0] { 38470 break 38471 } 38472 if idx != x1.Args[1] { 38473 break 38474 } 38475 if mem != x1.Args[2] { 38476 break 38477 } 38478 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38479 break 38480 } 38481 b = mergePoint(b, x0, x1) 38482 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38483 v.reset(OpCopy) 38484 v.AddArg(v0) 38485 v0.AuxInt = i0 38486 v0.Aux = s 38487 v0.AddArg(p) 38488 v0.AddArg(idx) 38489 v0.AddArg(mem) 38490 return true 38491 } 38492 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 38493 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38494 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38495 for { 38496 _ = v.Args[1] 38497 x0 := v.Args[0] 38498 if x0.Op != OpAMD64MOVLloadidx1 { 38499 break 38500 } 38501 i0 := x0.AuxInt 38502 s := x0.Aux 38503 _ = x0.Args[2] 38504 idx := x0.Args[0] 38505 p := x0.Args[1] 38506 mem := x0.Args[2] 38507 sh := v.Args[1] 38508 if sh.Op != OpAMD64SHLQconst { 38509 break 38510 } 38511 if sh.AuxInt != 32 { 38512 break 38513 } 38514 x1 := sh.Args[0] 38515 if x1.Op != OpAMD64MOVLloadidx1 { 38516 break 38517 } 38518 i1 := 
x1.AuxInt 38519 if x1.Aux != s { 38520 break 38521 } 38522 _ = x1.Args[2] 38523 if p != x1.Args[0] { 38524 break 38525 } 38526 if idx != x1.Args[1] { 38527 break 38528 } 38529 if mem != x1.Args[2] { 38530 break 38531 } 38532 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38533 break 38534 } 38535 b = mergePoint(b, x0, x1) 38536 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38537 v.reset(OpCopy) 38538 v.AddArg(v0) 38539 v0.AuxInt = i0 38540 v0.Aux = s 38541 v0.AddArg(p) 38542 v0.AddArg(idx) 38543 v0.AddArg(mem) 38544 return true 38545 } 38546 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 38547 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38548 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38549 for { 38550 _ = v.Args[1] 38551 x0 := v.Args[0] 38552 if x0.Op != OpAMD64MOVLloadidx1 { 38553 break 38554 } 38555 i0 := x0.AuxInt 38556 s := x0.Aux 38557 _ = x0.Args[2] 38558 p := x0.Args[0] 38559 idx := x0.Args[1] 38560 mem := x0.Args[2] 38561 sh := v.Args[1] 38562 if sh.Op != OpAMD64SHLQconst { 38563 break 38564 } 38565 if sh.AuxInt != 32 { 38566 break 38567 } 38568 x1 := sh.Args[0] 38569 if x1.Op != OpAMD64MOVLloadidx1 { 38570 break 38571 } 38572 i1 := x1.AuxInt 38573 if x1.Aux != s { 38574 break 38575 } 38576 _ = x1.Args[2] 38577 if idx != x1.Args[0] { 38578 break 38579 } 38580 if p != x1.Args[1] { 38581 break 38582 } 38583 if mem != x1.Args[2] { 38584 break 38585 } 38586 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38587 break 38588 } 38589 b = mergePoint(b, x0, x1) 38590 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38591 v.reset(OpCopy) 38592 v.AddArg(v0) 38593 v0.AuxInt = i0 38594 v0.Aux = s 38595 v0.AddArg(p) 38596 v0.AddArg(idx) 38597 v0.AddArg(mem) 38598 return true 38599 } 38600 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 38601 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38602 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38603 for { 38604 _ = v.Args[1] 38605 x0 := v.Args[0] 38606 if x0.Op != OpAMD64MOVLloadidx1 { 38607 break 38608 } 38609 i0 := x0.AuxInt 38610 s := x0.Aux 38611 _ = x0.Args[2] 38612 idx := x0.Args[0] 38613 p := x0.Args[1] 38614 mem := x0.Args[2] 38615 sh := v.Args[1] 38616 if sh.Op != OpAMD64SHLQconst { 38617 break 38618 } 38619 if sh.AuxInt != 32 { 38620 break 38621 } 38622 x1 := sh.Args[0] 38623 if x1.Op != OpAMD64MOVLloadidx1 { 38624 break 38625 } 38626 i1 := x1.AuxInt 38627 if x1.Aux != s { 38628 break 38629 } 38630 _ = x1.Args[2] 38631 if idx != x1.Args[0] { 38632 break 38633 } 38634 if p != x1.Args[1] { 38635 break 38636 } 38637 if mem != x1.Args[2] { 38638 break 38639 } 38640 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38641 break 38642 } 38643 b = mergePoint(b, x0, x1) 38644 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38645 v.reset(OpCopy) 38646 v.AddArg(v0) 38647 v0.AuxInt = i0 38648 v0.Aux = s 38649 v0.AddArg(p) 38650 v0.AddArg(idx) 38651 v0.AddArg(mem) 38652 return true 
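// The 32->64 indexed step: two adjacent MOVLloadidx1 loads become one
// MOVQloadidx1, completing the ladder that turns a byte-by-byte
// little-endian uint64 read at a variable index into a single 8-byte load
// (le32at is the sketch above, not a function in this package):
//
//	func le64at(b []byte, i int) uint64 {
//		return uint64(le32at(b, i)) | uint64(le32at(b, i+4))<<32
//	}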
38653 } 38654 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 38655 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38656 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38657 for { 38658 _ = v.Args[1] 38659 sh := v.Args[0] 38660 if sh.Op != OpAMD64SHLQconst { 38661 break 38662 } 38663 if sh.AuxInt != 32 { 38664 break 38665 } 38666 x1 := sh.Args[0] 38667 if x1.Op != OpAMD64MOVLloadidx1 { 38668 break 38669 } 38670 i1 := x1.AuxInt 38671 s := x1.Aux 38672 _ = x1.Args[2] 38673 p := x1.Args[0] 38674 idx := x1.Args[1] 38675 mem := x1.Args[2] 38676 x0 := v.Args[1] 38677 if x0.Op != OpAMD64MOVLloadidx1 { 38678 break 38679 } 38680 i0 := x0.AuxInt 38681 if x0.Aux != s { 38682 break 38683 } 38684 _ = x0.Args[2] 38685 if p != x0.Args[0] { 38686 break 38687 } 38688 if idx != x0.Args[1] { 38689 break 38690 } 38691 if mem != x0.Args[2] { 38692 break 38693 } 38694 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38695 break 38696 } 38697 b = mergePoint(b, x0, x1) 38698 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38699 v.reset(OpCopy) 38700 v.AddArg(v0) 38701 v0.AuxInt = i0 38702 v0.Aux = s 38703 v0.AddArg(p) 38704 v0.AddArg(idx) 38705 v0.AddArg(mem) 38706 return true 38707 } 38708 return false 38709 } 38710 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { 38711 b := v.Block 38712 _ = b 38713 typ := &b.Func.Config.Types 38714 _ = typ 38715 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 38716 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38717 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38718 for { 38719 _ = v.Args[1] 38720 sh := v.Args[0] 38721 if sh.Op != OpAMD64SHLQconst { 38722 break 38723 } 38724 if sh.AuxInt != 32 { 38725 break 38726 } 38727 x1 := sh.Args[0] 38728 if x1.Op != OpAMD64MOVLloadidx1 { 38729 break 38730 } 38731 i1 := x1.AuxInt 38732 s := x1.Aux 38733 _ = x1.Args[2] 38734 idx := x1.Args[0] 38735 p := x1.Args[1] 38736 mem := x1.Args[2] 38737 x0 := v.Args[1] 38738 if x0.Op != OpAMD64MOVLloadidx1 { 38739 break 38740 } 38741 i0 := x0.AuxInt 38742 if x0.Aux != s { 38743 break 38744 } 38745 _ = x0.Args[2] 38746 if p != x0.Args[0] { 38747 break 38748 } 38749 if idx != x0.Args[1] { 38750 break 38751 } 38752 if mem != x0.Args[2] { 38753 break 38754 } 38755 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38756 break 38757 } 38758 b = mergePoint(b, x0, x1) 38759 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38760 v.reset(OpCopy) 38761 v.AddArg(v0) 38762 v0.AuxInt = i0 38763 v0.Aux = s 38764 v0.AddArg(p) 38765 v0.AddArg(idx) 38766 v0.AddArg(mem) 38767 return true 38768 } 38769 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 38770 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38771 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38772 for { 38773 _ = v.Args[1] 38774 sh := v.Args[0] 38775 if sh.Op != OpAMD64SHLQconst { 38776 break 38777 } 38778 if sh.AuxInt != 32 
{ 38779 break 38780 } 38781 x1 := sh.Args[0] 38782 if x1.Op != OpAMD64MOVLloadidx1 { 38783 break 38784 } 38785 i1 := x1.AuxInt 38786 s := x1.Aux 38787 _ = x1.Args[2] 38788 p := x1.Args[0] 38789 idx := x1.Args[1] 38790 mem := x1.Args[2] 38791 x0 := v.Args[1] 38792 if x0.Op != OpAMD64MOVLloadidx1 { 38793 break 38794 } 38795 i0 := x0.AuxInt 38796 if x0.Aux != s { 38797 break 38798 } 38799 _ = x0.Args[2] 38800 if idx != x0.Args[0] { 38801 break 38802 } 38803 if p != x0.Args[1] { 38804 break 38805 } 38806 if mem != x0.Args[2] { 38807 break 38808 } 38809 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38810 break 38811 } 38812 b = mergePoint(b, x0, x1) 38813 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38814 v.reset(OpCopy) 38815 v.AddArg(v0) 38816 v0.AuxInt = i0 38817 v0.Aux = s 38818 v0.AddArg(p) 38819 v0.AddArg(idx) 38820 v0.AddArg(mem) 38821 return true 38822 } 38823 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 38824 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38825 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38826 for { 38827 _ = v.Args[1] 38828 sh := v.Args[0] 38829 if sh.Op != OpAMD64SHLQconst { 38830 break 38831 } 38832 if sh.AuxInt != 32 { 38833 break 38834 } 38835 x1 := sh.Args[0] 38836 if x1.Op != OpAMD64MOVLloadidx1 { 38837 break 38838 } 38839 i1 := x1.AuxInt 38840 s := x1.Aux 38841 _ = x1.Args[2] 38842 idx := x1.Args[0] 38843 p := x1.Args[1] 38844 mem := x1.Args[2] 38845 x0 := v.Args[1] 38846 if x0.Op != OpAMD64MOVLloadidx1 { 38847 break 38848 } 38849 i0 := x0.AuxInt 38850 if x0.Aux != s { 38851 break 38852 } 38853 _ = x0.Args[2] 38854 if idx != x0.Args[0] { 38855 break 38856 } 38857 if p != x0.Args[1] { 38858 break 38859 } 38860 if mem != x0.Args[2] { 38861 break 38862 } 38863 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38864 break 38865 } 38866 b = mergePoint(b, x0, x1) 38867 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38868 v.reset(OpCopy) 38869 v.AddArg(v0) 38870 v0.AuxInt = i0 38871 v0.Aux = s 38872 v0.AddArg(p) 38873 v0.AddArg(idx) 38874 v0.AddArg(mem) 38875 return true 38876 } 38877 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 38878 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 38879 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 38880 for { 38881 _ = v.Args[1] 38882 s1 := v.Args[0] 38883 if s1.Op != OpAMD64SHLQconst { 38884 break 38885 } 38886 j1 := s1.AuxInt 38887 x1 := s1.Args[0] 38888 if x1.Op != OpAMD64MOVBloadidx1 { 38889 break 38890 } 38891 i1 := x1.AuxInt 38892 s := x1.Aux 38893 _ = x1.Args[2] 38894 p := x1.Args[0] 38895 idx := x1.Args[1] 38896 mem := x1.Args[2] 38897 or := v.Args[1] 38898 if or.Op != OpAMD64ORQ { 38899 break 38900 } 38901 _ = or.Args[1] 38902 s0 := or.Args[0] 38903 if s0.Op != OpAMD64SHLQconst { 38904 break 38905 } 38906 j0 := s0.AuxInt 38907 x0 := s0.Args[0] 38908 if x0.Op != OpAMD64MOVBloadidx1 { 38909 
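// A note on the helpers used in these conditions (defined in rewrite.go of
// this package): mergePoint(b, x0, x1) picks the block where the merged load
// may be placed, roughly the latest block dominated by the blocks of both
// original loads, and returns nil when no such block exists; clobber(v)
// resets v to an invalid op so dead-code elimination removes it once unused,
// and always reports true so it can be chained inside a rule's boolean
// condition, as in
//
//	mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)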
break 38910 } 38911 i0 := x0.AuxInt 38912 if x0.Aux != s { 38913 break 38914 } 38915 _ = x0.Args[2] 38916 if p != x0.Args[0] { 38917 break 38918 } 38919 if idx != x0.Args[1] { 38920 break 38921 } 38922 if mem != x0.Args[2] { 38923 break 38924 } 38925 y := or.Args[1] 38926 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 38927 break 38928 } 38929 b = mergePoint(b, x0, x1) 38930 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38931 v.reset(OpCopy) 38932 v.AddArg(v0) 38933 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38934 v1.AuxInt = j0 38935 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 38936 v2.AuxInt = i0 38937 v2.Aux = s 38938 v2.AddArg(p) 38939 v2.AddArg(idx) 38940 v2.AddArg(mem) 38941 v1.AddArg(v2) 38942 v0.AddArg(v1) 38943 v0.AddArg(y) 38944 return true 38945 } 38946 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 38947 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 38948 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 38949 for { 38950 _ = v.Args[1] 38951 s1 := v.Args[0] 38952 if s1.Op != OpAMD64SHLQconst { 38953 break 38954 } 38955 j1 := s1.AuxInt 38956 x1 := s1.Args[0] 38957 if x1.Op != OpAMD64MOVBloadidx1 { 38958 break 38959 } 38960 i1 := x1.AuxInt 38961 s := x1.Aux 38962 _ = x1.Args[2] 38963 idx := x1.Args[0] 38964 p := x1.Args[1] 38965 mem := x1.Args[2] 38966 or := v.Args[1] 38967 if or.Op != OpAMD64ORQ { 38968 break 38969 } 38970 _ = or.Args[1] 38971 s0 := or.Args[0] 38972 if s0.Op != OpAMD64SHLQconst { 38973 break 38974 } 38975 j0 := s0.AuxInt 38976 x0 := s0.Args[0] 38977 if x0.Op != OpAMD64MOVBloadidx1 { 38978 break 38979 } 38980 i0 := x0.AuxInt 38981 if x0.Aux != s { 38982 break 38983 } 38984 _ = x0.Args[2] 38985 if p != x0.Args[0] { 38986 break 38987 } 38988 if idx != x0.Args[1] { 38989 break 38990 } 38991 if mem != x0.Args[2] { 38992 break 38993 } 38994 y := or.Args[1] 38995 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 38996 break 38997 } 38998 b = mergePoint(b, x0, x1) 38999 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39000 v.reset(OpCopy) 39001 v.AddArg(v0) 39002 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39003 v1.AuxInt = j0 39004 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39005 v2.AuxInt = i0 39006 v2.Aux = s 39007 v2.AddArg(p) 39008 v2.AddArg(idx) 39009 v2.AddArg(mem) 39010 v1.AddArg(v2) 39011 v0.AddArg(v1) 39012 v0.AddArg(y) 39013 return true 39014 } 39015 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 39016 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39017 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] 
{s} p idx mem)) y) 39018 for { 39019 _ = v.Args[1] 39020 s1 := v.Args[0] 39021 if s1.Op != OpAMD64SHLQconst { 39022 break 39023 } 39024 j1 := s1.AuxInt 39025 x1 := s1.Args[0] 39026 if x1.Op != OpAMD64MOVBloadidx1 { 39027 break 39028 } 39029 i1 := x1.AuxInt 39030 s := x1.Aux 39031 _ = x1.Args[2] 39032 p := x1.Args[0] 39033 idx := x1.Args[1] 39034 mem := x1.Args[2] 39035 or := v.Args[1] 39036 if or.Op != OpAMD64ORQ { 39037 break 39038 } 39039 _ = or.Args[1] 39040 s0 := or.Args[0] 39041 if s0.Op != OpAMD64SHLQconst { 39042 break 39043 } 39044 j0 := s0.AuxInt 39045 x0 := s0.Args[0] 39046 if x0.Op != OpAMD64MOVBloadidx1 { 39047 break 39048 } 39049 i0 := x0.AuxInt 39050 if x0.Aux != s { 39051 break 39052 } 39053 _ = x0.Args[2] 39054 if idx != x0.Args[0] { 39055 break 39056 } 39057 if p != x0.Args[1] { 39058 break 39059 } 39060 if mem != x0.Args[2] { 39061 break 39062 } 39063 y := or.Args[1] 39064 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39065 break 39066 } 39067 b = mergePoint(b, x0, x1) 39068 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39069 v.reset(OpCopy) 39070 v.AddArg(v0) 39071 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39072 v1.AuxInt = j0 39073 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39074 v2.AuxInt = i0 39075 v2.Aux = s 39076 v2.AddArg(p) 39077 v2.AddArg(idx) 39078 v2.AddArg(mem) 39079 v1.AddArg(v2) 39080 v0.AddArg(v1) 39081 v0.AddArg(y) 39082 return true 39083 } 39084 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 39085 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39086 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39087 for { 39088 _ = v.Args[1] 39089 s1 := v.Args[0] 39090 if s1.Op != OpAMD64SHLQconst { 39091 break 39092 } 39093 j1 := s1.AuxInt 39094 x1 := s1.Args[0] 39095 if x1.Op != OpAMD64MOVBloadidx1 { 39096 break 39097 } 39098 i1 := x1.AuxInt 39099 s := x1.Aux 39100 _ = x1.Args[2] 39101 idx := x1.Args[0] 39102 p := x1.Args[1] 39103 mem := x1.Args[2] 39104 or := v.Args[1] 39105 if or.Op != OpAMD64ORQ { 39106 break 39107 } 39108 _ = or.Args[1] 39109 s0 := or.Args[0] 39110 if s0.Op != OpAMD64SHLQconst { 39111 break 39112 } 39113 j0 := s0.AuxInt 39114 x0 := s0.Args[0] 39115 if x0.Op != OpAMD64MOVBloadidx1 { 39116 break 39117 } 39118 i0 := x0.AuxInt 39119 if x0.Aux != s { 39120 break 39121 } 39122 _ = x0.Args[2] 39123 if idx != x0.Args[0] { 39124 break 39125 } 39126 if p != x0.Args[1] { 39127 break 39128 } 39129 if mem != x0.Args[2] { 39130 break 39131 } 39132 y := or.Args[1] 39133 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39134 break 39135 } 39136 b = mergePoint(b, x0, x1) 39137 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39138 v.reset(OpCopy) 39139 v.AddArg(v0) 39140 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39141 v1.AuxInt = j0 39142 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39143 v2.AuxInt = i0 39144 v2.Aux = s 
39145 v2.AddArg(p) 39146 v2.AddArg(idx) 39147 v2.AddArg(mem) 39148 v1.AddArg(v2) 39149 v0.AddArg(v1) 39150 v0.AddArg(y) 39151 return true 39152 } 39153 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 39154 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39155 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39156 for { 39157 _ = v.Args[1] 39158 s1 := v.Args[0] 39159 if s1.Op != OpAMD64SHLQconst { 39160 break 39161 } 39162 j1 := s1.AuxInt 39163 x1 := s1.Args[0] 39164 if x1.Op != OpAMD64MOVBloadidx1 { 39165 break 39166 } 39167 i1 := x1.AuxInt 39168 s := x1.Aux 39169 _ = x1.Args[2] 39170 p := x1.Args[0] 39171 idx := x1.Args[1] 39172 mem := x1.Args[2] 39173 or := v.Args[1] 39174 if or.Op != OpAMD64ORQ { 39175 break 39176 } 39177 _ = or.Args[1] 39178 y := or.Args[0] 39179 s0 := or.Args[1] 39180 if s0.Op != OpAMD64SHLQconst { 39181 break 39182 } 39183 j0 := s0.AuxInt 39184 x0 := s0.Args[0] 39185 if x0.Op != OpAMD64MOVBloadidx1 { 39186 break 39187 } 39188 i0 := x0.AuxInt 39189 if x0.Aux != s { 39190 break 39191 } 39192 _ = x0.Args[2] 39193 if p != x0.Args[0] { 39194 break 39195 } 39196 if idx != x0.Args[1] { 39197 break 39198 } 39199 if mem != x0.Args[2] { 39200 break 39201 } 39202 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39203 break 39204 } 39205 b = mergePoint(b, x0, x1) 39206 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39207 v.reset(OpCopy) 39208 v.AddArg(v0) 39209 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39210 v1.AuxInt = j0 39211 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39212 v2.AuxInt = i0 39213 v2.Aux = s 39214 v2.AddArg(p) 39215 v2.AddArg(idx) 39216 v2.AddArg(mem) 39217 v1.AddArg(v2) 39218 v0.AddArg(v1) 39219 v0.AddArg(y) 39220 return true 39221 } 39222 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 39223 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39224 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39225 for { 39226 _ = v.Args[1] 39227 s1 := v.Args[0] 39228 if s1.Op != OpAMD64SHLQconst { 39229 break 39230 } 39231 j1 := s1.AuxInt 39232 x1 := s1.Args[0] 39233 if x1.Op != OpAMD64MOVBloadidx1 { 39234 break 39235 } 39236 i1 := x1.AuxInt 39237 s := x1.Aux 39238 _ = x1.Args[2] 39239 idx := x1.Args[0] 39240 p := x1.Args[1] 39241 mem := x1.Args[2] 39242 or := v.Args[1] 39243 if or.Op != OpAMD64ORQ { 39244 break 39245 } 39246 _ = or.Args[1] 39247 y := or.Args[0] 39248 s0 := or.Args[1] 39249 if s0.Op != OpAMD64SHLQconst { 39250 break 39251 } 39252 j0 := s0.AuxInt 39253 x0 := s0.Args[0] 39254 if x0.Op != OpAMD64MOVBloadidx1 { 39255 break 39256 } 39257 i0 := x0.AuxInt 39258 if x0.Aux != s { 39259 break 39260 } 39261 _ = x0.Args[2] 39262 if p != x0.Args[0] { 39263 break 39264 } 39265 if idx != x0.Args[1] { 
39266 break 39267 } 39268 if mem != x0.Args[2] { 39269 break 39270 } 39271 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39272 break 39273 } 39274 b = mergePoint(b, x0, x1) 39275 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39276 v.reset(OpCopy) 39277 v.AddArg(v0) 39278 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39279 v1.AuxInt = j0 39280 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39281 v2.AuxInt = i0 39282 v2.Aux = s 39283 v2.AddArg(p) 39284 v2.AddArg(idx) 39285 v2.AddArg(mem) 39286 v1.AddArg(v2) 39287 v0.AddArg(v1) 39288 v0.AddArg(y) 39289 return true 39290 } 39291 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 39292 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39293 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39294 for { 39295 _ = v.Args[1] 39296 s1 := v.Args[0] 39297 if s1.Op != OpAMD64SHLQconst { 39298 break 39299 } 39300 j1 := s1.AuxInt 39301 x1 := s1.Args[0] 39302 if x1.Op != OpAMD64MOVBloadidx1 { 39303 break 39304 } 39305 i1 := x1.AuxInt 39306 s := x1.Aux 39307 _ = x1.Args[2] 39308 p := x1.Args[0] 39309 idx := x1.Args[1] 39310 mem := x1.Args[2] 39311 or := v.Args[1] 39312 if or.Op != OpAMD64ORQ { 39313 break 39314 } 39315 _ = or.Args[1] 39316 y := or.Args[0] 39317 s0 := or.Args[1] 39318 if s0.Op != OpAMD64SHLQconst { 39319 break 39320 } 39321 j0 := s0.AuxInt 39322 x0 := s0.Args[0] 39323 if x0.Op != OpAMD64MOVBloadidx1 { 39324 break 39325 } 39326 i0 := x0.AuxInt 39327 if x0.Aux != s { 39328 break 39329 } 39330 _ = x0.Args[2] 39331 if idx != x0.Args[0] { 39332 break 39333 } 39334 if p != x0.Args[1] { 39335 break 39336 } 39337 if mem != x0.Args[2] { 39338 break 39339 } 39340 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39341 break 39342 } 39343 b = mergePoint(b, x0, x1) 39344 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39345 v.reset(OpCopy) 39346 v.AddArg(v0) 39347 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39348 v1.AuxInt = j0 39349 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39350 v2.AuxInt = i0 39351 v2.Aux = s 39352 v2.AddArg(p) 39353 v2.AddArg(idx) 39354 v2.AddArg(mem) 39355 v1.AddArg(v2) 39356 v0.AddArg(v1) 39357 v0.AddArg(y) 39358 return true 39359 } 39360 return false 39361 } 39362 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { 39363 b := v.Block 39364 _ = b 39365 typ := &b.Func.Config.Types 39366 _ = typ 39367 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 39368 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39369 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39370 for { 
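// Editorial sketch (not part of the generated rules; hypothetical helper
// name): the MOVBloadidx1 rules above and below fuse two byte loads at
// adjacent offsets (i1 == i0+1), already combined with a shift and OR, into
// a single 16-bit MOVWloadidx1. At the source level the matched shape is a
// little-endian 16-bit read:
func exampleLoadLE16(b []byte) uint16 {
	// (ORQ (SHLQconst [8] (MOVBloadidx1 [i0+1] ...)) (MOVBloadidx1 [i0] ...))
	// is rewritten to a single (MOVWloadidx1 [i0] ...).
	return uint16(b[0]) | uint16(b[1])<<8
}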
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
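	// Editorial note: the cases below are the same MOVBloadidx1 merge with the
	// commutative operand orders enumerated explicitly; p and idx swap roles
	// in x0, x1, or both, and the inner ORQ may carry y on either side.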
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
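// Editorial sketch (not part of the generated rules; hypothetical helper
// name): rewriteValueAMD64_OpAMD64ORQ_80 continues with the MOVWloadidx1
// variants, which fuse two adjacent 16-bit loads (i1 == i0+2, j1 == j0+16)
// into one 32-bit MOVLloadidx1, the 32-bit analogue of the byte case:
func exampleLoadLE32(b []byte) uint32 {
	lo := uint32(b[0]) | uint32(b[1])<<8 // low MOVWloadidx1 half
	hi := uint32(b[2]) | uint32(b[3])<<8 // high half, shifted left by 16
	return lo | hi<<16                   // fused into one MOVLloadidx1
}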
func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
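	// Editorial note: as in the byte cases, the remaining MOVWloadidx1 rules
	// spell out the p/idx and operand-order permutations of the same merge.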
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
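// Editorial sketch (not part of the generated rules; hypothetical helper
// name): after finishing the MOVWloadidx1 permutations, the rules in
// rewriteValueAMD64_OpAMD64ORQ_90 turn to byte-reversed loads: adjacent byte
// loads OR'ed together in big-endian order become one wide little-endian
// load followed by a byte swap (ROLWconst [8] for 16 bits, BSWAPL for 32):
func exampleLoadBE16(b []byte) uint16 {
	// (ORQ (MOVBload [i0+1] ...) (SHLQconst [8] (MOVBload [i0] ...)))
	// is rewritten to (ROLWconst [8] (MOVWload [i0] ...)).
	return uint16(b[0])<<8 | uint16(b[1])
}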
x0.Args[1] 40786 mem := x0.Args[2] 40787 s1 := v.Args[1] 40788 if s1.Op != OpAMD64SHLQconst { 40789 break 40790 } 40791 j1 := s1.AuxInt 40792 x1 := s1.Args[0] 40793 if x1.Op != OpAMD64MOVWloadidx1 { 40794 break 40795 } 40796 i1 := x1.AuxInt 40797 if x1.Aux != s { 40798 break 40799 } 40800 _ = x1.Args[2] 40801 if p != x1.Args[0] { 40802 break 40803 } 40804 if idx != x1.Args[1] { 40805 break 40806 } 40807 if mem != x1.Args[2] { 40808 break 40809 } 40810 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40811 break 40812 } 40813 b = mergePoint(b, x0, x1) 40814 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40815 v.reset(OpCopy) 40816 v.AddArg(v0) 40817 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40818 v1.AuxInt = j0 40819 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40820 v2.AuxInt = i0 40821 v2.Aux = s 40822 v2.AddArg(p) 40823 v2.AddArg(idx) 40824 v2.AddArg(mem) 40825 v1.AddArg(v2) 40826 v0.AddArg(v1) 40827 v0.AddArg(y) 40828 return true 40829 } 40830 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40831 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40832 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40833 for { 40834 _ = v.Args[1] 40835 or := v.Args[0] 40836 if or.Op != OpAMD64ORQ { 40837 break 40838 } 40839 _ = or.Args[1] 40840 s0 := or.Args[0] 40841 if s0.Op != OpAMD64SHLQconst { 40842 break 40843 } 40844 j0 := s0.AuxInt 40845 x0 := s0.Args[0] 40846 if x0.Op != OpAMD64MOVWloadidx1 { 40847 break 40848 } 40849 i0 := x0.AuxInt 40850 s := x0.Aux 40851 _ = x0.Args[2] 40852 p := x0.Args[0] 40853 idx := x0.Args[1] 40854 mem := x0.Args[2] 40855 y := or.Args[1] 40856 s1 := v.Args[1] 40857 if s1.Op != OpAMD64SHLQconst { 40858 break 40859 } 40860 j1 := s1.AuxInt 40861 x1 := s1.Args[0] 40862 if x1.Op != OpAMD64MOVWloadidx1 { 40863 break 40864 } 40865 i1 := x1.AuxInt 40866 if x1.Aux != s { 40867 break 40868 } 40869 _ = x1.Args[2] 40870 if idx != x1.Args[0] { 40871 break 40872 } 40873 if p != x1.Args[1] { 40874 break 40875 } 40876 if mem != x1.Args[2] { 40877 break 40878 } 40879 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40880 break 40881 } 40882 b = mergePoint(b, x0, x1) 40883 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40884 v.reset(OpCopy) 40885 v.AddArg(v0) 40886 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40887 v1.AuxInt = j0 40888 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40889 v2.AuxInt = i0 40890 v2.Aux = s 40891 v2.AddArg(p) 40892 v2.AddArg(idx) 40893 v2.AddArg(mem) 40894 v1.AddArg(v2) 40895 v0.AddArg(v1) 40896 v0.AddArg(y) 40897 return true 40898 } 40899 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40900 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40901 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40902 for { 40903 _ = v.Args[1] 40904 or := v.Args[0] 40905 if or.Op != OpAMD64ORQ { 40906 break 40907 } 40908 _ = or.Args[1] 40909 s0 := or.Args[0] 40910 if s0.Op != OpAMD64SHLQconst { 40911 break 40912 } 40913 j0 := s0.AuxInt 40914 x0 := s0.Args[0] 40915 if x0.Op != OpAMD64MOVWloadidx1 { 40916 break 40917 } 40918 i0 := x0.AuxInt 40919 s := x0.Aux 40920 _ = x0.Args[2] 40921 idx := x0.Args[0] 40922 p := x0.Args[1] 40923 mem := x0.Args[2] 40924 y := or.Args[1] 40925 s1 := v.Args[1] 40926 if s1.Op != OpAMD64SHLQconst { 40927 break 40928 } 40929 j1 := s1.AuxInt 40930 x1 := s1.Args[0] 40931 if x1.Op != OpAMD64MOVWloadidx1 { 40932 break 40933 } 40934 i1 := x1.AuxInt 40935 if x1.Aux != s { 40936 break 40937 } 40938 _ = x1.Args[2] 40939 if idx != x1.Args[0] { 40940 break 40941 } 40942 if p != x1.Args[1] { 40943 break 40944 } 40945 if mem != x1.Args[2] { 40946 break 40947 } 40948 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40949 break 40950 } 40951 b = mergePoint(b, x0, x1) 40952 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40953 v.reset(OpCopy) 40954 v.AddArg(v0) 40955 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40956 v1.AuxInt = j0 40957 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40958 v2.AuxInt = i0 40959 v2.Aux = s 40960 v2.AddArg(p) 40961 v2.AddArg(idx) 40962 v2.AddArg(mem) 40963 v1.AddArg(v2) 40964 v0.AddArg(v1) 40965 v0.AddArg(y) 40966 return true 40967 } 40968 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40969 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40970 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40971 for { 40972 _ = v.Args[1] 40973 or := v.Args[0] 40974 if or.Op != OpAMD64ORQ { 40975 break 40976 } 40977 _ = or.Args[1] 40978 y := or.Args[0] 40979 s0 := or.Args[1] 40980 if s0.Op != OpAMD64SHLQconst { 40981 break 40982 } 40983 j0 := s0.AuxInt 40984 x0 := s0.Args[0] 40985 if x0.Op != OpAMD64MOVWloadidx1 { 40986 break 40987 } 40988 i0 := x0.AuxInt 40989 s := x0.Aux 40990 _ = x0.Args[2] 40991 p := x0.Args[0] 40992 idx := x0.Args[1] 40993 mem := x0.Args[2] 40994 s1 := v.Args[1] 40995 if s1.Op != OpAMD64SHLQconst { 40996 break 40997 } 40998 j1 := s1.AuxInt 40999 x1 := s1.Args[0] 41000 if x1.Op != OpAMD64MOVWloadidx1 { 41001 break 41002 } 41003 i1 := x1.AuxInt 41004 if x1.Aux != s { 41005 break 41006 } 41007 _ = x1.Args[2] 41008 if idx != x1.Args[0] { 41009 break 41010 } 41011 if p != x1.Args[1] { 41012 break 41013 } 41014 if mem != x1.Args[2] { 41015 break 41016 } 41017 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 41018 break 41019 } 41020 b = mergePoint(b, x0, x1) 41021 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41022 
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
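	// Editor's note: the rules from here on merge *byte-reversed* (big-endian)
	// loads. Combining two adjacent bytes as hi<<8|lo is recognized and
	// replaced by one 16-bit load whose halves are swapped with ROLWconst [8].
	// A sketch (le16 is a hypothetical little-endian load helper):
	//
	//	uint16(b[i])<<8 | uint16(b[i+1]) // big-endian read
	//	// becomes, roughly, bits.RotateLeft16(le16(b, i), 8)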
	// match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
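	// Editor's note: the same byte-reversal merging appears to scale up below:
	// two ROLWconst-swapped 16-bit halves at i0 and i0+2 merge into BSWAPL of
	// one 32-bit load, and two BSWAPL 32-bit halves at i0 and i0+4 merge into
	// BSWAPQ of one 64-bit load.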
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
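// Editor's note: the generated ORQ rule table is split across numbered helper
// functions (_0, _10, ..., _100, ...) purely because of its size; each helper
// tries its rules in order and falls through with "return false".
// rewriteValueAMD64_OpAMD64ORQ_100 continues with shifted big-endian partial
// merges: a pair of byte loads shifted into adjacent fields (j0 and j1 == j0-8)
// inside a wider OR chain folds into one byte-swapped 16-bit load, keeping the
// unrelated operand y. Sketch (bswap16/le16 are hypothetical helpers):
//
//	uint64(b[i0])<<j0 | uint64(b[i0+1])<<(j0-8) | y
//	// becomes, roughly, uint64(bswap16(le16(b, i0)))<<(j0-8) | y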
func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
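// Editor's note: rewriteValueAMD64_OpAMD64ORQ_110 appears to repeat the
// byte-reversed merges for the indexed-addressing load forms (MOVBloadidx1,
// MOVWloadidx1, MOVLloadidx1). Because the effective address is p+idx and
// addition commutes, the generator emits each pattern once per p/idx
// permutation on each load, which is why near-identical rules follow in
// groups of four.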
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
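	// Editor's note: this last family appears to merge two byte-swapped 32-bit
	// indexed loads at i0 and i0+4 into BSWAPQ of a single 64-bit indexed load
	// (MOVQloadidx1), again once per commutative p/idx permutation.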
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
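// Note: the ORQ rule list is long, so the generator emits it in chunks of
// ten rules per numbered helper function; the dispatcher tries each chunk
// in turn and stops at the first rule that fires.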
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
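	// The next group merges two single-byte indexed loads whose results are
	// shifted into adjacent byte positions of a wider OR chain. The side
	// conditions i1 == i0+1, j1 == j0-8, and j1 % 16 == 0 ensure the two
	// bytes are consecutive in memory and land on a 16-bit boundary of the
	// result, so the pair can become one 16-bit load plus a byte swap
	// (ROLWconst [8]); repeated application builds up full big-endian loads.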
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
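	// From here on the outer ORQ's operands are swapped: the accumulated OR
	// chain comes first and the shifted byte load second. ORQ is
	// commutative, and the rule generator expands commutative matches into
	// every argument order rather than canonicalizing them, which is why
	// each permutation gets its own near-identical case.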
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
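	// The remaining cases perform the analogous widening one level up: two
	// byte-swapped 16-bit loads at offsets i0 and i0+2, shifted into
	// adjacent halves of a 32-bit field (j1 == j0-16, j1 % 32 == 0), fuse
	// into a single BSWAPL of a 32-bit load that is then shifted into place.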
v3.AddArg(mem) 44755 v2.AddArg(v3) 44756 v1.AddArg(v2) 44757 v0.AddArg(v1) 44758 v0.AddArg(y) 44759 return true 44760 } 44761 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 44762 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44763 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44764 for { 44765 _ = v.Args[1] 44766 s0 := v.Args[0] 44767 if s0.Op != OpAMD64SHLQconst { 44768 break 44769 } 44770 j0 := s0.AuxInt 44771 r0 := s0.Args[0] 44772 if r0.Op != OpAMD64ROLWconst { 44773 break 44774 } 44775 if r0.AuxInt != 8 { 44776 break 44777 } 44778 x0 := r0.Args[0] 44779 if x0.Op != OpAMD64MOVWloadidx1 { 44780 break 44781 } 44782 i0 := x0.AuxInt 44783 s := x0.Aux 44784 _ = x0.Args[2] 44785 p := x0.Args[0] 44786 idx := x0.Args[1] 44787 mem := x0.Args[2] 44788 or := v.Args[1] 44789 if or.Op != OpAMD64ORQ { 44790 break 44791 } 44792 _ = or.Args[1] 44793 s1 := or.Args[0] 44794 if s1.Op != OpAMD64SHLQconst { 44795 break 44796 } 44797 j1 := s1.AuxInt 44798 r1 := s1.Args[0] 44799 if r1.Op != OpAMD64ROLWconst { 44800 break 44801 } 44802 if r1.AuxInt != 8 { 44803 break 44804 } 44805 x1 := r1.Args[0] 44806 if x1.Op != OpAMD64MOVWloadidx1 { 44807 break 44808 } 44809 i1 := x1.AuxInt 44810 if x1.Aux != s { 44811 break 44812 } 44813 _ = x1.Args[2] 44814 if p != x1.Args[0] { 44815 break 44816 } 44817 if idx != x1.Args[1] { 44818 break 44819 } 44820 if mem != x1.Args[2] { 44821 break 44822 } 44823 y := or.Args[1] 44824 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 44825 break 44826 } 44827 b = mergePoint(b, x0, x1) 44828 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44829 v.reset(OpCopy) 44830 v.AddArg(v0) 44831 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44832 v1.AuxInt = j1 44833 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 44834 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 44835 v3.AuxInt = i0 44836 v3.Aux = s 44837 v3.AddArg(p) 44838 v3.AddArg(idx) 44839 v3.AddArg(mem) 44840 v2.AddArg(v3) 44841 v1.AddArg(v2) 44842 v0.AddArg(v1) 44843 v0.AddArg(y) 44844 return true 44845 } 44846 return false 44847 } 44848 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 44849 b := v.Block 44850 _ = b 44851 typ := &b.Func.Config.Types 44852 _ = typ 44853 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 44854 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44855 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44856 for { 44857 _ = v.Args[1] 
44858 s0 := v.Args[0] 44859 if s0.Op != OpAMD64SHLQconst { 44860 break 44861 } 44862 j0 := s0.AuxInt 44863 r0 := s0.Args[0] 44864 if r0.Op != OpAMD64ROLWconst { 44865 break 44866 } 44867 if r0.AuxInt != 8 { 44868 break 44869 } 44870 x0 := r0.Args[0] 44871 if x0.Op != OpAMD64MOVWloadidx1 { 44872 break 44873 } 44874 i0 := x0.AuxInt 44875 s := x0.Aux 44876 _ = x0.Args[2] 44877 idx := x0.Args[0] 44878 p := x0.Args[1] 44879 mem := x0.Args[2] 44880 or := v.Args[1] 44881 if or.Op != OpAMD64ORQ { 44882 break 44883 } 44884 _ = or.Args[1] 44885 s1 := or.Args[0] 44886 if s1.Op != OpAMD64SHLQconst { 44887 break 44888 } 44889 j1 := s1.AuxInt 44890 r1 := s1.Args[0] 44891 if r1.Op != OpAMD64ROLWconst { 44892 break 44893 } 44894 if r1.AuxInt != 8 { 44895 break 44896 } 44897 x1 := r1.Args[0] 44898 if x1.Op != OpAMD64MOVWloadidx1 { 44899 break 44900 } 44901 i1 := x1.AuxInt 44902 if x1.Aux != s { 44903 break 44904 } 44905 _ = x1.Args[2] 44906 if p != x1.Args[0] { 44907 break 44908 } 44909 if idx != x1.Args[1] { 44910 break 44911 } 44912 if mem != x1.Args[2] { 44913 break 44914 } 44915 y := or.Args[1] 44916 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 44917 break 44918 } 44919 b = mergePoint(b, x0, x1) 44920 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44921 v.reset(OpCopy) 44922 v.AddArg(v0) 44923 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44924 v1.AuxInt = j1 44925 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 44926 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 44927 v3.AuxInt = i0 44928 v3.Aux = s 44929 v3.AddArg(p) 44930 v3.AddArg(idx) 44931 v3.AddArg(mem) 44932 v2.AddArg(v3) 44933 v1.AddArg(v2) 44934 v0.AddArg(v1) 44935 v0.AddArg(y) 44936 return true 44937 } 44938 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 44939 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44940 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44941 for { 44942 _ = v.Args[1] 44943 s0 := v.Args[0] 44944 if s0.Op != OpAMD64SHLQconst { 44945 break 44946 } 44947 j0 := s0.AuxInt 44948 r0 := s0.Args[0] 44949 if r0.Op != OpAMD64ROLWconst { 44950 break 44951 } 44952 if r0.AuxInt != 8 { 44953 break 44954 } 44955 x0 := r0.Args[0] 44956 if x0.Op != OpAMD64MOVWloadidx1 { 44957 break 44958 } 44959 i0 := x0.AuxInt 44960 s := x0.Aux 44961 _ = x0.Args[2] 44962 p := x0.Args[0] 44963 idx := x0.Args[1] 44964 mem := x0.Args[2] 44965 or := v.Args[1] 44966 if or.Op != OpAMD64ORQ { 44967 break 44968 } 44969 _ = or.Args[1] 44970 s1 := or.Args[0] 44971 if s1.Op != OpAMD64SHLQconst { 44972 break 44973 } 44974 j1 := s1.AuxInt 44975 r1 := s1.Args[0] 44976 if r1.Op != OpAMD64ROLWconst { 44977 break 44978 } 44979 if r1.AuxInt != 8 { 44980 break 44981 } 44982 x1 := r1.Args[0] 44983 if x1.Op != OpAMD64MOVWloadidx1 { 44984 break 44985 } 44986 i1 := x1.AuxInt 44987 if x1.Aux != s { 44988 break 44989 } 44990 _ = x1.Args[2] 44991 if idx 
!= x1.Args[0] { 44992 break 44993 } 44994 if p != x1.Args[1] { 44995 break 44996 } 44997 if mem != x1.Args[2] { 44998 break 44999 } 45000 y := or.Args[1] 45001 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 45002 break 45003 } 45004 b = mergePoint(b, x0, x1) 45005 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 45006 v.reset(OpCopy) 45007 v.AddArg(v0) 45008 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 45009 v1.AuxInt = j1 45010 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 45011 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 45012 v3.AuxInt = i0 45013 v3.Aux = s 45014 v3.AddArg(p) 45015 v3.AddArg(idx) 45016 v3.AddArg(mem) 45017 v2.AddArg(v3) 45018 v1.AddArg(v2) 45019 v0.AddArg(v1) 45020 v0.AddArg(y) 45021 return true 45022 } 45023 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 45024 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 45025 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 45026 for { 45027 _ = v.Args[1] 45028 s0 := v.Args[0] 45029 if s0.Op != OpAMD64SHLQconst { 45030 break 45031 } 45032 j0 := s0.AuxInt 45033 r0 := s0.Args[0] 45034 if r0.Op != OpAMD64ROLWconst { 45035 break 45036 } 45037 if r0.AuxInt != 8 { 45038 break 45039 } 45040 x0 := r0.Args[0] 45041 if x0.Op != OpAMD64MOVWloadidx1 { 45042 break 45043 } 45044 i0 := x0.AuxInt 45045 s := x0.Aux 45046 _ = x0.Args[2] 45047 idx := x0.Args[0] 45048 p := x0.Args[1] 45049 mem := x0.Args[2] 45050 or := v.Args[1] 45051 if or.Op != OpAMD64ORQ { 45052 break 45053 } 45054 _ = or.Args[1] 45055 s1 := or.Args[0] 45056 if s1.Op != OpAMD64SHLQconst { 45057 break 45058 } 45059 j1 := s1.AuxInt 45060 r1 := s1.Args[0] 45061 if r1.Op != OpAMD64ROLWconst { 45062 break 45063 } 45064 if r1.AuxInt != 8 { 45065 break 45066 } 45067 x1 := r1.Args[0] 45068 if x1.Op != OpAMD64MOVWloadidx1 { 45069 break 45070 } 45071 i1 := x1.AuxInt 45072 if x1.Aux != s { 45073 break 45074 } 45075 _ = x1.Args[2] 45076 if idx != x1.Args[0] { 45077 break 45078 } 45079 if p != x1.Args[1] { 45080 break 45081 } 45082 if mem != x1.Args[2] { 45083 break 45084 } 45085 y := or.Args[1] 45086 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 45087 break 45088 } 45089 b = mergePoint(b, x0, x1) 45090 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 45091 v.reset(OpCopy) 45092 v.AddArg(v0) 45093 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 45094 v1.AuxInt = j1 45095 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 45096 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 45097 v3.AuxInt = i0 45098 v3.Aux = s 45099 v3.AddArg(p) 45100 v3.AddArg(idx) 45101 v3.AddArg(mem) 45102 v2.AddArg(v3) 45103 v1.AddArg(v2) 45104 
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
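// rewriteValueAMD64_OpAMD64ORQ_160 continues the same family of
// rewrites: each matched pair of byte-swapped 16-bit loads from
// adjacent addresses is replaced by one 32-bit indexed load followed
// by a single BSWAPL.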
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
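	// The remaining ORQ rules fold a 64-bit load directly into the OR,
	// turning (ORQ x (MOVQload ...)) into (ORQload x ...) when the load
	// has no other uses and can be clobbered.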
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
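// rewriteValueAMD64_OpAMD64ORQconst_0 simplifies OR with a constant:
// an OR whose constant is a single high bit becomes a BTSQconst
// (bit test and set), nested constants are folded together, and ORs
// with 0 or -1 collapse to the operand or to an all-ones constant.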
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ORQconst [c] x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTSQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (ORQconst [d] x))
	// cond:
	// result: (ORQconst [c | d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c | d
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (BTSQconst [d] x))
	// cond:
	// result: (ORQconst [c | 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c | 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconstmodify_0(v *Value) bool {
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQmodify_0(v *Value) bool {
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
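// The rotate rules below canonicalize rotate-by-negated-count to the
// opposite direction (e.g. (ROLB x (NEGQ y)) -> (RORB x y)) and reduce
// rotates by a constant to the corresponding ROL*const op, masking the
// count to the operand width (7, 15, 31 or 63).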
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
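// Right rotates are normalized to left rotates: a ROR by a negated
// count becomes a ROL by the count itself, and a ROR by a constant c
// becomes the corresponding ROL*const by (-c) masked to the width.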
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
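// Arithmetic right shifts by a constant become SAR*const. For the
// 8- and 16-bit forms the count is clamped with min(c&31, 7) and
// min(c&31, 15): shifting an N-bit value right arithmetically by N-1
// already yields all sign bits, so larger counts are equivalent.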
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int8(d)) >> uint64(c)
		return true
	}
	return false
}
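// SARL and SARQ also look through shift-count arithmetic that cannot
// change the effective count: adding a multiple of the width is a
// no-op (c & 31 == 0, c & 63 == 0), and masking with a constant that
// keeps all count bits (c & 31 == 31, c & 63 == 63) can be dropped.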
v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int32(d)) >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
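// Editorial note, not part of the generated rules: the SARL/SARQ rewrites
// above are sound because AMD64 shift instructions use only the low 5
// (32-bit) or 6 (64-bit) bits of the count register. The hypothetical
// sketch below models that masking directly; adding a multiple of 32 to
// the count, or ANDing it with 31, cannot change the result.
func exampleShiftCountMasking(x int32, y uint32) bool {
	plain := x >> (y & 31)         // what SARL computes for count y
	added := x >> ((y + 32) & 31)  // (ADDQconst [32] y) folded away: same count
	masked := x >> ((y & 31) & 31) // (ANDQconst [31] y) folded away: same count
	return plain == added && plain == masked
}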
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int16(d)) >> uint64(c)
		return true
	}
	return false
}
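// Editorial note, not part of the generated rules: SARW clamps the constant
// with min(c&31,15) because an arithmetic right shift of a 16-bit value by
// 15 already leaves nothing but copies of the sign bit, so any larger count
// is equivalent. A hypothetical check of that identity:
func exampleSARWClamp(x int16) bool {
	return x>>20 == x>>15 // counts >= 15 all produce just the sign bit
}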
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool {
	// match: (SBBQ x (MOVQconst [c]) borrow)
	// cond: is32Bit(c)
	// result: (SBBQconst x [c] borrow)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		borrow := v.Args[2]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SBBQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(borrow)
		return true
	}
	// match: (SBBQ x y (FlagEQ))
	// cond:
	// result: (SUBQborrow x y)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQborrow)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQconst_0(v *Value) bool {
	// match: (SBBQconst x [c] (FlagEQ))
	// cond:
	// result: (SUBQconstborrow x [c])
	for {
		c := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
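// Editorial note, not part of the generated rules: SBB(L|Q)carrymask
// materializes 0 or -1 from the carry bit, so once the flags are a known
// constant (FlagEQ, FlagLT_ULT, ...) the mask itself is a constant. The
// hypothetical sketch below mirrors the five cases folded above: the two
// unsigned-less-than flag states borrow (-1), the rest do not (0).
func exampleCarryMask(borrow bool) int64 {
	if borrow {
		return -1 // FlagLT_ULT and FlagGT_ULT
	}
	return 0 // FlagEQ, FlagLT_UGT, FlagGT_UGT
}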
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
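// Editorial note, not part of the generated rules: InvertFlags records that
// the operands of the producing comparison were swapped, so a consumer must
// flip its condition; SETA becomes SETB and SETAE becomes SETBE. In Go
// terms the identity is simply a > b == b < a, sketched hypothetically:
func exampleInvertFlags(a, b uint64) bool {
	return (a > b) == (b < a) // SETA after CMP(a,b) equals SETB after the swapped CMP(b,a)
}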
func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
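// Editorial note, not part of the generated rules: the SET*store rewrites
// above fold an ADDQconst or LEAQ offset into the store's displacement only
// when is32Bit(off1+off2) holds, since an x86 addressing-mode displacement
// is a signed 32-bit field. A hypothetical sketch of that guard (the exact
// definition of is32Bit in this package is assumed, not quoted):
func exampleFitsInDisp32(off1, off2 int64) bool {
	sum := off1 + off2
	return sum == int64(int32(sum)) // the combined offset must round-trip through int32
}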
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTLconst [log2uint32(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// cond:
	// result: (SETNE (CMPLconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// cond:
	// result: (SETNE (CMPQconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	return false
}
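// Editorial note, not part of the generated rules: the SETEQ rules above
// turn a test of a single bit, built as TEST x, (SHL 1, k) or TEST x, 2^k,
// into a BT instruction; ZF set (EQ) corresponds to carry clear (AE) after
// BT. A hypothetical model of that equivalence:
func exampleBitTest(k uint, y uint64) bool {
	zf := y&(uint64(1)<<(k&63)) == 0 // TESTQ (SHLQ (MOVQconst [1]) k) y: ZF iff the bit is clear
	ae := (y>>(k&63))&1 == 0         // BTQ k y: SETAE reads CF == 0, i.e. the bit is clear
	return zf == ae
}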
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] x)))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHRLconst [31] x)))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
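// Editorial note, not part of the generated rules: in the SETEQ rules above,
// testing a value that is its own top bit (z1:(SHRQconst [63] x) TESTed
// against itself) asks whether the sign bit of x is clear, which
// BTQconst [63] answers through the carry flag. A hypothetical model:
func exampleSignBitTest(x uint64) bool {
	zf := x>>63 == 0     // TESTQ z z with z = x>>63: ZF iff z == 0
	ae := x&(1<<63) == 0 // BTQconst [63] x: SETAE reads CF == 0
	return zf == ae
}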
OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// cond:
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// cond:
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
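// Explanatory note (hand-written commentary, not emitted by the rules
// generator): the SETEQstore rules above rewrite an equality test of a
// single-bit TEST into a bit-test. On amd64, BT copies the selected bit
// into the carry flag, so "bit is clear" becomes SETAE (CF == 0). A
// hypothetical source-level shape that benefits:
//
//	b := v&(1<<k) == 0 // TESTQ (SHLQ (MOVQconst [1]) k) v  ->  BTQ k v; SETAE
//
// The _10 chunk below extends the same family to masks built from shift
// pairs that isolate the sign bit or bit zero.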
func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
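// Explanatory note (hand-written commentary, not emitted by the rules
// generator): _20 finishes the shift-pair family for 32-bit values, then
// handles the generic SETEQstore folds: InvertFlags is simply dropped
// (equality is symmetric in the comparison operands), small constant
// offsets and LEAQ addresses are folded into AuxInt/Aux, and a flag value
// known at compile time becomes a direct MOVBstore of 0 or 1.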
func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
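// Explanatory note (hand-written commentary, not emitted by the rules
// generator): the plain SETcc rules below fold two situations. First,
// (InvertFlags x) records that the comparison operands were swapped
// earlier, so the condition flips (SETG <-> SETL, SETGE <-> SETLE).
// Second, a flag value known at compile time (FlagEQ, FlagLT_*, FlagGT_*)
// reduces the whole SETcc to a MOVLconst of 0 or 1.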
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
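// Explanatory note (hand-written commentary, not emitted by the rules
// generator): the SETccstore forms repeat the SETcc folds for the fused
// set-and-store op and also fold the store address: (ADDQconst [off2] base)
// merges into the offset while the sum still fits in 32 bits, and
// (LEAQ [off2] {sym2} base) merges both offset and symbol via mergeSym.
// A flag value known at compile time becomes a MOVBstore of a constant.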
func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETGEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETGstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
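// Explanatory note (hand-written commentary, not emitted by the rules
// generator): SETL/SETLE and their store forms below mirror SETG/SETGE
// above, with the constant results flipped for the less-than direction.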
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETLEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETLstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
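// Explanatory note (hand-written commentary, not emitted by the rules
// generator): SETNE is the dual of the SETEQ family. A non-zero single-bit
// TEST means the tested bit is set, so the rewrite targets SETB (CF == 1
// after BT) instead of SETAE. A hypothetical source-level shape:
//
//	b := v&(1<<k) != 0 // TESTQ (SHLQ (MOVQconst [1]) k) v  ->  BTQ k v; SETB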
func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETB (BTLconst [log2uint32(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ x (MOVQconst [c])))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
	// cond:
	// result: (SETEQ (CMPLconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
	// cond:
	// result: (SETEQ (CMPQconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	return false
}
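// Explanatory note (hand-written commentary, not emitted by the rules
// generator): as with SETEQstore_10, the _10 chunk below matches TEST
// operands whose two shift constants isolate a single bit (the sign bit,
// or bit zero after a shift round-trip); the z1 == z2 condition ensures
// both TEST operands are the same mask value.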
func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		z2 := v_0.Args[1]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z2 z1:(SHRQconst [63] x)))
	// cond: z1==z2 && !config.nacl
	// result: (SETB (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
52631 z2 := v_0.Args[0] 52632 z1 := v_0.Args[1] 52633 if z1.Op != OpAMD64SHRLconst { 52634 break 52635 } 52636 if z1.AuxInt != 31 { 52637 break 52638 } 52639 x := z1.Args[0] 52640 if !(z1 == z2 && !config.nacl) { 52641 break 52642 } 52643 v.reset(OpAMD64SETB) 52644 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 52645 v0.AuxInt = 31 52646 v0.AddArg(x) 52647 v.AddArg(v0) 52648 return true 52649 } 52650 // match: (SETNE (InvertFlags x)) 52651 // cond: 52652 // result: (SETNE x) 52653 for { 52654 v_0 := v.Args[0] 52655 if v_0.Op != OpAMD64InvertFlags { 52656 break 52657 } 52658 x := v_0.Args[0] 52659 v.reset(OpAMD64SETNE) 52660 v.AddArg(x) 52661 return true 52662 } 52663 // match: (SETNE (FlagEQ)) 52664 // cond: 52665 // result: (MOVLconst [0]) 52666 for { 52667 v_0 := v.Args[0] 52668 if v_0.Op != OpAMD64FlagEQ { 52669 break 52670 } 52671 v.reset(OpAMD64MOVLconst) 52672 v.AuxInt = 0 52673 return true 52674 } 52675 // match: (SETNE (FlagLT_ULT)) 52676 // cond: 52677 // result: (MOVLconst [1]) 52678 for { 52679 v_0 := v.Args[0] 52680 if v_0.Op != OpAMD64FlagLT_ULT { 52681 break 52682 } 52683 v.reset(OpAMD64MOVLconst) 52684 v.AuxInt = 1 52685 return true 52686 } 52687 // match: (SETNE (FlagLT_UGT)) 52688 // cond: 52689 // result: (MOVLconst [1]) 52690 for { 52691 v_0 := v.Args[0] 52692 if v_0.Op != OpAMD64FlagLT_UGT { 52693 break 52694 } 52695 v.reset(OpAMD64MOVLconst) 52696 v.AuxInt = 1 52697 return true 52698 } 52699 // match: (SETNE (FlagGT_ULT)) 52700 // cond: 52701 // result: (MOVLconst [1]) 52702 for { 52703 v_0 := v.Args[0] 52704 if v_0.Op != OpAMD64FlagGT_ULT { 52705 break 52706 } 52707 v.reset(OpAMD64MOVLconst) 52708 v.AuxInt = 1 52709 return true 52710 } 52711 // match: (SETNE (FlagGT_UGT)) 52712 // cond: 52713 // result: (MOVLconst [1]) 52714 for { 52715 v_0 := v.Args[0] 52716 if v_0.Op != OpAMD64FlagGT_UGT { 52717 break 52718 } 52719 v.reset(OpAMD64MOVLconst) 52720 v.AuxInt = 1 52721 return true 52722 } 52723 return false 52724 } 52725 func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { 52726 b := v.Block 52727 _ = b 52728 config := b.Func.Config 52729 _ = config 52730 // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 52731 // cond: !config.nacl 52732 // result: (SETBstore [off] {sym} ptr (BTL x y) mem) 52733 for { 52734 off := v.AuxInt 52735 sym := v.Aux 52736 _ = v.Args[2] 52737 ptr := v.Args[0] 52738 v_1 := v.Args[1] 52739 if v_1.Op != OpAMD64TESTL { 52740 break 52741 } 52742 _ = v_1.Args[1] 52743 v_1_0 := v_1.Args[0] 52744 if v_1_0.Op != OpAMD64SHLL { 52745 break 52746 } 52747 _ = v_1_0.Args[1] 52748 v_1_0_0 := v_1_0.Args[0] 52749 if v_1_0_0.Op != OpAMD64MOVLconst { 52750 break 52751 } 52752 if v_1_0_0.AuxInt != 1 { 52753 break 52754 } 52755 x := v_1_0.Args[1] 52756 y := v_1.Args[1] 52757 mem := v.Args[2] 52758 if !(!config.nacl) { 52759 break 52760 } 52761 v.reset(OpAMD64SETBstore) 52762 v.AuxInt = off 52763 v.Aux = sym 52764 v.AddArg(ptr) 52765 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 52766 v0.AddArg(x) 52767 v0.AddArg(y) 52768 v.AddArg(v0) 52769 v.AddArg(mem) 52770 return true 52771 } 52772 // match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) 52773 // cond: !config.nacl 52774 // result: (SETBstore [off] {sym} ptr (BTL x y) mem) 52775 for { 52776 off := v.AuxInt 52777 sym := v.Aux 52778 _ = v.Args[2] 52779 ptr := v.Args[0] 52780 v_1 := v.Args[1] 52781 if v_1.Op != OpAMD64TESTL { 52782 break 52783 } 52784 _ = v_1.Args[1] 52785 y := v_1.Args[0] 52786 v_1_1 := v_1.Args[1] 52787 if v_1_1.Op != 
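// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. The SETNE rules above hinge on the
// z1 == z2 condition: TESTL z1 z2 then computes z1 & z1, i.e. asks "z1 != 0?",
// and for z1 = x>>31 that is exactly "bit 31 of x is set", which SETB reads
// back out of the carry flag that BTLconst [31] sets.
func exampleSetneBit31(x uint32) bool {
	z := x >> 31  // z1 == z2: TESTL z z computes z & z, so the test is "z != 0?"
	return z != 0 // true exactly when bit 31 of x is set: BTLconst [31] x
}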
func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// cond:
	// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// cond:
	// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
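// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. SETNEstore_0 above turns a TEST
// against a shifted 1 (or a power-of-two immediate) into a single bit-test
// instruction. The scalar identity, assuming the shift count is already
// reduced mod 64 exactly as BTQ itself reduces it:
func exampleShiftedOneIsBitTest(y uint64, x uint) bool {
	return y&(1<<(x&63)) != 0 // TESTQ (SHLQ (MOVQconst [1]) x) y  ==  BTQ x, y
}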
func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
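// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. The SHL/SHR round trips matched above
// isolate one end bit of x: shifting right then left by 63 keeps only bit 63,
// and shifting left then right keeps only bit 0, so a nonzero result means the
// single surviving bit was set.
func exampleIsolateEndBits(x uint64) (top, bottom bool) {
	top = (x>>63)<<63 != 0    // SHLQconst [63] (SHRQconst [63] x): bit 63 of x
	bottom = (x<<63)>>63 != 0 // SHRQconst [63] (SHLQconst [63] x): bit 0 of x
	return
}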
func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETNEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
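// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the predicate below is hypothetical. The ADDQconst/LEAQ folds above may only
// merge two displacements when the sum still fits in the signed 32-bit offset
// an x86 addressing mode can encode, which is what is32Bit(off1+off2) guards.
func exampleOffsetsFoldable(off1, off2 int64) bool {
	sum := off1 + off2
	return sum == int64(int32(sum)) // the merged offset must survive 32-bit truncation
}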
func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLL x (MOVQconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHLLconst [1] (SHRLconst [1] x))
	// cond: !config.nacl
	// result: (BTRLconst [0] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
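// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. SHLLconst [1] (SHRLconst [1] x) is
// matched above because a right-then-left shift by one simply clears the low
// bit, which BTRLconst [0] does in a single instruction.
func exampleClearLowBit(x uint32) uint32 {
	return (x >> 1) << 1 // == x &^ 1, i.e. what BTRLconst [0] x computes
}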
func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
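// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. SHLQ only consults the low six bits
// of its count, so the NEGQ forms above can drop an ADDQconst whose constant
// is a multiple of 64: -(y+c) and -y then agree mod 64.
func exampleNegCountsAgreeMod64(y uint64) bool {
	const c = 128                   // stands in for any constant with c&63 == 0
	return (-(y + c))&63 == (-y)&63 // always true, so the shift count is unchanged
}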
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// cond: !config.nacl
	// result: (BTRQconst [0] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
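// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. The SHRB rules above split on the
// masked count: a byte shifted right by 8 or more has no bits left, so the
// whole expression folds to the constant zero instead of a shift instruction.
func exampleShrbLargeCount(x uint8, c uint32) uint32 {
	if c&31 >= 8 {
		return 0 // every bit of x has been shifted out: MOVLconst [0]
	}
	return uint32(x >> (c & 31)) // otherwise SHRBconst [c&31] x
}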
func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// cond: !config.nacl
	// result: (BTRLconst [31] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = 31
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
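// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. Masking a 32-bit shift count with a
// constant whose low five bits are all ones is redundant, because SHRL already
// reduces the count mod 32; that is why (ANDLconst [c] y) with c&31 == 31
// disappears from the count in the rules above.
func exampleRedundantCountMask(x, y uint32) bool {
	const c = 0xff                    // stands in for any constant with c&31 == 31
	return x>>((y&c)&31) == x>>(y&31) // always true: the AND cannot change the count
}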
func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// cond: !config.nacl
	// result: (BTRQconst [63] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
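// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. This is the 64-bit analogue of the
// BTR fold above: a left-then-right shift by one clears the top bit, so
// SHRQconst [1] (SHLQconst [1] x) collapses to a single BTRQconst [63].
func exampleClearTopBit64(x uint64) uint64 {
	return (x << 1) >> 1 // == x &^ (1 << 63), i.e. what BTRQconst [63] x computes
}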
func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
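// Editorial note: an illustrative sketch, not generated from gen/AMD64.rules;
// the helper name below is hypothetical. Every SHxxconst op above carries a
// rule mapping a zero shift count to a plain copy of the operand; the identity
// is no deeper than:
func exampleZeroShiftIsCopy(x uint16) bool {
	return x>>0 == x // why (SHRWconst x [0]) reduces to x (OpCopy)
}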
func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
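// Illustrative note (not generated): the second SUBLconst rule above
// canonicalizes subtraction of a constant into addition of its 32-bit
// negation, so later strength reduction only has to know about ADDLconst.
// A minimal sketch (hypothetical name):
//
//	func subSeven(x int32) int32 { return x - 7 } // emitted as ADDL $-7 rather than SUBL $7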
func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool {
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQborrow_0(v *Value) bool {
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
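// Illustrative note (not generated): in the SUBQconst rules below, the
// c != -(1<<31) guard matters because the immediate is a signed 32-bit
// value; negating -1<<31 gives 1<<31, which does not fit, so that one
// constant must remain a genuine subtraction. A minimal sketch
// (hypothetical name):
//
//	func subMinInt32Imm(x int64) int64 { return x - (-1 << 31) } // stays SUBQconst, not flipped to ADDQconst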
func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
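// Illustrative note (not generated): the (SUBQ x l:(MOVQload ...)) rule
// earlier, together with the offset-folding SUBQload rules above, lets a
// subtraction read its second operand straight from memory. A minimal
// sketch (hypothetical names):
//
//	func subFromMem(x int64, p *int64) int64 { return x - *p } // can be a single SUBQ with a memory operand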
func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool {
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
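// Illustrative note (not generated): the MOVQstore/MOVLstore patterns in
// the SUB*load rules above are store-to-load forwarding. When the loaded
// slot was just written with a value from the other register class, the
// rewrite moves the bits across registers with MOV*i2f/MOV*f2i instead of
// round-tripping through memory; this typically arises around float/int
// bit-reinterpretation code.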
func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (TESTB (MOVLconst [c]) x)
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB x (MOVLconst [c]))
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		l2 := v.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem))
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l2 := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool {
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
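// Illustrative note (not generated): the l.Uses == 2 guard in the TESTB
// load rules above recognizes one load feeding both TEST operands (hence
// exactly two uses), which can then fuse into a single compare against
// memory. Roughly (hypothetical name):
//
//	func cond(p *bool) int {
//		if *p { // the branch can test *p via CMPB $0, (p) instead of MOVB+TESTB
//			return 1
//		}
//		return 0
//	}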
func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (TESTL (MOVLconst [c]) x)
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL x (MOVLconst [c]))
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		l2 := v.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem))
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l2 := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool {
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		l2 := v.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem))
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l2 := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool {
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
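// Illustrative note (not generated): a TESTQconst [-1] x mask keeps every
// bit, so its flags are exactly those of TESTQ x x; the rewrite above
// simply drops the immediate. The x.Op != OpAMD64MOVQconst guard avoids a
// rewrite loop, since a constant operand would immediately be folded back
// into a TESTQconst by the rules before it.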
func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (TESTW (MOVLconst [c]) x)
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW x (MOVLconst [c]))
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		l2 := v.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem))
	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem)
	for {
		_ = v.Args[1]
		l2 := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(0, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool {
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
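// Illustrative note (not generated): XADDLlock/XADDQlock back the
// sync/atomic add intrinsics; the ADDQconst folding above just moves a
// field offset into the instruction's displacement. Roughly, for a
// hypothetical struct with a counter at offset 8,
//
//	atomic.AddUint64(&s.n, 1)
//
// can address the field directly as LOCK XADDQ ..., 8(ptr) instead of
// first materializing &s.n with a separate LEAQ.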
func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// cond: !config.nacl
	// result: (BTCL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTCL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORL x (SHLL (MOVLconst [1]) y))
	// cond: !config.nacl
	// result: (BTCL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0.AuxInt != 1 {
			break
		}
		y := v_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTCL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTCLconst [log2uint32(c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (XORL x (MOVLconst [c]))
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTCLconst [log2uint32(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (XORL x (MOVLconst [c]))
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
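// Illustrative note (not generated): the SHLLconst/SHRLconst XOR pairs in
// XORL_0 above recognize rotate idioms written with ^ as well as |, since
// the two shifted halves never have overlapping bits. A minimal sketch
// (hypothetical name):
//
//	func rotl9(x uint32) uint32 { return x<<9 ^ x>>23 } // 23 == 32-9, so this matches as ROLLconst [9]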
func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (XORLconst [c] x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTCLconst [log2uint32(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETNE x))
	// cond:
	// result: (SETEQ x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// cond:
	// result: (SETNE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// cond:
	// result: (SETGE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// cond:
	// result: (SETL x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// cond:
	// result: (SETG x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// cond:
	// result: (SETLE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// cond:
	// result: (SETAE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// cond:
	// result: (SETB x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// cond:
	// result: (SETA x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	return false
}
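// Illustrative note (not generated): XORLconst [1] of a SETcc flips the
// boolean by flipping the condition code itself, so negating a comparison
// costs nothing. A minimal sketch (hypothetical name):
//
//	func notLess(a, b int32) bool { return !(a < b) } // SETL followed by XOR $1 folds to a single SETGE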
func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
	// match: (XORLconst [1] (SETA x))
	// cond:
	// result: (SETBE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (BTCLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconstmodify_0(v *Value) bool {
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (XORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLmodify_0(v *Value) bool {
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
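// Illustrative note (not generated): the XORQ rules below turn an XOR with
// a single dynamically positioned bit into BTCQ (bit test and complement).
// Roughly, and depending on how the shift count is bounded (hypothetical
// name):
//
//	func toggleBit(x uint64, i uint) uint64 { return x ^ 1<<(i&63) } // a candidate for BTCQ x, i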
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// cond: !config.nacl
	// result: (BTCQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTCQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORQ x (SHLQ (MOVQconst [1]) y))
	// cond: !config.nacl
	// result: (BTCQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0.AuxInt != 1 {
			break
		}
		y := v_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTCQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTCQconst [log2(c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTCQconst [log2(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool {
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (XORQconst [c] x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTCQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (BTCQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconstmodify_0(v *Value) bool {
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
func rewriteValueAMD64_OpAMD64XORQconstmodify_0(v *Value) bool {
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (XORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
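// Editorial note on the last XORQload rule above: when the load would read
// back a float value that was just stored through the same [off] {sym} ptr
// slot, the memory round trip is elided; MOVQf2i reinterprets the stored
// float's bits as an integer register value so the XOR can use it directly.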
func rewriteValueAMD64_OpAMD64XORQmodify_0(v *Value) bool {
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
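// Editorial note: Add8, Add16, and Add32 all lower to the 32-bit ADDL. The
// generic ops only guarantee the low 8/16/32 bits of the result, so a wider
// add is safe, and sharing one instruction form keeps the rules uniform.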
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
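// Editorial note on AtomicAdd32 above (and AtomicAdd64 below): LOCK XADD
// yields the *old* value of the memory word, while the generic AtomicAdd is
// specified to return the *new* value, so the XADD result tuple is wrapped
// in AddTupleFirst32/64, which adds val back into the tuple's value half.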
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
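// Editorial note: atomic loads need no LOCK prefix on AMD64; an aligned MOV
// is already an atomic read under the x86 memory model. The dedicated
// MOV*atomicload ops appear to exist mainly so that later passes treat the
// load as ordered and do not reorder or eliminate it.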
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
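// Editorial note on the AtomicStore* rules above: a sequentially consistent
// store is implemented as XCHG rather than a plain MOV because XCHG with a
// memory operand carries an implicit LOCK prefix and so acts as a full
// barrier. The exchanged-out old value is discarded: Select1 keeps only the
// memory component of the tuple.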
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBitLen16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen16 x)
	// cond:
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = 1
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v2.AddArg(x)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen32 x)
	// cond:
	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
		v1.AuxInt = 1
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v2.AddArg(x)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v3.AddArg(x)
		v1.AddArg(v3)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen8 x)
	// cond:
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = 1
		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v2.AddArg(x)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
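// Editorial note on the BitLen rules above: BSR leaves its destination
// undefined when the input is zero, so the narrow variants feed it 2*x+1
// instead of x (LEAL1/LEAQ1 [1] with the zero-extended x as both base and
// index computes x+x+1). That value is never zero, and bsr(2*x+1) equals
// bitlen(x), so no zero check is needed. BitLen64 cannot use the trick
// (2*x+1 could overflow), so it tests the flags from BSRQ with CMOVQEQ,
// substituting -1 for a zero input so the final ADDQconst [1] yields 0.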
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCeil_0(v *Value) bool {
	// match: (Ceil x)
	// cond:
	// result: (ROUNDSD [2] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 2
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
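// Editorial note on the CondSelect rules that follow: each
// (CondSelect x y (SETcc f)) becomes the CMOV whose width matches the result
// type (CMOVQ*/CMOVL*/CMOVW* for 64/32/16 bits; x86 has no byte-sized CMOV,
// so no 8-bit variants appear here) and whose condition matches the SET
// variant. The operands come out as (y x cond) because these CMOV ops yield
// their second argument when the flags satisfy the condition and their first
// otherwise, which is exactly CondSelect's "x if cond, else y".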
func rewriteValueAMD64_OpCondSelect_0(v *Value) bool {
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCondSelect_10(v *Value) bool {
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCondSelect_20(v *Value) bool {
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is32BitInt(t)
	// result: (CMOVLHI y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLHI)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCC y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCC)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGTF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGTF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQ y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCondSelect_30(v *Value) bool {
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is16BitInt(t)
	// result: (CMOVWHI y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWHI)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCC y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCC)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	return false
}
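// Editorial note on rewriteValueAMD64_OpCondSelect_40 below: when the third
// argument is an ordinary boolean value rather than flags, a narrow check
// (1, 2, or 4 bytes) is first zero-extended to 64 bits, and the widened
// check is then compared against zero with CMPQconst [0] so that a CMOV*NE
// can consume the resulting flags.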
func rewriteValueAMD64_OpCondSelect_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGTF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGTF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 1
	// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		check := v.Args[2]
		if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v.AddArg(x)
		v.AddArg(y)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg(v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 2
	// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		check := v.Args[2]
		if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v.AddArg(x)
		v.AddArg(y)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg(v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 4
	// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		check := v.Args[2]
		if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v.AddArg(x)
		v.AddArg(y)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg(v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		check := v.Args[2]
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg(y)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(check)
		v.AddArg(v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
	// result: (CMOVLNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		check := v.Args[2]
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg(y)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(check)
		v.AddArg(v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
	// result: (CMOVWNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		check := v.Args[2]
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg(y)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(check)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
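// Editorial note: Const8, Const16, Const32, and ConstBool all materialize
// through MOVLconst; the narrower generic types only guarantee their low
// bits, so one 32-bit constant op suffices and no separate byte/word
// constant forms are needed.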
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz16 x)
	// cond:
	// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero_0(v *Value) bool {
	// match: (Ctz16NonZero x)
	// cond:
	// result: (BSFL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = 32
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz32NonZero_0(v *Value) bool {
	// match: (Ctz32NonZero x)
	// cond:
	// result: (BSFL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCtz64NonZero_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64NonZero x)
	// cond:
	// result: (Select0 (BSFQ x))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
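// Editorial note on the Ctz16/Ctz32 rules above and Ctz8 below: like BSR,
// BSF is undefined for a zero input, so the sub-64-bit counts first set a
// sentinel bit just above the value's width (BTSLconst [16]/[ 8],
// BTSQconst [32]); a zero input then becomes the sentinel and BSF correctly
// returns 16, 8, or 32. Ctz64 has no spare bit and instead selects the
// constant 64 with CMOVQEQ when BSFQ reports a zero input. The *NonZero
// variants can skip all of this.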
func rewriteValueAMD64_OpCtz8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz8 x)
	// cond:
	// result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8NonZero_0(v *Value) bool {
	// match: (Ctz8NonZero x)
	// cond:
	// result: (BSFL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
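// Editorial note on the Div lowerings that follow: the machine DIVW/DIVL/
// DIVQ ops and their unsigned *U forms produce a (quotient, remainder)
// tuple, so each generic Div becomes Select0 of the tuple, and the AuxInt
// [a] on the signed forms is carried through unchanged. Div128u maps
// directly to DIVQU2, which from its operands evidently divides the 128-bit
// value xhi:xlo by y.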
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 [a] x y)
	// cond:
	// result: (Select0 (DIVW [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 [a] x y)
	// cond:
	// result: (Select0 (DIVL [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 [a] x y)
	// cond:
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
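// Editorial note: Div8 and Div8u below have no byte-sized divide lowering of
// their own; the operands are sign- or zero-extended to 16 bits so the word
// divides above can be reused, with Select0 again extracting the quotient.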
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
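// From here on, comparison lowerings share one shape: compute the flags
// with a width-matched compare (CMPB/CMPW/CMPL/CMPQ for integers,
// UCOMISS/UCOMISD for floats), then turn the condition of interest into a
// 0/1 byte with a SETcc pseudo-op. Note the float equality rules use
// SETEQF rather than plain SETEQ, presumably so later passes can fold in
// the unordered (NaN) outcome that UCOMIS reports via the parity flag.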
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpFloor_0(v *Value) bool {
	// match: (Floor x)
	// cond:
	// result: (ROUNDSD [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
	// match: (GetCallerPC)
	// cond:
	// result: (LoweredGetCallerPC)
	for {
		v.reset(OpAMD64LoweredGetCallerPC)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
	// match: (GetCallerSP)
	// cond:
	// result: (LoweredGetCallerSP)
	for {
		v.reset(OpAMD64LoweredGetCallerSP)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
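// Bounds and nil checks dispatch on config.PtrSize: 8-byte pointers get
// the quad-word forms (CMPQ/TESTQ), 4-byte pointers the long forms
// (CMPL/TESTL). IsInBounds compares unsigned (SETB), so a negative index,
// reinterpreted as a huge unsigned value, fails the check as required;
// IsSliceInBounds allows equality and therefore uses SETBE.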
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
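// Ordered comparisons pair signed conditions (SETLE/SETL, and SETGE/SETG
// above) with unsigned ones (SETBE/SETB, SETAE/SETA) over the same
// width-matched CMP. The float rules below swap their operands: x <= y is
// lowered as (SETGEF (UCOMISS y x)). The "above"-style conditions are the
// ones that come out false when UCOMIS reports unordered, so expressing
// less-than as the reversed greater-than yields the required false result
// when either input is NaN.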
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
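// Load is lowered purely from the value's type: 64- and 32-bit integers
// (and pointers, again keyed on config.PtrSize) pick MOVQload/MOVLload,
// smaller integers and booleans the narrower MOVWload/MOVBload, and floats
// MOVSSload/MOVSDload. The rules are tried in order until one condition
// holds.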
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (LocalAddr {sym} base _)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (LocalAddr {sym} base _)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
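// All of the Lsh lowerings below use one masking idiom when the shift
// count is not provably in range (!shiftIsBounded): CMPxconst y [c] sets
// the carry flag iff y < c unsigned, SBBcarrymask materializes that carry
// as an all-ones (or all-zero) mask, and the final AND forces the result
// to 0 for oversized counts, matching Go's shift semantics. The constant c
// is the hardware count width of the shift actually used: 32 for SHLL
// (including the 8- and 16-bit shifts, which are done in 32-bit registers)
// and 64 for SHLQ. When shiftIsBounded(v) holds, a bare SHLL/SHLQ is
// emitted instead.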
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
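// Mod mirrors Div exactly: the same tuple-producing DIVW/DIVL/DIVQ (and
// unsigned variants) are built, but Select1 extracts the remainder where
// Div's Select0 took the quotient. Mod8/Mod8u widen to 16 bits just as
// Div8/Div8u do above.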
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 [a] x y)
	// cond:
	// result: (Select1 (DIVW [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 [a] x y)
	// cond:
	// result: (Select1 (DIVL [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 [a] x y)
	// cond:
	// result: (Select1 (DIVQ [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
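// Move inlines a memmove of small constant size. Sizes 1/2/4/8 become a
// single load/store pair of that width; 16 bytes use one SSE MOVO pair
// when config.useSSE and two quad-word pairs otherwise; in-between sizes
// (3, 5, 6, 7, 9, 10 and onward in the _10 chunk below) combine a wide
// pair with a narrower or deliberately overlapping one, e.g. Move [7]
// issues two 4-byte pairs at offsets 3 and 0; and 32/48/64 recurse through
// OffPtr into 16- and 32-byte sub-moves.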
62384 return true 62385 } 62386 // match: (Move [1] dst src mem) 62387 // cond: 62388 // result: (MOVBstore dst (MOVBload src mem) mem) 62389 for { 62390 if v.AuxInt != 1 { 62391 break 62392 } 62393 _ = v.Args[2] 62394 dst := v.Args[0] 62395 src := v.Args[1] 62396 mem := v.Args[2] 62397 v.reset(OpAMD64MOVBstore) 62398 v.AddArg(dst) 62399 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 62400 v0.AddArg(src) 62401 v0.AddArg(mem) 62402 v.AddArg(v0) 62403 v.AddArg(mem) 62404 return true 62405 } 62406 // match: (Move [2] dst src mem) 62407 // cond: 62408 // result: (MOVWstore dst (MOVWload src mem) mem) 62409 for { 62410 if v.AuxInt != 2 { 62411 break 62412 } 62413 _ = v.Args[2] 62414 dst := v.Args[0] 62415 src := v.Args[1] 62416 mem := v.Args[2] 62417 v.reset(OpAMD64MOVWstore) 62418 v.AddArg(dst) 62419 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 62420 v0.AddArg(src) 62421 v0.AddArg(mem) 62422 v.AddArg(v0) 62423 v.AddArg(mem) 62424 return true 62425 } 62426 // match: (Move [4] dst src mem) 62427 // cond: 62428 // result: (MOVLstore dst (MOVLload src mem) mem) 62429 for { 62430 if v.AuxInt != 4 { 62431 break 62432 } 62433 _ = v.Args[2] 62434 dst := v.Args[0] 62435 src := v.Args[1] 62436 mem := v.Args[2] 62437 v.reset(OpAMD64MOVLstore) 62438 v.AddArg(dst) 62439 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 62440 v0.AddArg(src) 62441 v0.AddArg(mem) 62442 v.AddArg(v0) 62443 v.AddArg(mem) 62444 return true 62445 } 62446 // match: (Move [8] dst src mem) 62447 // cond: 62448 // result: (MOVQstore dst (MOVQload src mem) mem) 62449 for { 62450 if v.AuxInt != 8 { 62451 break 62452 } 62453 _ = v.Args[2] 62454 dst := v.Args[0] 62455 src := v.Args[1] 62456 mem := v.Args[2] 62457 v.reset(OpAMD64MOVQstore) 62458 v.AddArg(dst) 62459 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 62460 v0.AddArg(src) 62461 v0.AddArg(mem) 62462 v.AddArg(v0) 62463 v.AddArg(mem) 62464 return true 62465 } 62466 // match: (Move [16] dst src mem) 62467 // cond: config.useSSE 62468 // result: (MOVOstore dst (MOVOload src mem) mem) 62469 for { 62470 if v.AuxInt != 16 { 62471 break 62472 } 62473 _ = v.Args[2] 62474 dst := v.Args[0] 62475 src := v.Args[1] 62476 mem := v.Args[2] 62477 if !(config.useSSE) { 62478 break 62479 } 62480 v.reset(OpAMD64MOVOstore) 62481 v.AddArg(dst) 62482 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 62483 v0.AddArg(src) 62484 v0.AddArg(mem) 62485 v.AddArg(v0) 62486 v.AddArg(mem) 62487 return true 62488 } 62489 // match: (Move [16] dst src mem) 62490 // cond: !config.useSSE 62491 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 62492 for { 62493 if v.AuxInt != 16 { 62494 break 62495 } 62496 _ = v.Args[2] 62497 dst := v.Args[0] 62498 src := v.Args[1] 62499 mem := v.Args[2] 62500 if !(!config.useSSE) { 62501 break 62502 } 62503 v.reset(OpAMD64MOVQstore) 62504 v.AuxInt = 8 62505 v.AddArg(dst) 62506 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 62507 v0.AuxInt = 8 62508 v0.AddArg(src) 62509 v0.AddArg(mem) 62510 v.AddArg(v0) 62511 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 62512 v1.AddArg(dst) 62513 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 62514 v2.AddArg(src) 62515 v2.AddArg(mem) 62516 v1.AddArg(v2) 62517 v1.AddArg(mem) 62518 v.AddArg(v1) 62519 return true 62520 } 62521 // match: (Move [32] dst src mem) 62522 // cond: 62523 // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem)) 62524 for { 62525 if v.AuxInt != 32 { 62526 break 62527 } 62528 _ = 
v.Args[2] 62529 dst := v.Args[0] 62530 src := v.Args[1] 62531 mem := v.Args[2] 62532 v.reset(OpMove) 62533 v.AuxInt = 16 62534 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 62535 v0.AuxInt = 16 62536 v0.AddArg(dst) 62537 v.AddArg(v0) 62538 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 62539 v1.AuxInt = 16 62540 v1.AddArg(src) 62541 v.AddArg(v1) 62542 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) 62543 v2.AuxInt = 16 62544 v2.AddArg(dst) 62545 v2.AddArg(src) 62546 v2.AddArg(mem) 62547 v.AddArg(v2) 62548 return true 62549 } 62550 // match: (Move [48] dst src mem) 62551 // cond: config.useSSE 62552 // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem)) 62553 for { 62554 if v.AuxInt != 48 { 62555 break 62556 } 62557 _ = v.Args[2] 62558 dst := v.Args[0] 62559 src := v.Args[1] 62560 mem := v.Args[2] 62561 if !(config.useSSE) { 62562 break 62563 } 62564 v.reset(OpMove) 62565 v.AuxInt = 32 62566 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 62567 v0.AuxInt = 16 62568 v0.AddArg(dst) 62569 v.AddArg(v0) 62570 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 62571 v1.AuxInt = 16 62572 v1.AddArg(src) 62573 v.AddArg(v1) 62574 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) 62575 v2.AuxInt = 16 62576 v2.AddArg(dst) 62577 v2.AddArg(src) 62578 v2.AddArg(mem) 62579 v.AddArg(v2) 62580 return true 62581 } 62582 // match: (Move [64] dst src mem) 62583 // cond: config.useSSE 62584 // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem)) 62585 for { 62586 if v.AuxInt != 64 { 62587 break 62588 } 62589 _ = v.Args[2] 62590 dst := v.Args[0] 62591 src := v.Args[1] 62592 mem := v.Args[2] 62593 if !(config.useSSE) { 62594 break 62595 } 62596 v.reset(OpMove) 62597 v.AuxInt = 32 62598 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 62599 v0.AuxInt = 32 62600 v0.AddArg(dst) 62601 v.AddArg(v0) 62602 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 62603 v1.AuxInt = 32 62604 v1.AddArg(src) 62605 v.AddArg(v1) 62606 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) 62607 v2.AuxInt = 32 62608 v2.AddArg(dst) 62609 v2.AddArg(src) 62610 v2.AddArg(mem) 62611 v.AddArg(v2) 62612 return true 62613 } 62614 return false 62615 } 62616 func rewriteValueAMD64_OpMove_10(v *Value) bool { 62617 b := v.Block 62618 _ = b 62619 config := b.Func.Config 62620 _ = config 62621 typ := &b.Func.Config.Types 62622 _ = typ 62623 // match: (Move [3] dst src mem) 62624 // cond: 62625 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 62626 for { 62627 if v.AuxInt != 3 { 62628 break 62629 } 62630 _ = v.Args[2] 62631 dst := v.Args[0] 62632 src := v.Args[1] 62633 mem := v.Args[2] 62634 v.reset(OpAMD64MOVBstore) 62635 v.AuxInt = 2 62636 v.AddArg(dst) 62637 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 62638 v0.AuxInt = 2 62639 v0.AddArg(src) 62640 v0.AddArg(mem) 62641 v.AddArg(v0) 62642 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) 62643 v1.AddArg(dst) 62644 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 62645 v2.AddArg(src) 62646 v2.AddArg(mem) 62647 v1.AddArg(v2) 62648 v1.AddArg(mem) 62649 v.AddArg(v1) 62650 return true 62651 } 62652 // match: (Move [5] dst src mem) 62653 // cond: 62654 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 62655 for { 62656 if v.AuxInt != 5 { 62657 break 62658 } 62659 _ = v.Args[2] 62660 dst := v.Args[0] 62661 src := v.Args[1] 62662 mem := v.Args[2] 62663 v.reset(OpAMD64MOVBstore) 62664 v.AuxInt = 4 62665 v.AddArg(dst) 62666 v0 := 
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [9] dst src mem)
	// cond:
	// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 9 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// cond:
	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 10 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// cond:
	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 12 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s == 11 || s >= 13 && s <= 15
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s == 11 || s >= 13 && s <= 15) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	return false
}
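// NOTE (editorial sketch, not part of the generated code): sizes 3, 5, 6, 9,
// 10 and 12 above split into a wide op plus an exact-width tail, while size 7
// and the s == 11 || 13 <= s <= 15 rule use two *overlapping* windows of the
// same width ([s-8] and [0], or [3] and [0] for size 7). The overlap is safe
// because the two pointer arguments of a Move are known not to alias. A
// hypothetical source-level model of the overlapping case:
func exampleCopyOverlap(dst, src []byte) {
	s := len(dst) // assume s == 11 or 13 <= s <= 15, as in the rule's cond
	// (MOVQstore dst (MOVQload src mem) mem): first 8 bytes
	copy(dst[:8], src[:8])
	// (MOVQstore [s-8] dst (MOVQload [s-8] src mem) ...): last 8 bytes,
	// overlapping the first window in the middle
	copy(dst[s-8:], src[s-8:])
}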
func rewriteValueAMD64_OpMove_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
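// NOTE (editorial sketch, not part of the generated code): mid-size aligned
// copies use DUFFCOPY, jumping into the middle of a Duff's-device copy body;
// the AuxInt above selects the entry point. The formula is taken verbatim
// from the rule; reading it as "14 bytes of instructions per 16-byte copy
// unit, 64 units in the full device" is an interpretation, not something this
// file states. Copies that are too large, or builds with the Duff device
// disabled, fall through to REPMOVSQ with a quadword count of s/8.
func exampleDuffCopyOffset(s int64) int64 {
	// valid for 64 < s <= 16*64 with s%16 == 0, per the rule's cond
	return 14 * (64 - s/16)
}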
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
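// NOTE (editorial sketch, not part of the generated code): Neg32F and Neg64F
// above negate by XORing the sign bit — the PXOR mask is the bit pattern of
// -0.0 (math.Copysign(0, -1)), whose only set bit is the sign bit, so zeros
// and NaNs negate correctly too. An equivalent source-level model (the
// function name is hypothetical; math is already imported in this file):
func exampleNeg64F(x float64) float64 {
	signMask := math.Float64bits(math.Copysign(0, -1)) // 1 << 63
	return math.Float64frombits(math.Float64bits(x) ^ signMask)
}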
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
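// NOTE (editorial sketch, not part of the generated code): the OffPtr rules
// above try ADDQconst first and only fall back to materializing the offset in
// a register (MOVQconst + ADDQ) when it does not fit in the signed 32-bit
// immediate an x86-64 ADD accepts; the ordering of the two PtrSize == 8 rules
// is what makes the fallback correct. The is32Bit helper lives elsewhere in
// this package; it is presumably equivalent to:
func exampleIs32Bit(n int64) bool {
	return n == int64(int32(n))
}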
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft16_0(v *Value) bool {
	// match: (RotateLeft16 a b)
	// cond:
	// result: (ROLW a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLW)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft32_0(v *Value) bool {
	// match: (RotateLeft32 a b)
	// cond:
	// result: (ROLL a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLL)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft64_0(v *Value) bool {
	// match: (RotateLeft64 a b)
	// cond:
	// result: (ROLQ a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLQ)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft8_0(v *Value) bool {
	// match: (RotateLeft8 a b)
	// cond:
	// result: (ROLB a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLB)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
	// match: (RoundToEven x)
	// cond:
	// result: (ROUNDSD [0] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
}
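// NOTE (editorial sketch, not part of the generated code): PopCount16 and
// PopCount8 above zero-extend (MOVWQZX/MOVBQZX) before POPCNTL so the 32-bit
// population count only ever sees the operand's own bits; PopCount32 and
// PopCount64 map directly to POPCNTL/POPCNTQ. A source-level model of the
// 16-bit case (hypothetical name; equivalent to math/bits.OnesCount16):
func examplePopCount16(x uint16) int {
	v := uint32(x) // zero-extension, as MOVWQZX does
	n := 0
	for v != 0 {
		v &= v - 1 // clear the lowest set bit
		n++
	}
	return n
}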
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
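// NOTE (editorial sketch, not part of the generated code): for unbounded
// unsigned shifts, a bare SHR uses only the low bits of the count, so on its
// own it cannot give Go's "shifting by the width or more yields 0". The rules
// above therefore AND the shifted value with SBBLcarrymask(CMPxconst y
// [width]) — a branch-free mask that is all ones when y < width and zero
// otherwise. A hypothetical source-level model of Rsh16Ux64:
func exampleRsh16Ux64(x uint16, y uint64) uint16 {
	shifted := uint32(x) >> (y & 31) // SHRW: the hardware masks the count
	var mask uint32                  // SBBLcarrymask(CMPQconst y [16])
	if y < 16 {
		mask = ^uint32(0)
	}
	return uint16(shifted & mask)
}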
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
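// NOTE (editorial sketch, not part of the generated code): signed shifts need
// saturation instead of masking — shifting by the width or more must fill
// with sign bits, not produce 0. The Rsh16x* rules above OR the count with
// the NOT of the carry mask: for y < width that ORs in zero and leaves y
// alone, while for y >= width it turns every count bit on, so SAR shifts by
// the maximum and yields 0 or -1. A hypothetical source-level model of
// Rsh16x64:
func exampleRsh16x64(x int16, y uint64) int16 {
	c := y
	if c >= 16 {
		c = 31 // saturated count: SARW by 31 leaves only sign bits
	}
	return int16(int32(x) >> c)
}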
(Rsh32Ux32 x y) 64090 // cond: shiftIsBounded(v) 64091 // result: (SHRL x y) 64092 for { 64093 _ = v.Args[1] 64094 x := v.Args[0] 64095 y := v.Args[1] 64096 if !(shiftIsBounded(v)) { 64097 break 64098 } 64099 v.reset(OpAMD64SHRL) 64100 v.AddArg(x) 64101 v.AddArg(y) 64102 return true 64103 } 64104 return false 64105 } 64106 func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool { 64107 b := v.Block 64108 _ = b 64109 // match: (Rsh32Ux64 <t> x y) 64110 // cond: !shiftIsBounded(v) 64111 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 64112 for { 64113 t := v.Type 64114 _ = v.Args[1] 64115 x := v.Args[0] 64116 y := v.Args[1] 64117 if !(!shiftIsBounded(v)) { 64118 break 64119 } 64120 v.reset(OpAMD64ANDL) 64121 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 64122 v0.AddArg(x) 64123 v0.AddArg(y) 64124 v.AddArg(v0) 64125 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64126 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64127 v2.AuxInt = 32 64128 v2.AddArg(y) 64129 v1.AddArg(v2) 64130 v.AddArg(v1) 64131 return true 64132 } 64133 // match: (Rsh32Ux64 x y) 64134 // cond: shiftIsBounded(v) 64135 // result: (SHRL x y) 64136 for { 64137 _ = v.Args[1] 64138 x := v.Args[0] 64139 y := v.Args[1] 64140 if !(shiftIsBounded(v)) { 64141 break 64142 } 64143 v.reset(OpAMD64SHRL) 64144 v.AddArg(x) 64145 v.AddArg(y) 64146 return true 64147 } 64148 return false 64149 } 64150 func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool { 64151 b := v.Block 64152 _ = b 64153 // match: (Rsh32Ux8 <t> x y) 64154 // cond: !shiftIsBounded(v) 64155 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 64156 for { 64157 t := v.Type 64158 _ = v.Args[1] 64159 x := v.Args[0] 64160 y := v.Args[1] 64161 if !(!shiftIsBounded(v)) { 64162 break 64163 } 64164 v.reset(OpAMD64ANDL) 64165 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 64166 v0.AddArg(x) 64167 v0.AddArg(y) 64168 v.AddArg(v0) 64169 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64170 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64171 v2.AuxInt = 32 64172 v2.AddArg(y) 64173 v1.AddArg(v2) 64174 v.AddArg(v1) 64175 return true 64176 } 64177 // match: (Rsh32Ux8 x y) 64178 // cond: shiftIsBounded(v) 64179 // result: (SHRL x y) 64180 for { 64181 _ = v.Args[1] 64182 x := v.Args[0] 64183 y := v.Args[1] 64184 if !(shiftIsBounded(v)) { 64185 break 64186 } 64187 v.reset(OpAMD64SHRL) 64188 v.AddArg(x) 64189 v.AddArg(y) 64190 return true 64191 } 64192 return false 64193 } 64194 func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool { 64195 b := v.Block 64196 _ = b 64197 // match: (Rsh32x16 <t> x y) 64198 // cond: !shiftIsBounded(v) 64199 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 64200 for { 64201 t := v.Type 64202 _ = v.Args[1] 64203 x := v.Args[0] 64204 y := v.Args[1] 64205 if !(!shiftIsBounded(v)) { 64206 break 64207 } 64208 v.reset(OpAMD64SARL) 64209 v.Type = t 64210 v.AddArg(x) 64211 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64212 v0.AddArg(y) 64213 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64214 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64215 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64216 v3.AuxInt = 32 64217 v3.AddArg(y) 64218 v2.AddArg(v3) 64219 v1.AddArg(v2) 64220 v0.AddArg(v1) 64221 v.AddArg(v0) 64222 return true 64223 } 64224 // match: (Rsh32x16 x y) 64225 // cond: shiftIsBounded(v) 64226 // result: (SARL x y) 64227 for { 64228 _ = v.Args[1] 64229 x := v.Args[0] 64230 y := v.Args[1] 64231 if !(shiftIsBounded(v)) { 64232 break 64233 } 
64234 v.reset(OpAMD64SARL) 64235 v.AddArg(x) 64236 v.AddArg(y) 64237 return true 64238 } 64239 return false 64240 } 64241 func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool { 64242 b := v.Block 64243 _ = b 64244 // match: (Rsh32x32 <t> x y) 64245 // cond: !shiftIsBounded(v) 64246 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 64247 for { 64248 t := v.Type 64249 _ = v.Args[1] 64250 x := v.Args[0] 64251 y := v.Args[1] 64252 if !(!shiftIsBounded(v)) { 64253 break 64254 } 64255 v.reset(OpAMD64SARL) 64256 v.Type = t 64257 v.AddArg(x) 64258 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64259 v0.AddArg(y) 64260 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64261 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64262 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64263 v3.AuxInt = 32 64264 v3.AddArg(y) 64265 v2.AddArg(v3) 64266 v1.AddArg(v2) 64267 v0.AddArg(v1) 64268 v.AddArg(v0) 64269 return true 64270 } 64271 // match: (Rsh32x32 x y) 64272 // cond: shiftIsBounded(v) 64273 // result: (SARL x y) 64274 for { 64275 _ = v.Args[1] 64276 x := v.Args[0] 64277 y := v.Args[1] 64278 if !(shiftIsBounded(v)) { 64279 break 64280 } 64281 v.reset(OpAMD64SARL) 64282 v.AddArg(x) 64283 v.AddArg(y) 64284 return true 64285 } 64286 return false 64287 } 64288 func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool { 64289 b := v.Block 64290 _ = b 64291 // match: (Rsh32x64 <t> x y) 64292 // cond: !shiftIsBounded(v) 64293 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 64294 for { 64295 t := v.Type 64296 _ = v.Args[1] 64297 x := v.Args[0] 64298 y := v.Args[1] 64299 if !(!shiftIsBounded(v)) { 64300 break 64301 } 64302 v.reset(OpAMD64SARL) 64303 v.Type = t 64304 v.AddArg(x) 64305 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 64306 v0.AddArg(y) 64307 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 64308 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 64309 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64310 v3.AuxInt = 32 64311 v3.AddArg(y) 64312 v2.AddArg(v3) 64313 v1.AddArg(v2) 64314 v0.AddArg(v1) 64315 v.AddArg(v0) 64316 return true 64317 } 64318 // match: (Rsh32x64 x y) 64319 // cond: shiftIsBounded(v) 64320 // result: (SARL x y) 64321 for { 64322 _ = v.Args[1] 64323 x := v.Args[0] 64324 y := v.Args[1] 64325 if !(shiftIsBounded(v)) { 64326 break 64327 } 64328 v.reset(OpAMD64SARL) 64329 v.AddArg(x) 64330 v.AddArg(y) 64331 return true 64332 } 64333 return false 64334 } 64335 func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool { 64336 b := v.Block 64337 _ = b 64338 // match: (Rsh32x8 <t> x y) 64339 // cond: !shiftIsBounded(v) 64340 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 64341 for { 64342 t := v.Type 64343 _ = v.Args[1] 64344 x := v.Args[0] 64345 y := v.Args[1] 64346 if !(!shiftIsBounded(v)) { 64347 break 64348 } 64349 v.reset(OpAMD64SARL) 64350 v.Type = t 64351 v.AddArg(x) 64352 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64353 v0.AddArg(y) 64354 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64355 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64356 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64357 v3.AuxInt = 32 64358 v3.AddArg(y) 64359 v2.AddArg(v3) 64360 v1.AddArg(v2) 64361 v0.AddArg(v1) 64362 v.AddArg(v0) 64363 return true 64364 } 64365 // match: (Rsh32x8 x y) 64366 // cond: shiftIsBounded(v) 64367 // result: (SARL x y) 64368 for { 64369 _ = v.Args[1] 64370 x := v.Args[0] 64371 y := v.Args[1] 64372 
if !(shiftIsBounded(v)) { 64373 break 64374 } 64375 v.reset(OpAMD64SARL) 64376 v.AddArg(x) 64377 v.AddArg(y) 64378 return true 64379 } 64380 return false 64381 } 64382 func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool { 64383 b := v.Block 64384 _ = b 64385 // match: (Rsh64Ux16 <t> x y) 64386 // cond: !shiftIsBounded(v) 64387 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 64388 for { 64389 t := v.Type 64390 _ = v.Args[1] 64391 x := v.Args[0] 64392 y := v.Args[1] 64393 if !(!shiftIsBounded(v)) { 64394 break 64395 } 64396 v.reset(OpAMD64ANDQ) 64397 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 64398 v0.AddArg(x) 64399 v0.AddArg(y) 64400 v.AddArg(v0) 64401 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 64402 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64403 v2.AuxInt = 64 64404 v2.AddArg(y) 64405 v1.AddArg(v2) 64406 v.AddArg(v1) 64407 return true 64408 } 64409 // match: (Rsh64Ux16 x y) 64410 // cond: shiftIsBounded(v) 64411 // result: (SHRQ x y) 64412 for { 64413 _ = v.Args[1] 64414 x := v.Args[0] 64415 y := v.Args[1] 64416 if !(shiftIsBounded(v)) { 64417 break 64418 } 64419 v.reset(OpAMD64SHRQ) 64420 v.AddArg(x) 64421 v.AddArg(y) 64422 return true 64423 } 64424 return false 64425 } 64426 func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool { 64427 b := v.Block 64428 _ = b 64429 // match: (Rsh64Ux32 <t> x y) 64430 // cond: !shiftIsBounded(v) 64431 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 64432 for { 64433 t := v.Type 64434 _ = v.Args[1] 64435 x := v.Args[0] 64436 y := v.Args[1] 64437 if !(!shiftIsBounded(v)) { 64438 break 64439 } 64440 v.reset(OpAMD64ANDQ) 64441 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 64442 v0.AddArg(x) 64443 v0.AddArg(y) 64444 v.AddArg(v0) 64445 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 64446 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64447 v2.AuxInt = 64 64448 v2.AddArg(y) 64449 v1.AddArg(v2) 64450 v.AddArg(v1) 64451 return true 64452 } 64453 // match: (Rsh64Ux32 x y) 64454 // cond: shiftIsBounded(v) 64455 // result: (SHRQ x y) 64456 for { 64457 _ = v.Args[1] 64458 x := v.Args[0] 64459 y := v.Args[1] 64460 if !(shiftIsBounded(v)) { 64461 break 64462 } 64463 v.reset(OpAMD64SHRQ) 64464 v.AddArg(x) 64465 v.AddArg(y) 64466 return true 64467 } 64468 return false 64469 } 64470 func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { 64471 b := v.Block 64472 _ = b 64473 // match: (Rsh64Ux64 <t> x y) 64474 // cond: !shiftIsBounded(v) 64475 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 64476 for { 64477 t := v.Type 64478 _ = v.Args[1] 64479 x := v.Args[0] 64480 y := v.Args[1] 64481 if !(!shiftIsBounded(v)) { 64482 break 64483 } 64484 v.reset(OpAMD64ANDQ) 64485 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 64486 v0.AddArg(x) 64487 v0.AddArg(y) 64488 v.AddArg(v0) 64489 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 64490 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64491 v2.AuxInt = 64 64492 v2.AddArg(y) 64493 v1.AddArg(v2) 64494 v.AddArg(v1) 64495 return true 64496 } 64497 // match: (Rsh64Ux64 x y) 64498 // cond: shiftIsBounded(v) 64499 // result: (SHRQ x y) 64500 for { 64501 _ = v.Args[1] 64502 x := v.Args[0] 64503 y := v.Args[1] 64504 if !(shiftIsBounded(v)) { 64505 break 64506 } 64507 v.reset(OpAMD64SHRQ) 64508 v.AddArg(x) 64509 v.AddArg(y) 64510 return true 64511 } 64512 return false 64513 } 64514 func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { 64515 b := v.Block 64516 _ = b 64517 // match: (Rsh64Ux8 <t> x y) 64518 // cond: !shiftIsBounded(v) 
64519 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 64520 for { 64521 t := v.Type 64522 _ = v.Args[1] 64523 x := v.Args[0] 64524 y := v.Args[1] 64525 if !(!shiftIsBounded(v)) { 64526 break 64527 } 64528 v.reset(OpAMD64ANDQ) 64529 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 64530 v0.AddArg(x) 64531 v0.AddArg(y) 64532 v.AddArg(v0) 64533 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 64534 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64535 v2.AuxInt = 64 64536 v2.AddArg(y) 64537 v1.AddArg(v2) 64538 v.AddArg(v1) 64539 return true 64540 } 64541 // match: (Rsh64Ux8 x y) 64542 // cond: shiftIsBounded(v) 64543 // result: (SHRQ x y) 64544 for { 64545 _ = v.Args[1] 64546 x := v.Args[0] 64547 y := v.Args[1] 64548 if !(shiftIsBounded(v)) { 64549 break 64550 } 64551 v.reset(OpAMD64SHRQ) 64552 v.AddArg(x) 64553 v.AddArg(y) 64554 return true 64555 } 64556 return false 64557 } 64558 func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { 64559 b := v.Block 64560 _ = b 64561 // match: (Rsh64x16 <t> x y) 64562 // cond: !shiftIsBounded(v) 64563 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 64564 for { 64565 t := v.Type 64566 _ = v.Args[1] 64567 x := v.Args[0] 64568 y := v.Args[1] 64569 if !(!shiftIsBounded(v)) { 64570 break 64571 } 64572 v.reset(OpAMD64SARQ) 64573 v.Type = t 64574 v.AddArg(x) 64575 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64576 v0.AddArg(y) 64577 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64578 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64579 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64580 v3.AuxInt = 64 64581 v3.AddArg(y) 64582 v2.AddArg(v3) 64583 v1.AddArg(v2) 64584 v0.AddArg(v1) 64585 v.AddArg(v0) 64586 return true 64587 } 64588 // match: (Rsh64x16 x y) 64589 // cond: shiftIsBounded(v) 64590 // result: (SARQ x y) 64591 for { 64592 _ = v.Args[1] 64593 x := v.Args[0] 64594 y := v.Args[1] 64595 if !(shiftIsBounded(v)) { 64596 break 64597 } 64598 v.reset(OpAMD64SARQ) 64599 v.AddArg(x) 64600 v.AddArg(y) 64601 return true 64602 } 64603 return false 64604 } 64605 func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { 64606 b := v.Block 64607 _ = b 64608 // match: (Rsh64x32 <t> x y) 64609 // cond: !shiftIsBounded(v) 64610 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 64611 for { 64612 t := v.Type 64613 _ = v.Args[1] 64614 x := v.Args[0] 64615 y := v.Args[1] 64616 if !(!shiftIsBounded(v)) { 64617 break 64618 } 64619 v.reset(OpAMD64SARQ) 64620 v.Type = t 64621 v.AddArg(x) 64622 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64623 v0.AddArg(y) 64624 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64625 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64626 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64627 v3.AuxInt = 64 64628 v3.AddArg(y) 64629 v2.AddArg(v3) 64630 v1.AddArg(v2) 64631 v0.AddArg(v1) 64632 v.AddArg(v0) 64633 return true 64634 } 64635 // match: (Rsh64x32 x y) 64636 // cond: shiftIsBounded(v) 64637 // result: (SARQ x y) 64638 for { 64639 _ = v.Args[1] 64640 x := v.Args[0] 64641 y := v.Args[1] 64642 if !(shiftIsBounded(v)) { 64643 break 64644 } 64645 v.reset(OpAMD64SARQ) 64646 v.AddArg(x) 64647 v.AddArg(y) 64648 return true 64649 } 64650 return false 64651 } 64652 func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { 64653 b := v.Block 64654 _ = b 64655 // match: (Rsh64x64 <t> x y) 64656 // cond: !shiftIsBounded(v) 64657 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask 
<y.Type> (CMPQconst y [64]))))) 64658 for { 64659 t := v.Type 64660 _ = v.Args[1] 64661 x := v.Args[0] 64662 y := v.Args[1] 64663 if !(!shiftIsBounded(v)) { 64664 break 64665 } 64666 v.reset(OpAMD64SARQ) 64667 v.Type = t 64668 v.AddArg(x) 64669 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 64670 v0.AddArg(y) 64671 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 64672 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 64673 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64674 v3.AuxInt = 64 64675 v3.AddArg(y) 64676 v2.AddArg(v3) 64677 v1.AddArg(v2) 64678 v0.AddArg(v1) 64679 v.AddArg(v0) 64680 return true 64681 } 64682 // match: (Rsh64x64 x y) 64683 // cond: shiftIsBounded(v) 64684 // result: (SARQ x y) 64685 for { 64686 _ = v.Args[1] 64687 x := v.Args[0] 64688 y := v.Args[1] 64689 if !(shiftIsBounded(v)) { 64690 break 64691 } 64692 v.reset(OpAMD64SARQ) 64693 v.AddArg(x) 64694 v.AddArg(y) 64695 return true 64696 } 64697 return false 64698 } 64699 func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { 64700 b := v.Block 64701 _ = b 64702 // match: (Rsh64x8 <t> x y) 64703 // cond: !shiftIsBounded(v) 64704 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 64705 for { 64706 t := v.Type 64707 _ = v.Args[1] 64708 x := v.Args[0] 64709 y := v.Args[1] 64710 if !(!shiftIsBounded(v)) { 64711 break 64712 } 64713 v.reset(OpAMD64SARQ) 64714 v.Type = t 64715 v.AddArg(x) 64716 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64717 v0.AddArg(y) 64718 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64719 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64720 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64721 v3.AuxInt = 64 64722 v3.AddArg(y) 64723 v2.AddArg(v3) 64724 v1.AddArg(v2) 64725 v0.AddArg(v1) 64726 v.AddArg(v0) 64727 return true 64728 } 64729 // match: (Rsh64x8 x y) 64730 // cond: shiftIsBounded(v) 64731 // result: (SARQ x y) 64732 for { 64733 _ = v.Args[1] 64734 x := v.Args[0] 64735 y := v.Args[1] 64736 if !(shiftIsBounded(v)) { 64737 break 64738 } 64739 v.reset(OpAMD64SARQ) 64740 v.AddArg(x) 64741 v.AddArg(y) 64742 return true 64743 } 64744 return false 64745 } 64746 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { 64747 b := v.Block 64748 _ = b 64749 // match: (Rsh8Ux16 <t> x y) 64750 // cond: !shiftIsBounded(v) 64751 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 64752 for { 64753 t := v.Type 64754 _ = v.Args[1] 64755 x := v.Args[0] 64756 y := v.Args[1] 64757 if !(!shiftIsBounded(v)) { 64758 break 64759 } 64760 v.reset(OpAMD64ANDL) 64761 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64762 v0.AddArg(x) 64763 v0.AddArg(y) 64764 v.AddArg(v0) 64765 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64766 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64767 v2.AuxInt = 8 64768 v2.AddArg(y) 64769 v1.AddArg(v2) 64770 v.AddArg(v1) 64771 return true 64772 } 64773 // match: (Rsh8Ux16 x y) 64774 // cond: shiftIsBounded(v) 64775 // result: (SHRB x y) 64776 for { 64777 _ = v.Args[1] 64778 x := v.Args[0] 64779 y := v.Args[1] 64780 if !(shiftIsBounded(v)) { 64781 break 64782 } 64783 v.reset(OpAMD64SHRB) 64784 v.AddArg(x) 64785 v.AddArg(y) 64786 return true 64787 } 64788 return false 64789 } 64790 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { 64791 b := v.Block 64792 _ = b 64793 // match: (Rsh8Ux32 <t> x y) 64794 // cond: !shiftIsBounded(v) 64795 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 64796 for { 64797 t := v.Type 64798 _ = v.Args[1] 64799 x := v.Args[0] 64800 y := 
v.Args[1] 64801 if !(!shiftIsBounded(v)) { 64802 break 64803 } 64804 v.reset(OpAMD64ANDL) 64805 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64806 v0.AddArg(x) 64807 v0.AddArg(y) 64808 v.AddArg(v0) 64809 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64810 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64811 v2.AuxInt = 8 64812 v2.AddArg(y) 64813 v1.AddArg(v2) 64814 v.AddArg(v1) 64815 return true 64816 } 64817 // match: (Rsh8Ux32 x y) 64818 // cond: shiftIsBounded(v) 64819 // result: (SHRB x y) 64820 for { 64821 _ = v.Args[1] 64822 x := v.Args[0] 64823 y := v.Args[1] 64824 if !(shiftIsBounded(v)) { 64825 break 64826 } 64827 v.reset(OpAMD64SHRB) 64828 v.AddArg(x) 64829 v.AddArg(y) 64830 return true 64831 } 64832 return false 64833 } 64834 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { 64835 b := v.Block 64836 _ = b 64837 // match: (Rsh8Ux64 <t> x y) 64838 // cond: !shiftIsBounded(v) 64839 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 64840 for { 64841 t := v.Type 64842 _ = v.Args[1] 64843 x := v.Args[0] 64844 y := v.Args[1] 64845 if !(!shiftIsBounded(v)) { 64846 break 64847 } 64848 v.reset(OpAMD64ANDL) 64849 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64850 v0.AddArg(x) 64851 v0.AddArg(y) 64852 v.AddArg(v0) 64853 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64854 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64855 v2.AuxInt = 8 64856 v2.AddArg(y) 64857 v1.AddArg(v2) 64858 v.AddArg(v1) 64859 return true 64860 } 64861 // match: (Rsh8Ux64 x y) 64862 // cond: shiftIsBounded(v) 64863 // result: (SHRB x y) 64864 for { 64865 _ = v.Args[1] 64866 x := v.Args[0] 64867 y := v.Args[1] 64868 if !(shiftIsBounded(v)) { 64869 break 64870 } 64871 v.reset(OpAMD64SHRB) 64872 v.AddArg(x) 64873 v.AddArg(y) 64874 return true 64875 } 64876 return false 64877 } 64878 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { 64879 b := v.Block 64880 _ = b 64881 // match: (Rsh8Ux8 <t> x y) 64882 // cond: !shiftIsBounded(v) 64883 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 64884 for { 64885 t := v.Type 64886 _ = v.Args[1] 64887 x := v.Args[0] 64888 y := v.Args[1] 64889 if !(!shiftIsBounded(v)) { 64890 break 64891 } 64892 v.reset(OpAMD64ANDL) 64893 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64894 v0.AddArg(x) 64895 v0.AddArg(y) 64896 v.AddArg(v0) 64897 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64898 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64899 v2.AuxInt = 8 64900 v2.AddArg(y) 64901 v1.AddArg(v2) 64902 v.AddArg(v1) 64903 return true 64904 } 64905 // match: (Rsh8Ux8 x y) 64906 // cond: shiftIsBounded(v) 64907 // result: (SHRB x y) 64908 for { 64909 _ = v.Args[1] 64910 x := v.Args[0] 64911 y := v.Args[1] 64912 if !(shiftIsBounded(v)) { 64913 break 64914 } 64915 v.reset(OpAMD64SHRB) 64916 v.AddArg(x) 64917 v.AddArg(y) 64918 return true 64919 } 64920 return false 64921 } 64922 func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { 64923 b := v.Block 64924 _ = b 64925 // match: (Rsh8x16 <t> x y) 64926 // cond: !shiftIsBounded(v) 64927 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 64928 for { 64929 t := v.Type 64930 _ = v.Args[1] 64931 x := v.Args[0] 64932 y := v.Args[1] 64933 if !(!shiftIsBounded(v)) { 64934 break 64935 } 64936 v.reset(OpAMD64SARB) 64937 v.Type = t 64938 v.AddArg(x) 64939 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64940 v0.AddArg(y) 64941 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64942 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64943 
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64944 v3.AuxInt = 8 64945 v3.AddArg(y) 64946 v2.AddArg(v3) 64947 v1.AddArg(v2) 64948 v0.AddArg(v1) 64949 v.AddArg(v0) 64950 return true 64951 } 64952 // match: (Rsh8x16 x y) 64953 // cond: shiftIsBounded(v) 64954 // result: (SARB x y) 64955 for { 64956 _ = v.Args[1] 64957 x := v.Args[0] 64958 y := v.Args[1] 64959 if !(shiftIsBounded(v)) { 64960 break 64961 } 64962 v.reset(OpAMD64SARB) 64963 v.AddArg(x) 64964 v.AddArg(y) 64965 return true 64966 } 64967 return false 64968 } 64969 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { 64970 b := v.Block 64971 _ = b 64972 // match: (Rsh8x32 <t> x y) 64973 // cond: !shiftIsBounded(v) 64974 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 64975 for { 64976 t := v.Type 64977 _ = v.Args[1] 64978 x := v.Args[0] 64979 y := v.Args[1] 64980 if !(!shiftIsBounded(v)) { 64981 break 64982 } 64983 v.reset(OpAMD64SARB) 64984 v.Type = t 64985 v.AddArg(x) 64986 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64987 v0.AddArg(y) 64988 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64989 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64990 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64991 v3.AuxInt = 8 64992 v3.AddArg(y) 64993 v2.AddArg(v3) 64994 v1.AddArg(v2) 64995 v0.AddArg(v1) 64996 v.AddArg(v0) 64997 return true 64998 } 64999 // match: (Rsh8x32 x y) 65000 // cond: shiftIsBounded(v) 65001 // result: (SARB x y) 65002 for { 65003 _ = v.Args[1] 65004 x := v.Args[0] 65005 y := v.Args[1] 65006 if !(shiftIsBounded(v)) { 65007 break 65008 } 65009 v.reset(OpAMD64SARB) 65010 v.AddArg(x) 65011 v.AddArg(y) 65012 return true 65013 } 65014 return false 65015 } 65016 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { 65017 b := v.Block 65018 _ = b 65019 // match: (Rsh8x64 <t> x y) 65020 // cond: !shiftIsBounded(v) 65021 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 65022 for { 65023 t := v.Type 65024 _ = v.Args[1] 65025 x := v.Args[0] 65026 y := v.Args[1] 65027 if !(!shiftIsBounded(v)) { 65028 break 65029 } 65030 v.reset(OpAMD64SARB) 65031 v.Type = t 65032 v.AddArg(x) 65033 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 65034 v0.AddArg(y) 65035 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 65036 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 65037 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 65038 v3.AuxInt = 8 65039 v3.AddArg(y) 65040 v2.AddArg(v3) 65041 v1.AddArg(v2) 65042 v0.AddArg(v1) 65043 v.AddArg(v0) 65044 return true 65045 } 65046 // match: (Rsh8x64 x y) 65047 // cond: shiftIsBounded(v) 65048 // result: (SARB x y) 65049 for { 65050 _ = v.Args[1] 65051 x := v.Args[0] 65052 y := v.Args[1] 65053 if !(shiftIsBounded(v)) { 65054 break 65055 } 65056 v.reset(OpAMD64SARB) 65057 v.AddArg(x) 65058 v.AddArg(y) 65059 return true 65060 } 65061 return false 65062 } 65063 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { 65064 b := v.Block 65065 _ = b 65066 // match: (Rsh8x8 <t> x y) 65067 // cond: !shiftIsBounded(v) 65068 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 65069 for { 65070 t := v.Type 65071 _ = v.Args[1] 65072 x := v.Args[0] 65073 y := v.Args[1] 65074 if !(!shiftIsBounded(v)) { 65075 break 65076 } 65077 v.reset(OpAMD64SARB) 65078 v.Type = t 65079 v.AddArg(x) 65080 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 65081 v0.AddArg(y) 65082 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 65083 v2 := b.NewValue0(v.Pos, 
OpAMD64SBBLcarrymask, y.Type) 65084 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 65085 v3.AuxInt = 8 65086 v3.AddArg(y) 65087 v2.AddArg(v3) 65088 v1.AddArg(v2) 65089 v0.AddArg(v1) 65090 v.AddArg(v0) 65091 return true 65092 } 65093 // match: (Rsh8x8 x y) 65094 // cond: shiftIsBounded(v) 65095 // result: (SARB x y) 65096 for { 65097 _ = v.Args[1] 65098 x := v.Args[0] 65099 y := v.Args[1] 65100 if !(shiftIsBounded(v)) { 65101 break 65102 } 65103 v.reset(OpAMD64SARB) 65104 v.AddArg(x) 65105 v.AddArg(y) 65106 return true 65107 } 65108 return false 65109 } 65110 func rewriteValueAMD64_OpSelect0_0(v *Value) bool { 65111 b := v.Block 65112 _ = b 65113 typ := &b.Func.Config.Types 65114 _ = typ 65115 // match: (Select0 (Mul64uover x y)) 65116 // cond: 65117 // result: (Select0 <typ.UInt64> (MULQU x y)) 65118 for { 65119 v_0 := v.Args[0] 65120 if v_0.Op != OpMul64uover { 65121 break 65122 } 65123 _ = v_0.Args[1] 65124 x := v_0.Args[0] 65125 y := v_0.Args[1] 65126 v.reset(OpSelect0) 65127 v.Type = typ.UInt64 65128 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) 65129 v0.AddArg(x) 65130 v0.AddArg(y) 65131 v.AddArg(v0) 65132 return true 65133 } 65134 // match: (Select0 (Mul32uover x y)) 65135 // cond: 65136 // result: (Select0 <typ.UInt32> (MULLU x y)) 65137 for { 65138 v_0 := v.Args[0] 65139 if v_0.Op != OpMul32uover { 65140 break 65141 } 65142 _ = v_0.Args[1] 65143 x := v_0.Args[0] 65144 y := v_0.Args[1] 65145 v.reset(OpSelect0) 65146 v.Type = typ.UInt32 65147 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) 65148 v0.AddArg(x) 65149 v0.AddArg(y) 65150 v.AddArg(v0) 65151 return true 65152 } 65153 // match: (Select0 (Add64carry x y c)) 65154 // cond: 65155 // result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c)))) 65156 for { 65157 v_0 := v.Args[0] 65158 if v_0.Op != OpAdd64carry { 65159 break 65160 } 65161 _ = v_0.Args[2] 65162 x := v_0.Args[0] 65163 y := v_0.Args[1] 65164 c := v_0.Args[2] 65165 v.reset(OpSelect0) 65166 v.Type = typ.UInt64 65167 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 65168 v0.AddArg(x) 65169 v0.AddArg(y) 65170 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65171 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) 65172 v2.AddArg(c) 65173 v1.AddArg(v2) 65174 v0.AddArg(v1) 65175 v.AddArg(v0) 65176 return true 65177 } 65178 // match: (Select0 (Sub64borrow x y c)) 65179 // cond: 65180 // result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c)))) 65181 for { 65182 v_0 := v.Args[0] 65183 if v_0.Op != OpSub64borrow { 65184 break 65185 } 65186 _ = v_0.Args[2] 65187 x := v_0.Args[0] 65188 y := v_0.Args[1] 65189 c := v_0.Args[2] 65190 v.reset(OpSelect0) 65191 v.Type = typ.UInt64 65192 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 65193 v0.AddArg(x) 65194 v0.AddArg(y) 65195 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65196 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) 65197 v2.AddArg(c) 65198 v1.AddArg(v2) 65199 v0.AddArg(v1) 65200 v.AddArg(v0) 65201 return true 65202 } 65203 // match: (Select0 <t> (AddTupleFirst32 val tuple)) 65204 // cond: 65205 // result: (ADDL val (Select0 <t> tuple)) 65206 for { 65207 t := v.Type 65208 v_0 := v.Args[0] 65209 if v_0.Op != OpAMD64AddTupleFirst32 { 65210 break 65211 } 65212 _ = v_0.Args[1] 65213 val := v_0.Args[0] 65214 tuple := v_0.Args[1] 65215 
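		// Editorial note (hedged): AddTupleFirst32/64 are pseudo-ops meaning
		// "this tuple, but with val added to its first element". Select0 is
		// therefore lowered to a real ADD of val and the tuple's first
		// component, while Select1 (further below) simply forwards to the
		// tuple's second component. Conceptually:
		//
		//	// Select0(AddTupleFirst32(val, tuple)) == val + Select0(tuple)
		//	// Select1(AddTupleFirst32(val, tuple)) == Select1(tuple)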
v.reset(OpAMD64ADDL) 65216 v.AddArg(val) 65217 v0 := b.NewValue0(v.Pos, OpSelect0, t) 65218 v0.AddArg(tuple) 65219 v.AddArg(v0) 65220 return true 65221 } 65222 // match: (Select0 <t> (AddTupleFirst64 val tuple)) 65223 // cond: 65224 // result: (ADDQ val (Select0 <t> tuple)) 65225 for { 65226 t := v.Type 65227 v_0 := v.Args[0] 65228 if v_0.Op != OpAMD64AddTupleFirst64 { 65229 break 65230 } 65231 _ = v_0.Args[1] 65232 val := v_0.Args[0] 65233 tuple := v_0.Args[1] 65234 v.reset(OpAMD64ADDQ) 65235 v.AddArg(val) 65236 v0 := b.NewValue0(v.Pos, OpSelect0, t) 65237 v0.AddArg(tuple) 65238 v.AddArg(v0) 65239 return true 65240 } 65241 return false 65242 } 65243 func rewriteValueAMD64_OpSelect1_0(v *Value) bool { 65244 b := v.Block 65245 _ = b 65246 typ := &b.Func.Config.Types 65247 _ = typ 65248 // match: (Select1 (Mul64uover x y)) 65249 // cond: 65250 // result: (SETO (Select1 <types.TypeFlags> (MULQU x y))) 65251 for { 65252 v_0 := v.Args[0] 65253 if v_0.Op != OpMul64uover { 65254 break 65255 } 65256 _ = v_0.Args[1] 65257 x := v_0.Args[0] 65258 y := v_0.Args[1] 65259 v.reset(OpAMD64SETO) 65260 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65261 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) 65262 v1.AddArg(x) 65263 v1.AddArg(y) 65264 v0.AddArg(v1) 65265 v.AddArg(v0) 65266 return true 65267 } 65268 // match: (Select1 (Mul32uover x y)) 65269 // cond: 65270 // result: (SETO (Select1 <types.TypeFlags> (MULLU x y))) 65271 for { 65272 v_0 := v.Args[0] 65273 if v_0.Op != OpMul32uover { 65274 break 65275 } 65276 _ = v_0.Args[1] 65277 x := v_0.Args[0] 65278 y := v_0.Args[1] 65279 v.reset(OpAMD64SETO) 65280 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65281 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) 65282 v1.AddArg(x) 65283 v1.AddArg(y) 65284 v0.AddArg(v1) 65285 v.AddArg(v0) 65286 return true 65287 } 65288 // match: (Select1 (Add64carry x y c)) 65289 // cond: 65290 // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c)))))) 65291 for { 65292 v_0 := v.Args[0] 65293 if v_0.Op != OpAdd64carry { 65294 break 65295 } 65296 _ = v_0.Args[2] 65297 x := v_0.Args[0] 65298 y := v_0.Args[1] 65299 c := v_0.Args[2] 65300 v.reset(OpAMD64NEGQ) 65301 v.Type = typ.UInt64 65302 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) 65303 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65304 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 65305 v2.AddArg(x) 65306 v2.AddArg(y) 65307 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65308 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) 65309 v4.AddArg(c) 65310 v3.AddArg(v4) 65311 v2.AddArg(v3) 65312 v1.AddArg(v2) 65313 v0.AddArg(v1) 65314 v.AddArg(v0) 65315 return true 65316 } 65317 // match: (Select1 (Sub64borrow x y c)) 65318 // cond: 65319 // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c)))))) 65320 for { 65321 v_0 := v.Args[0] 65322 if v_0.Op != OpSub64borrow { 65323 break 65324 } 65325 _ = v_0.Args[2] 65326 x := v_0.Args[0] 65327 y := v_0.Args[1] 65328 c := v_0.Args[2] 65329 v.reset(OpAMD64NEGQ) 65330 v.Type = typ.UInt64 65331 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) 65332 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65333 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 65334 
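		// Editorial note (hedged): the carry/borrow chain round-trips a 0/1
		// value through the flags register. NEGLflags(c) sets the carry flag
		// iff c != 0; SBBQ consumes it as the incoming borrow; SBBQcarrymask
		// spreads the outgoing carry to 0 or ^0; and the enclosing NEGQ maps
		// ^0 back to 1. Net effect (sketch), matching math/bits semantics:
		//
		//	// diff, borrowOut := bits.Sub64(x, y, c) // borrowOut is 0 or 1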
v2.AddArg(x) 65335 v2.AddArg(y) 65336 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 65337 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) 65338 v4.AddArg(c) 65339 v3.AddArg(v4) 65340 v2.AddArg(v3) 65341 v1.AddArg(v2) 65342 v0.AddArg(v1) 65343 v.AddArg(v0) 65344 return true 65345 } 65346 // match: (Select1 (NEGLflags (MOVQconst [0]))) 65347 // cond: 65348 // result: (FlagEQ) 65349 for { 65350 v_0 := v.Args[0] 65351 if v_0.Op != OpAMD64NEGLflags { 65352 break 65353 } 65354 v_0_0 := v_0.Args[0] 65355 if v_0_0.Op != OpAMD64MOVQconst { 65356 break 65357 } 65358 if v_0_0.AuxInt != 0 { 65359 break 65360 } 65361 v.reset(OpAMD64FlagEQ) 65362 return true 65363 } 65364 // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) 65365 // cond: 65366 // result: x 65367 for { 65368 v_0 := v.Args[0] 65369 if v_0.Op != OpAMD64NEGLflags { 65370 break 65371 } 65372 v_0_0 := v_0.Args[0] 65373 if v_0_0.Op != OpAMD64NEGQ { 65374 break 65375 } 65376 v_0_0_0 := v_0_0.Args[0] 65377 if v_0_0_0.Op != OpAMD64SBBQcarrymask { 65378 break 65379 } 65380 x := v_0_0_0.Args[0] 65381 v.reset(OpCopy) 65382 v.Type = x.Type 65383 v.AddArg(x) 65384 return true 65385 } 65386 // match: (Select1 (AddTupleFirst32 _ tuple)) 65387 // cond: 65388 // result: (Select1 tuple) 65389 for { 65390 v_0 := v.Args[0] 65391 if v_0.Op != OpAMD64AddTupleFirst32 { 65392 break 65393 } 65394 _ = v_0.Args[1] 65395 tuple := v_0.Args[1] 65396 v.reset(OpSelect1) 65397 v.AddArg(tuple) 65398 return true 65399 } 65400 // match: (Select1 (AddTupleFirst64 _ tuple)) 65401 // cond: 65402 // result: (Select1 tuple) 65403 for { 65404 v_0 := v.Args[0] 65405 if v_0.Op != OpAMD64AddTupleFirst64 { 65406 break 65407 } 65408 _ = v_0.Args[1] 65409 tuple := v_0.Args[1] 65410 v.reset(OpSelect1) 65411 v.AddArg(tuple) 65412 return true 65413 } 65414 return false 65415 } 65416 func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool { 65417 // match: (SignExt16to32 x) 65418 // cond: 65419 // result: (MOVWQSX x) 65420 for { 65421 x := v.Args[0] 65422 v.reset(OpAMD64MOVWQSX) 65423 v.AddArg(x) 65424 return true 65425 } 65426 } 65427 func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool { 65428 // match: (SignExt16to64 x) 65429 // cond: 65430 // result: (MOVWQSX x) 65431 for { 65432 x := v.Args[0] 65433 v.reset(OpAMD64MOVWQSX) 65434 v.AddArg(x) 65435 return true 65436 } 65437 } 65438 func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool { 65439 // match: (SignExt32to64 x) 65440 // cond: 65441 // result: (MOVLQSX x) 65442 for { 65443 x := v.Args[0] 65444 v.reset(OpAMD64MOVLQSX) 65445 v.AddArg(x) 65446 return true 65447 } 65448 } 65449 func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool { 65450 // match: (SignExt8to16 x) 65451 // cond: 65452 // result: (MOVBQSX x) 65453 for { 65454 x := v.Args[0] 65455 v.reset(OpAMD64MOVBQSX) 65456 v.AddArg(x) 65457 return true 65458 } 65459 } 65460 func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool { 65461 // match: (SignExt8to32 x) 65462 // cond: 65463 // result: (MOVBQSX x) 65464 for { 65465 x := v.Args[0] 65466 v.reset(OpAMD64MOVBQSX) 65467 v.AddArg(x) 65468 return true 65469 } 65470 } 65471 func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool { 65472 // match: (SignExt8to64 x) 65473 // cond: 65474 // result: (MOVBQSX x) 65475 for { 65476 x := v.Args[0] 65477 v.reset(OpAMD64MOVBQSX) 65478 v.AddArg(x) 65479 return true 65480 } 65481 } 65482 func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { 65483 b := v.Block 65484 _ = b 65485 // match: (Slicemask <t> x) 65486 // cond: 65487 // result: 
(SARQconst (NEGQ <t> x) [63]) 65488 for { 65489 t := v.Type 65490 x := v.Args[0] 65491 v.reset(OpAMD64SARQconst) 65492 v.AuxInt = 63 65493 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 65494 v0.AddArg(x) 65495 v.AddArg(v0) 65496 return true 65497 } 65498 } 65499 func rewriteValueAMD64_OpSqrt_0(v *Value) bool { 65500 // match: (Sqrt x) 65501 // cond: 65502 // result: (SQRTSD x) 65503 for { 65504 x := v.Args[0] 65505 v.reset(OpAMD64SQRTSD) 65506 v.AddArg(x) 65507 return true 65508 } 65509 } 65510 func rewriteValueAMD64_OpStaticCall_0(v *Value) bool { 65511 // match: (StaticCall [argwid] {target} mem) 65512 // cond: 65513 // result: (CALLstatic [argwid] {target} mem) 65514 for { 65515 argwid := v.AuxInt 65516 target := v.Aux 65517 mem := v.Args[0] 65518 v.reset(OpAMD64CALLstatic) 65519 v.AuxInt = argwid 65520 v.Aux = target 65521 v.AddArg(mem) 65522 return true 65523 } 65524 } 65525 func rewriteValueAMD64_OpStore_0(v *Value) bool { 65526 // match: (Store {t} ptr val mem) 65527 // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) 65528 // result: (MOVSDstore ptr val mem) 65529 for { 65530 t := v.Aux 65531 _ = v.Args[2] 65532 ptr := v.Args[0] 65533 val := v.Args[1] 65534 mem := v.Args[2] 65535 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { 65536 break 65537 } 65538 v.reset(OpAMD64MOVSDstore) 65539 v.AddArg(ptr) 65540 v.AddArg(val) 65541 v.AddArg(mem) 65542 return true 65543 } 65544 // match: (Store {t} ptr val mem) 65545 // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) 65546 // result: (MOVSSstore ptr val mem) 65547 for { 65548 t := v.Aux 65549 _ = v.Args[2] 65550 ptr := v.Args[0] 65551 val := v.Args[1] 65552 mem := v.Args[2] 65553 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { 65554 break 65555 } 65556 v.reset(OpAMD64MOVSSstore) 65557 v.AddArg(ptr) 65558 v.AddArg(val) 65559 v.AddArg(mem) 65560 return true 65561 } 65562 // match: (Store {t} ptr val mem) 65563 // cond: t.(*types.Type).Size() == 8 65564 // result: (MOVQstore ptr val mem) 65565 for { 65566 t := v.Aux 65567 _ = v.Args[2] 65568 ptr := v.Args[0] 65569 val := v.Args[1] 65570 mem := v.Args[2] 65571 if !(t.(*types.Type).Size() == 8) { 65572 break 65573 } 65574 v.reset(OpAMD64MOVQstore) 65575 v.AddArg(ptr) 65576 v.AddArg(val) 65577 v.AddArg(mem) 65578 return true 65579 } 65580 // match: (Store {t} ptr val mem) 65581 // cond: t.(*types.Type).Size() == 4 65582 // result: (MOVLstore ptr val mem) 65583 for { 65584 t := v.Aux 65585 _ = v.Args[2] 65586 ptr := v.Args[0] 65587 val := v.Args[1] 65588 mem := v.Args[2] 65589 if !(t.(*types.Type).Size() == 4) { 65590 break 65591 } 65592 v.reset(OpAMD64MOVLstore) 65593 v.AddArg(ptr) 65594 v.AddArg(val) 65595 v.AddArg(mem) 65596 return true 65597 } 65598 // match: (Store {t} ptr val mem) 65599 // cond: t.(*types.Type).Size() == 2 65600 // result: (MOVWstore ptr val mem) 65601 for { 65602 t := v.Aux 65603 _ = v.Args[2] 65604 ptr := v.Args[0] 65605 val := v.Args[1] 65606 mem := v.Args[2] 65607 if !(t.(*types.Type).Size() == 2) { 65608 break 65609 } 65610 v.reset(OpAMD64MOVWstore) 65611 v.AddArg(ptr) 65612 v.AddArg(val) 65613 v.AddArg(mem) 65614 return true 65615 } 65616 // match: (Store {t} ptr val mem) 65617 // cond: t.(*types.Type).Size() == 1 65618 // result: (MOVBstore ptr val mem) 65619 for { 65620 t := v.Aux 65621 _ = v.Args[2] 65622 ptr := v.Args[0] 65623 val := v.Args[1] 65624 mem := v.Args[2] 65625 if !(t.(*types.Type).Size() == 1) { 65626 break 65627 } 65628 v.reset(OpAMD64MOVBstore) 65629 v.AddArg(ptr) 65630 v.AddArg(val) 65631 v.AddArg(mem) 
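		// Editorial note (hedged): the Store rules dispatch purely on the
		// stored type's static size and float-ness; larger aggregates are
		// decomposed in earlier passes and never reach this point. Roughly:
		//
		//	// size 8 float -> MOVSDstore    size 8 -> MOVQstore
		//	// size 4 float -> MOVSSstore    size 4 -> MOVLstore
		//	//                               size 2 -> MOVWstore
		//	//                               size 1 -> MOVBstore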
65632 return true 65633 } 65634 return false 65635 } 65636 func rewriteValueAMD64_OpSub16_0(v *Value) bool { 65637 // match: (Sub16 x y) 65638 // cond: 65639 // result: (SUBL x y) 65640 for { 65641 _ = v.Args[1] 65642 x := v.Args[0] 65643 y := v.Args[1] 65644 v.reset(OpAMD64SUBL) 65645 v.AddArg(x) 65646 v.AddArg(y) 65647 return true 65648 } 65649 } 65650 func rewriteValueAMD64_OpSub32_0(v *Value) bool { 65651 // match: (Sub32 x y) 65652 // cond: 65653 // result: (SUBL x y) 65654 for { 65655 _ = v.Args[1] 65656 x := v.Args[0] 65657 y := v.Args[1] 65658 v.reset(OpAMD64SUBL) 65659 v.AddArg(x) 65660 v.AddArg(y) 65661 return true 65662 } 65663 } 65664 func rewriteValueAMD64_OpSub32F_0(v *Value) bool { 65665 // match: (Sub32F x y) 65666 // cond: 65667 // result: (SUBSS x y) 65668 for { 65669 _ = v.Args[1] 65670 x := v.Args[0] 65671 y := v.Args[1] 65672 v.reset(OpAMD64SUBSS) 65673 v.AddArg(x) 65674 v.AddArg(y) 65675 return true 65676 } 65677 } 65678 func rewriteValueAMD64_OpSub64_0(v *Value) bool { 65679 // match: (Sub64 x y) 65680 // cond: 65681 // result: (SUBQ x y) 65682 for { 65683 _ = v.Args[1] 65684 x := v.Args[0] 65685 y := v.Args[1] 65686 v.reset(OpAMD64SUBQ) 65687 v.AddArg(x) 65688 v.AddArg(y) 65689 return true 65690 } 65691 } 65692 func rewriteValueAMD64_OpSub64F_0(v *Value) bool { 65693 // match: (Sub64F x y) 65694 // cond: 65695 // result: (SUBSD x y) 65696 for { 65697 _ = v.Args[1] 65698 x := v.Args[0] 65699 y := v.Args[1] 65700 v.reset(OpAMD64SUBSD) 65701 v.AddArg(x) 65702 v.AddArg(y) 65703 return true 65704 } 65705 } 65706 func rewriteValueAMD64_OpSub8_0(v *Value) bool { 65707 // match: (Sub8 x y) 65708 // cond: 65709 // result: (SUBL x y) 65710 for { 65711 _ = v.Args[1] 65712 x := v.Args[0] 65713 y := v.Args[1] 65714 v.reset(OpAMD64SUBL) 65715 v.AddArg(x) 65716 v.AddArg(y) 65717 return true 65718 } 65719 } 65720 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool { 65721 b := v.Block 65722 _ = b 65723 config := b.Func.Config 65724 _ = config 65725 // match: (SubPtr x y) 65726 // cond: config.PtrSize == 8 65727 // result: (SUBQ x y) 65728 for { 65729 _ = v.Args[1] 65730 x := v.Args[0] 65731 y := v.Args[1] 65732 if !(config.PtrSize == 8) { 65733 break 65734 } 65735 v.reset(OpAMD64SUBQ) 65736 v.AddArg(x) 65737 v.AddArg(y) 65738 return true 65739 } 65740 // match: (SubPtr x y) 65741 // cond: config.PtrSize == 4 65742 // result: (SUBL x y) 65743 for { 65744 _ = v.Args[1] 65745 x := v.Args[0] 65746 y := v.Args[1] 65747 if !(config.PtrSize == 4) { 65748 break 65749 } 65750 v.reset(OpAMD64SUBL) 65751 v.AddArg(x) 65752 v.AddArg(y) 65753 return true 65754 } 65755 return false 65756 } 65757 func rewriteValueAMD64_OpTrunc_0(v *Value) bool { 65758 // match: (Trunc x) 65759 // cond: 65760 // result: (ROUNDSD [3] x) 65761 for { 65762 x := v.Args[0] 65763 v.reset(OpAMD64ROUNDSD) 65764 v.AuxInt = 3 65765 v.AddArg(x) 65766 return true 65767 } 65768 } 65769 func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool { 65770 // match: (Trunc16to8 x) 65771 // cond: 65772 // result: x 65773 for { 65774 x := v.Args[0] 65775 v.reset(OpCopy) 65776 v.Type = x.Type 65777 v.AddArg(x) 65778 return true 65779 } 65780 } 65781 func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool { 65782 // match: (Trunc32to16 x) 65783 // cond: 65784 // result: x 65785 for { 65786 x := v.Args[0] 65787 v.reset(OpCopy) 65788 v.Type = x.Type 65789 v.AddArg(x) 65790 return true 65791 } 65792 } 65793 func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool { 65794 // match: (Trunc32to8 x) 65795 // cond: 65796 // result: x 65797 for { 65798 x := 
v.Args[0] 65799 v.reset(OpCopy) 65800 v.Type = x.Type 65801 v.AddArg(x) 65802 return true 65803 } 65804 } 65805 func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool { 65806 // match: (Trunc64to16 x) 65807 // cond: 65808 // result: x 65809 for { 65810 x := v.Args[0] 65811 v.reset(OpCopy) 65812 v.Type = x.Type 65813 v.AddArg(x) 65814 return true 65815 } 65816 } 65817 func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool { 65818 // match: (Trunc64to32 x) 65819 // cond: 65820 // result: x 65821 for { 65822 x := v.Args[0] 65823 v.reset(OpCopy) 65824 v.Type = x.Type 65825 v.AddArg(x) 65826 return true 65827 } 65828 } 65829 func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool { 65830 // match: (Trunc64to8 x) 65831 // cond: 65832 // result: x 65833 for { 65834 x := v.Args[0] 65835 v.reset(OpCopy) 65836 v.Type = x.Type 65837 v.AddArg(x) 65838 return true 65839 } 65840 } 65841 func rewriteValueAMD64_OpWB_0(v *Value) bool { 65842 // match: (WB {fn} destptr srcptr mem) 65843 // cond: 65844 // result: (LoweredWB {fn} destptr srcptr mem) 65845 for { 65846 fn := v.Aux 65847 _ = v.Args[2] 65848 destptr := v.Args[0] 65849 srcptr := v.Args[1] 65850 mem := v.Args[2] 65851 v.reset(OpAMD64LoweredWB) 65852 v.Aux = fn 65853 v.AddArg(destptr) 65854 v.AddArg(srcptr) 65855 v.AddArg(mem) 65856 return true 65857 } 65858 } 65859 func rewriteValueAMD64_OpXor16_0(v *Value) bool { 65860 // match: (Xor16 x y) 65861 // cond: 65862 // result: (XORL x y) 65863 for { 65864 _ = v.Args[1] 65865 x := v.Args[0] 65866 y := v.Args[1] 65867 v.reset(OpAMD64XORL) 65868 v.AddArg(x) 65869 v.AddArg(y) 65870 return true 65871 } 65872 } 65873 func rewriteValueAMD64_OpXor32_0(v *Value) bool { 65874 // match: (Xor32 x y) 65875 // cond: 65876 // result: (XORL x y) 65877 for { 65878 _ = v.Args[1] 65879 x := v.Args[0] 65880 y := v.Args[1] 65881 v.reset(OpAMD64XORL) 65882 v.AddArg(x) 65883 v.AddArg(y) 65884 return true 65885 } 65886 } 65887 func rewriteValueAMD64_OpXor64_0(v *Value) bool { 65888 // match: (Xor64 x y) 65889 // cond: 65890 // result: (XORQ x y) 65891 for { 65892 _ = v.Args[1] 65893 x := v.Args[0] 65894 y := v.Args[1] 65895 v.reset(OpAMD64XORQ) 65896 v.AddArg(x) 65897 v.AddArg(y) 65898 return true 65899 } 65900 } 65901 func rewriteValueAMD64_OpXor8_0(v *Value) bool { 65902 // match: (Xor8 x y) 65903 // cond: 65904 // result: (XORL x y) 65905 for { 65906 _ = v.Args[1] 65907 x := v.Args[0] 65908 y := v.Args[1] 65909 v.reset(OpAMD64XORL) 65910 v.AddArg(x) 65911 v.AddArg(y) 65912 return true 65913 } 65914 } 65915 func rewriteValueAMD64_OpZero_0(v *Value) bool { 65916 b := v.Block 65917 _ = b 65918 config := b.Func.Config 65919 _ = config 65920 // match: (Zero [0] _ mem) 65921 // cond: 65922 // result: mem 65923 for { 65924 if v.AuxInt != 0 { 65925 break 65926 } 65927 _ = v.Args[1] 65928 mem := v.Args[1] 65929 v.reset(OpCopy) 65930 v.Type = mem.Type 65931 v.AddArg(mem) 65932 return true 65933 } 65934 // match: (Zero [1] destptr mem) 65935 // cond: 65936 // result: (MOVBstoreconst [0] destptr mem) 65937 for { 65938 if v.AuxInt != 1 { 65939 break 65940 } 65941 _ = v.Args[1] 65942 destptr := v.Args[0] 65943 mem := v.Args[1] 65944 v.reset(OpAMD64MOVBstoreconst) 65945 v.AuxInt = 0 65946 v.AddArg(destptr) 65947 v.AddArg(mem) 65948 return true 65949 } 65950 // match: (Zero [2] destptr mem) 65951 // cond: 65952 // result: (MOVWstoreconst [0] destptr mem) 65953 for { 65954 if v.AuxInt != 2 { 65955 break 65956 } 65957 _ = v.Args[1] 65958 destptr := v.Args[0] 65959 mem := v.Args[1] 65960 v.reset(OpAMD64MOVWstoreconst) 65961 v.AuxInt = 0 65962 
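		// Editorial note (hedged): small Zero sizes lower to constant stores;
		// the non-power-of-two cases below split into two stores, packing
		// (value, offset) into AuxInt via makeValAndOff. Zero [7] even uses
		// two overlapping 4-byte stores at offsets 0 and 3. Go-level effect
		// for Zero [3] (sketch):
		//
		//	// p[0:2] = 0, 0   (MOVWstoreconst [0] destptr)
		//	// p[2]   = 0      (MOVBstoreconst [makeValAndOff(0,2)] destptr)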
v.AddArg(destptr) 65963 v.AddArg(mem) 65964 return true 65965 } 65966 // match: (Zero [4] destptr mem) 65967 // cond: 65968 // result: (MOVLstoreconst [0] destptr mem) 65969 for { 65970 if v.AuxInt != 4 { 65971 break 65972 } 65973 _ = v.Args[1] 65974 destptr := v.Args[0] 65975 mem := v.Args[1] 65976 v.reset(OpAMD64MOVLstoreconst) 65977 v.AuxInt = 0 65978 v.AddArg(destptr) 65979 v.AddArg(mem) 65980 return true 65981 } 65982 // match: (Zero [8] destptr mem) 65983 // cond: 65984 // result: (MOVQstoreconst [0] destptr mem) 65985 for { 65986 if v.AuxInt != 8 { 65987 break 65988 } 65989 _ = v.Args[1] 65990 destptr := v.Args[0] 65991 mem := v.Args[1] 65992 v.reset(OpAMD64MOVQstoreconst) 65993 v.AuxInt = 0 65994 v.AddArg(destptr) 65995 v.AddArg(mem) 65996 return true 65997 } 65998 // match: (Zero [3] destptr mem) 65999 // cond: 66000 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 66001 for { 66002 if v.AuxInt != 3 { 66003 break 66004 } 66005 _ = v.Args[1] 66006 destptr := v.Args[0] 66007 mem := v.Args[1] 66008 v.reset(OpAMD64MOVBstoreconst) 66009 v.AuxInt = makeValAndOff(0, 2) 66010 v.AddArg(destptr) 66011 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) 66012 v0.AuxInt = 0 66013 v0.AddArg(destptr) 66014 v0.AddArg(mem) 66015 v.AddArg(v0) 66016 return true 66017 } 66018 // match: (Zero [5] destptr mem) 66019 // cond: 66020 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 66021 for { 66022 if v.AuxInt != 5 { 66023 break 66024 } 66025 _ = v.Args[1] 66026 destptr := v.Args[0] 66027 mem := v.Args[1] 66028 v.reset(OpAMD64MOVBstoreconst) 66029 v.AuxInt = makeValAndOff(0, 4) 66030 v.AddArg(destptr) 66031 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 66032 v0.AuxInt = 0 66033 v0.AddArg(destptr) 66034 v0.AddArg(mem) 66035 v.AddArg(v0) 66036 return true 66037 } 66038 // match: (Zero [6] destptr mem) 66039 // cond: 66040 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 66041 for { 66042 if v.AuxInt != 6 { 66043 break 66044 } 66045 _ = v.Args[1] 66046 destptr := v.Args[0] 66047 mem := v.Args[1] 66048 v.reset(OpAMD64MOVWstoreconst) 66049 v.AuxInt = makeValAndOff(0, 4) 66050 v.AddArg(destptr) 66051 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 66052 v0.AuxInt = 0 66053 v0.AddArg(destptr) 66054 v0.AddArg(mem) 66055 v.AddArg(v0) 66056 return true 66057 } 66058 // match: (Zero [7] destptr mem) 66059 // cond: 66060 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 66061 for { 66062 if v.AuxInt != 7 { 66063 break 66064 } 66065 _ = v.Args[1] 66066 destptr := v.Args[0] 66067 mem := v.Args[1] 66068 v.reset(OpAMD64MOVLstoreconst) 66069 v.AuxInt = makeValAndOff(0, 3) 66070 v.AddArg(destptr) 66071 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 66072 v0.AuxInt = 0 66073 v0.AddArg(destptr) 66074 v0.AddArg(mem) 66075 v.AddArg(v0) 66076 return true 66077 } 66078 // match: (Zero [s] destptr mem) 66079 // cond: s%8 != 0 && s > 8 && !config.useSSE 66080 // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem)) 66081 for { 66082 s := v.AuxInt 66083 _ = v.Args[1] 66084 destptr := v.Args[0] 66085 mem := v.Args[1] 66086 if !(s%8 != 0 && s > 8 && !config.useSSE) { 66087 break 66088 } 66089 v.reset(OpZero) 66090 v.AuxInt = s - s%8 66091 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66092 v0.AuxInt = s % 8 66093 v0.AddArg(destptr) 66094 v.AddArg(v0) 66095 v1 := 
b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66096 v1.AuxInt = 0 66097 v1.AddArg(destptr) 66098 v1.AddArg(mem) 66099 v.AddArg(v1) 66100 return true 66101 } 66102 return false 66103 } 66104 func rewriteValueAMD64_OpZero_10(v *Value) bool { 66105 b := v.Block 66106 _ = b 66107 config := b.Func.Config 66108 _ = config 66109 // match: (Zero [16] destptr mem) 66110 // cond: !config.useSSE 66111 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 66112 for { 66113 if v.AuxInt != 16 { 66114 break 66115 } 66116 _ = v.Args[1] 66117 destptr := v.Args[0] 66118 mem := v.Args[1] 66119 if !(!config.useSSE) { 66120 break 66121 } 66122 v.reset(OpAMD64MOVQstoreconst) 66123 v.AuxInt = makeValAndOff(0, 8) 66124 v.AddArg(destptr) 66125 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66126 v0.AuxInt = 0 66127 v0.AddArg(destptr) 66128 v0.AddArg(mem) 66129 v.AddArg(v0) 66130 return true 66131 } 66132 // match: (Zero [24] destptr mem) 66133 // cond: !config.useSSE 66134 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 66135 for { 66136 if v.AuxInt != 24 { 66137 break 66138 } 66139 _ = v.Args[1] 66140 destptr := v.Args[0] 66141 mem := v.Args[1] 66142 if !(!config.useSSE) { 66143 break 66144 } 66145 v.reset(OpAMD64MOVQstoreconst) 66146 v.AuxInt = makeValAndOff(0, 16) 66147 v.AddArg(destptr) 66148 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66149 v0.AuxInt = makeValAndOff(0, 8) 66150 v0.AddArg(destptr) 66151 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66152 v1.AuxInt = 0 66153 v1.AddArg(destptr) 66154 v1.AddArg(mem) 66155 v0.AddArg(v1) 66156 v.AddArg(v0) 66157 return true 66158 } 66159 // match: (Zero [32] destptr mem) 66160 // cond: !config.useSSE 66161 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 66162 for { 66163 if v.AuxInt != 32 { 66164 break 66165 } 66166 _ = v.Args[1] 66167 destptr := v.Args[0] 66168 mem := v.Args[1] 66169 if !(!config.useSSE) { 66170 break 66171 } 66172 v.reset(OpAMD64MOVQstoreconst) 66173 v.AuxInt = makeValAndOff(0, 24) 66174 v.AddArg(destptr) 66175 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66176 v0.AuxInt = makeValAndOff(0, 16) 66177 v0.AddArg(destptr) 66178 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66179 v1.AuxInt = makeValAndOff(0, 8) 66180 v1.AddArg(destptr) 66181 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66182 v2.AuxInt = 0 66183 v2.AddArg(destptr) 66184 v2.AddArg(mem) 66185 v1.AddArg(v2) 66186 v0.AddArg(v1) 66187 v.AddArg(v0) 66188 return true 66189 } 66190 // match: (Zero [s] destptr mem) 66191 // cond: s > 8 && s < 16 && config.useSSE 66192 // result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem)) 66193 for { 66194 s := v.AuxInt 66195 _ = v.Args[1] 66196 destptr := v.Args[0] 66197 mem := v.Args[1] 66198 if !(s > 8 && s < 16 && config.useSSE) { 66199 break 66200 } 66201 v.reset(OpAMD64MOVQstoreconst) 66202 v.AuxInt = makeValAndOff(0, s-8) 66203 v.AddArg(destptr) 66204 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66205 v0.AuxInt = 0 66206 v0.AddArg(destptr) 66207 v0.AddArg(mem) 66208 v.AddArg(v0) 66209 return true 66210 } 66211 // match: (Zero [s] destptr mem) 66212 // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE 66213 // result: (Zero 
[s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem)) 66214 for { 66215 s := v.AuxInt 66216 _ = v.Args[1] 66217 destptr := v.Args[0] 66218 mem := v.Args[1] 66219 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) { 66220 break 66221 } 66222 v.reset(OpZero) 66223 v.AuxInt = s - s%16 66224 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66225 v0.AuxInt = s % 16 66226 v0.AddArg(destptr) 66227 v.AddArg(v0) 66228 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66229 v1.AddArg(destptr) 66230 v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66231 v2.AuxInt = 0 66232 v1.AddArg(v2) 66233 v1.AddArg(mem) 66234 v.AddArg(v1) 66235 return true 66236 } 66237 // match: (Zero [s] destptr mem) 66238 // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE 66239 // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem)) 66240 for { 66241 s := v.AuxInt 66242 _ = v.Args[1] 66243 destptr := v.Args[0] 66244 mem := v.Args[1] 66245 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) { 66246 break 66247 } 66248 v.reset(OpZero) 66249 v.AuxInt = s - s%16 66250 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66251 v0.AuxInt = s % 16 66252 v0.AddArg(destptr) 66253 v.AddArg(v0) 66254 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 66255 v1.AuxInt = 0 66256 v1.AddArg(destptr) 66257 v1.AddArg(mem) 66258 v.AddArg(v1) 66259 return true 66260 } 66261 // match: (Zero [16] destptr mem) 66262 // cond: config.useSSE 66263 // result: (MOVOstore destptr (MOVOconst [0]) mem) 66264 for { 66265 if v.AuxInt != 16 { 66266 break 66267 } 66268 _ = v.Args[1] 66269 destptr := v.Args[0] 66270 mem := v.Args[1] 66271 if !(config.useSSE) { 66272 break 66273 } 66274 v.reset(OpAMD64MOVOstore) 66275 v.AddArg(destptr) 66276 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66277 v0.AuxInt = 0 66278 v.AddArg(v0) 66279 v.AddArg(mem) 66280 return true 66281 } 66282 // match: (Zero [32] destptr mem) 66283 // cond: config.useSSE 66284 // result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)) 66285 for { 66286 if v.AuxInt != 32 { 66287 break 66288 } 66289 _ = v.Args[1] 66290 destptr := v.Args[0] 66291 mem := v.Args[1] 66292 if !(config.useSSE) { 66293 break 66294 } 66295 v.reset(OpAMD64MOVOstore) 66296 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66297 v0.AuxInt = 16 66298 v0.AddArg(destptr) 66299 v.AddArg(v0) 66300 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66301 v1.AuxInt = 0 66302 v.AddArg(v1) 66303 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66304 v2.AddArg(destptr) 66305 v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66306 v3.AuxInt = 0 66307 v2.AddArg(v3) 66308 v2.AddArg(mem) 66309 v.AddArg(v2) 66310 return true 66311 } 66312 // match: (Zero [48] destptr mem) 66313 // cond: config.useSSE 66314 // result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))) 66315 for { 66316 if v.AuxInt != 48 { 66317 break 66318 } 66319 _ = v.Args[1] 66320 destptr := v.Args[0] 66321 mem := v.Args[1] 66322 if !(config.useSSE) { 66323 break 66324 } 66325 v.reset(OpAMD64MOVOstore) 66326 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66327 v0.AuxInt = 32 66328 v0.AddArg(destptr) 66329 v.AddArg(v0) 66330 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66331 v1.AuxInt = 0 66332 v.AddArg(v1) 66333 v2 := 
b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66334 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66335 v3.AuxInt = 16 66336 v3.AddArg(destptr) 66337 v2.AddArg(v3) 66338 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66339 v4.AuxInt = 0 66340 v2.AddArg(v4) 66341 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66342 v5.AddArg(destptr) 66343 v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66344 v6.AuxInt = 0 66345 v5.AddArg(v6) 66346 v5.AddArg(mem) 66347 v2.AddArg(v5) 66348 v.AddArg(v2) 66349 return true 66350 } 66351 // match: (Zero [64] destptr mem) 66352 // cond: config.useSSE 66353 // result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))) 66354 for { 66355 if v.AuxInt != 64 { 66356 break 66357 } 66358 _ = v.Args[1] 66359 destptr := v.Args[0] 66360 mem := v.Args[1] 66361 if !(config.useSSE) { 66362 break 66363 } 66364 v.reset(OpAMD64MOVOstore) 66365 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66366 v0.AuxInt = 48 66367 v0.AddArg(destptr) 66368 v.AddArg(v0) 66369 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66370 v1.AuxInt = 0 66371 v.AddArg(v1) 66372 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66373 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66374 v3.AuxInt = 32 66375 v3.AddArg(destptr) 66376 v2.AddArg(v3) 66377 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66378 v4.AuxInt = 0 66379 v2.AddArg(v4) 66380 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66381 v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 66382 v6.AuxInt = 16 66383 v6.AddArg(destptr) 66384 v5.AddArg(v6) 66385 v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66386 v7.AuxInt = 0 66387 v5.AddArg(v7) 66388 v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 66389 v8.AddArg(destptr) 66390 v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66391 v9.AuxInt = 0 66392 v8.AddArg(v9) 66393 v8.AddArg(mem) 66394 v5.AddArg(v8) 66395 v2.AddArg(v5) 66396 v.AddArg(v2) 66397 return true 66398 } 66399 return false 66400 } 66401 func rewriteValueAMD64_OpZero_20(v *Value) bool { 66402 b := v.Block 66403 _ = b 66404 config := b.Func.Config 66405 _ = config 66406 typ := &b.Func.Config.Types 66407 _ = typ 66408 // match: (Zero [s] destptr mem) 66409 // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice 66410 // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) 66411 for { 66412 s := v.AuxInt 66413 _ = v.Args[1] 66414 destptr := v.Args[0] 66415 mem := v.Args[1] 66416 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) { 66417 break 66418 } 66419 v.reset(OpAMD64DUFFZERO) 66420 v.AuxInt = s 66421 v.AddArg(destptr) 66422 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 66423 v0.AuxInt = 0 66424 v.AddArg(v0) 66425 v.AddArg(mem) 66426 return true 66427 } 66428 // match: (Zero [s] destptr mem) 66429 // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0 66430 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) 66431 for { 66432 s := v.AuxInt 66433 _ = v.Args[1] 66434 destptr := v.Args[0] 66435 mem := v.Args[1] 66436 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) { 66437 break 66438 } 66439 v.reset(OpAMD64REPSTOSQ) 66440 v.AddArg(destptr) 66441 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 
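		// Editorial note (hedged): the large-size strategy in this function
		// is: multiples of 16 in (64, 1024] use DUFFZERO (a computed jump
		// into an unrolled run of 16-byte stores), while anything larger —
		// or configurations without Duff's device or SSE — falls back to
		// REP STOSQ, which zeroes CX quadwords starting at DI. The MOVQconst
		// built here is that quadword count, s/8. Roughly:
		//
		//	// DI = destptr; CX = s/8; AX = 0; REP STOSQ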
66442 v0.AuxInt = s / 8 66443 v.AddArg(v0) 66444 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 66445 v1.AuxInt = 0 66446 v.AddArg(v1) 66447 v.AddArg(mem) 66448 return true 66449 } 66450 return false 66451 } 66452 func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool { 66453 // match: (ZeroExt16to32 x) 66454 // cond: 66455 // result: (MOVWQZX x) 66456 for { 66457 x := v.Args[0] 66458 v.reset(OpAMD64MOVWQZX) 66459 v.AddArg(x) 66460 return true 66461 } 66462 } 66463 func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool { 66464 // match: (ZeroExt16to64 x) 66465 // cond: 66466 // result: (MOVWQZX x) 66467 for { 66468 x := v.Args[0] 66469 v.reset(OpAMD64MOVWQZX) 66470 v.AddArg(x) 66471 return true 66472 } 66473 } 66474 func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool { 66475 // match: (ZeroExt32to64 x) 66476 // cond: 66477 // result: (MOVLQZX x) 66478 for { 66479 x := v.Args[0] 66480 v.reset(OpAMD64MOVLQZX) 66481 v.AddArg(x) 66482 return true 66483 } 66484 } 66485 func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool { 66486 // match: (ZeroExt8to16 x) 66487 // cond: 66488 // result: (MOVBQZX x) 66489 for { 66490 x := v.Args[0] 66491 v.reset(OpAMD64MOVBQZX) 66492 v.AddArg(x) 66493 return true 66494 } 66495 } 66496 func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool { 66497 // match: (ZeroExt8to32 x) 66498 // cond: 66499 // result: (MOVBQZX x) 66500 for { 66501 x := v.Args[0] 66502 v.reset(OpAMD64MOVBQZX) 66503 v.AddArg(x) 66504 return true 66505 } 66506 } 66507 func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool { 66508 // match: (ZeroExt8to64 x) 66509 // cond: 66510 // result: (MOVBQZX x) 66511 for { 66512 x := v.Args[0] 66513 v.reset(OpAMD64MOVBQZX) 66514 v.AddArg(x) 66515 return true 66516 } 66517 } 66518 func rewriteBlockAMD64(b *Block) bool { 66519 config := b.Func.Config 66520 _ = config 66521 fe := b.Func.fe 66522 _ = fe 66523 typ := &config.Types 66524 _ = typ 66525 switch b.Kind { 66526 case BlockAMD64EQ: 66527 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) 66528 // cond: !config.nacl 66529 // result: (UGE (BTL x y)) 66530 for { 66531 v := b.Control 66532 if v.Op != OpAMD64TESTL { 66533 break 66534 } 66535 _ = v.Args[1] 66536 v_0 := v.Args[0] 66537 if v_0.Op != OpAMD64SHLL { 66538 break 66539 } 66540 _ = v_0.Args[1] 66541 v_0_0 := v_0.Args[0] 66542 if v_0_0.Op != OpAMD64MOVLconst { 66543 break 66544 } 66545 if v_0_0.AuxInt != 1 { 66546 break 66547 } 66548 x := v_0.Args[1] 66549 y := v.Args[1] 66550 if !(!config.nacl) { 66551 break 66552 } 66553 b.Kind = BlockAMD64UGE 66554 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 66555 v0.AddArg(x) 66556 v0.AddArg(y) 66557 b.SetControl(v0) 66558 b.Aux = nil 66559 return true 66560 } 66561 // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) 66562 // cond: !config.nacl 66563 // result: (UGE (BTL x y)) 66564 for { 66565 v := b.Control 66566 if v.Op != OpAMD64TESTL { 66567 break 66568 } 66569 _ = v.Args[1] 66570 y := v.Args[0] 66571 v_1 := v.Args[1] 66572 if v_1.Op != OpAMD64SHLL { 66573 break 66574 } 66575 _ = v_1.Args[1] 66576 v_1_0 := v_1.Args[0] 66577 if v_1_0.Op != OpAMD64MOVLconst { 66578 break 66579 } 66580 if v_1_0.AuxInt != 1 { 66581 break 66582 } 66583 x := v_1.Args[1] 66584 if !(!config.nacl) { 66585 break 66586 } 66587 b.Kind = BlockAMD64UGE 66588 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 66589 v0.AddArg(x) 66590 v0.AddArg(y) 66591 b.SetControl(v0) 66592 b.Aux = nil 66593 return true 66594 } 66595 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 66596 // cond: !config.nacl 66597 // result: 
(UGE (BTQ x y)) 66598 for { 66599 v := b.Control 66600 if v.Op != OpAMD64TESTQ { 66601 break 66602 } 66603 _ = v.Args[1] 66604 v_0 := v.Args[0] 66605 if v_0.Op != OpAMD64SHLQ { 66606 break 66607 } 66608 _ = v_0.Args[1] 66609 v_0_0 := v_0.Args[0] 66610 if v_0_0.Op != OpAMD64MOVQconst { 66611 break 66612 } 66613 if v_0_0.AuxInt != 1 { 66614 break 66615 } 66616 x := v_0.Args[1] 66617 y := v.Args[1] 66618 if !(!config.nacl) { 66619 break 66620 } 66621 b.Kind = BlockAMD64UGE 66622 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 66623 v0.AddArg(x) 66624 v0.AddArg(y) 66625 b.SetControl(v0) 66626 b.Aux = nil 66627 return true 66628 } 66629 // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 66630 // cond: !config.nacl 66631 // result: (UGE (BTQ x y)) 66632 for { 66633 v := b.Control 66634 if v.Op != OpAMD64TESTQ { 66635 break 66636 } 66637 _ = v.Args[1] 66638 y := v.Args[0] 66639 v_1 := v.Args[1] 66640 if v_1.Op != OpAMD64SHLQ { 66641 break 66642 } 66643 _ = v_1.Args[1] 66644 v_1_0 := v_1.Args[0] 66645 if v_1_0.Op != OpAMD64MOVQconst { 66646 break 66647 } 66648 if v_1_0.AuxInt != 1 { 66649 break 66650 } 66651 x := v_1.Args[1] 66652 if !(!config.nacl) { 66653 break 66654 } 66655 b.Kind = BlockAMD64UGE 66656 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 66657 v0.AddArg(x) 66658 v0.AddArg(y) 66659 b.SetControl(v0) 66660 b.Aux = nil 66661 return true 66662 } 66663 // match: (EQ (TESTLconst [c] x)) 66664 // cond: isUint32PowerOfTwo(c) && !config.nacl 66665 // result: (UGE (BTLconst [log2uint32(c)] x)) 66666 for { 66667 v := b.Control 66668 if v.Op != OpAMD64TESTLconst { 66669 break 66670 } 66671 c := v.AuxInt 66672 x := v.Args[0] 66673 if !(isUint32PowerOfTwo(c) && !config.nacl) { 66674 break 66675 } 66676 b.Kind = BlockAMD64UGE 66677 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 66678 v0.AuxInt = log2uint32(c) 66679 v0.AddArg(x) 66680 b.SetControl(v0) 66681 b.Aux = nil 66682 return true 66683 } 66684 // match: (EQ (TESTQconst [c] x)) 66685 // cond: isUint64PowerOfTwo(c) && !config.nacl 66686 // result: (UGE (BTQconst [log2(c)] x)) 66687 for { 66688 v := b.Control 66689 if v.Op != OpAMD64TESTQconst { 66690 break 66691 } 66692 c := v.AuxInt 66693 x := v.Args[0] 66694 if !(isUint64PowerOfTwo(c) && !config.nacl) { 66695 break 66696 } 66697 b.Kind = BlockAMD64UGE 66698 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66699 v0.AuxInt = log2(c) 66700 v0.AddArg(x) 66701 b.SetControl(v0) 66702 b.Aux = nil 66703 return true 66704 } 66705 // match: (EQ (TESTQ (MOVQconst [c]) x)) 66706 // cond: isUint64PowerOfTwo(c) && !config.nacl 66707 // result: (UGE (BTQconst [log2(c)] x)) 66708 for { 66709 v := b.Control 66710 if v.Op != OpAMD64TESTQ { 66711 break 66712 } 66713 _ = v.Args[1] 66714 v_0 := v.Args[0] 66715 if v_0.Op != OpAMD64MOVQconst { 66716 break 66717 } 66718 c := v_0.AuxInt 66719 x := v.Args[1] 66720 if !(isUint64PowerOfTwo(c) && !config.nacl) { 66721 break 66722 } 66723 b.Kind = BlockAMD64UGE 66724 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66725 v0.AuxInt = log2(c) 66726 v0.AddArg(x) 66727 b.SetControl(v0) 66728 b.Aux = nil 66729 return true 66730 } 66731 // match: (EQ (TESTQ x (MOVQconst [c]))) 66732 // cond: isUint64PowerOfTwo(c) && !config.nacl 66733 // result: (UGE (BTQconst [log2(c)] x)) 66734 for { 66735 v := b.Control 66736 if v.Op != OpAMD64TESTQ { 66737 break 66738 } 66739 _ = v.Args[1] 66740 x := v.Args[0] 66741 v_1 := v.Args[1] 66742 if v_1.Op != OpAMD64MOVQconst { 66743 break 66744 } 66745 c := v_1.AuxInt 66746 if !(isUint64PowerOfTwo(c) && 
!config.nacl) { 66747 break 66748 } 66749 b.Kind = BlockAMD64UGE 66750 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66751 v0.AuxInt = log2(c) 66752 v0.AddArg(x) 66753 b.SetControl(v0) 66754 b.Aux = nil 66755 return true 66756 } 66757 // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) 66758 // cond: z1==z2 && !config.nacl 66759 // result: (UGE (BTQconst [63] x)) 66760 for { 66761 v := b.Control 66762 if v.Op != OpAMD64TESTQ { 66763 break 66764 } 66765 _ = v.Args[1] 66766 z1 := v.Args[0] 66767 if z1.Op != OpAMD64SHLQconst { 66768 break 66769 } 66770 if z1.AuxInt != 63 { 66771 break 66772 } 66773 z1_0 := z1.Args[0] 66774 if z1_0.Op != OpAMD64SHRQconst { 66775 break 66776 } 66777 if z1_0.AuxInt != 63 { 66778 break 66779 } 66780 x := z1_0.Args[0] 66781 z2 := v.Args[1] 66782 if !(z1 == z2 && !config.nacl) { 66783 break 66784 } 66785 b.Kind = BlockAMD64UGE 66786 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66787 v0.AuxInt = 63 66788 v0.AddArg(x) 66789 b.SetControl(v0) 66790 b.Aux = nil 66791 return true 66792 } 66793 // match: (EQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) 66794 // cond: z1==z2 && !config.nacl 66795 // result: (UGE (BTQconst [63] x)) 66796 for { 66797 v := b.Control 66798 if v.Op != OpAMD64TESTQ { 66799 break 66800 } 66801 _ = v.Args[1] 66802 z2 := v.Args[0] 66803 z1 := v.Args[1] 66804 if z1.Op != OpAMD64SHLQconst { 66805 break 66806 } 66807 if z1.AuxInt != 63 { 66808 break 66809 } 66810 z1_0 := z1.Args[0] 66811 if z1_0.Op != OpAMD64SHRQconst { 66812 break 66813 } 66814 if z1_0.AuxInt != 63 { 66815 break 66816 } 66817 x := z1_0.Args[0] 66818 if !(z1 == z2 && !config.nacl) { 66819 break 66820 } 66821 b.Kind = BlockAMD64UGE 66822 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66823 v0.AuxInt = 63 66824 v0.AddArg(x) 66825 b.SetControl(v0) 66826 b.Aux = nil 66827 return true 66828 } 66829 // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 66830 // cond: z1==z2 && !config.nacl 66831 // result: (UGE (BTQconst [31] x)) 66832 for { 66833 v := b.Control 66834 if v.Op != OpAMD64TESTL { 66835 break 66836 } 66837 _ = v.Args[1] 66838 z1 := v.Args[0] 66839 if z1.Op != OpAMD64SHLLconst { 66840 break 66841 } 66842 if z1.AuxInt != 31 { 66843 break 66844 } 66845 z1_0 := z1.Args[0] 66846 if z1_0.Op != OpAMD64SHRQconst { 66847 break 66848 } 66849 if z1_0.AuxInt != 31 { 66850 break 66851 } 66852 x := z1_0.Args[0] 66853 z2 := v.Args[1] 66854 if !(z1 == z2 && !config.nacl) { 66855 break 66856 } 66857 b.Kind = BlockAMD64UGE 66858 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66859 v0.AuxInt = 31 66860 v0.AddArg(x) 66861 b.SetControl(v0) 66862 b.Aux = nil 66863 return true 66864 } 66865 // match: (EQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) 66866 // cond: z1==z2 && !config.nacl 66867 // result: (UGE (BTQconst [31] x)) 66868 for { 66869 v := b.Control 66870 if v.Op != OpAMD64TESTL { 66871 break 66872 } 66873 _ = v.Args[1] 66874 z2 := v.Args[0] 66875 z1 := v.Args[1] 66876 if z1.Op != OpAMD64SHLLconst { 66877 break 66878 } 66879 if z1.AuxInt != 31 { 66880 break 66881 } 66882 z1_0 := z1.Args[0] 66883 if z1_0.Op != OpAMD64SHRQconst { 66884 break 66885 } 66886 if z1_0.AuxInt != 31 { 66887 break 66888 } 66889 x := z1_0.Args[0] 66890 if !(z1 == z2 && !config.nacl) { 66891 break 66892 } 66893 b.Kind = BlockAMD64UGE 66894 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66895 v0.AuxInt = 31 66896 v0.AddArg(x) 66897 b.SetControl(v0) 66898 b.Aux = nil 66899 return true 66900 } 66901 // match: (EQ (TESTQ 
z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 66902 // cond: z1==z2 && !config.nacl 66903 // result: (UGE (BTQconst [0] x)) 66904 for { 66905 v := b.Control 66906 if v.Op != OpAMD64TESTQ { 66907 break 66908 } 66909 _ = v.Args[1] 66910 z1 := v.Args[0] 66911 if z1.Op != OpAMD64SHRQconst { 66912 break 66913 } 66914 if z1.AuxInt != 63 { 66915 break 66916 } 66917 z1_0 := z1.Args[0] 66918 if z1_0.Op != OpAMD64SHLQconst { 66919 break 66920 } 66921 if z1_0.AuxInt != 63 { 66922 break 66923 } 66924 x := z1_0.Args[0] 66925 z2 := v.Args[1] 66926 if !(z1 == z2 && !config.nacl) { 66927 break 66928 } 66929 b.Kind = BlockAMD64UGE 66930 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66931 v0.AuxInt = 0 66932 v0.AddArg(x) 66933 b.SetControl(v0) 66934 b.Aux = nil 66935 return true 66936 } 66937 // match: (EQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) 66938 // cond: z1==z2 && !config.nacl 66939 // result: (UGE (BTQconst [0] x)) 66940 for { 66941 v := b.Control 66942 if v.Op != OpAMD64TESTQ { 66943 break 66944 } 66945 _ = v.Args[1] 66946 z2 := v.Args[0] 66947 z1 := v.Args[1] 66948 if z1.Op != OpAMD64SHRQconst { 66949 break 66950 } 66951 if z1.AuxInt != 63 { 66952 break 66953 } 66954 z1_0 := z1.Args[0] 66955 if z1_0.Op != OpAMD64SHLQconst { 66956 break 66957 } 66958 if z1_0.AuxInt != 63 { 66959 break 66960 } 66961 x := z1_0.Args[0] 66962 if !(z1 == z2 && !config.nacl) { 66963 break 66964 } 66965 b.Kind = BlockAMD64UGE 66966 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 66967 v0.AuxInt = 0 66968 v0.AddArg(x) 66969 b.SetControl(v0) 66970 b.Aux = nil 66971 return true 66972 } 66973 // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 66974 // cond: z1==z2 && !config.nacl 66975 // result: (UGE (BTLconst [0] x)) 66976 for { 66977 v := b.Control 66978 if v.Op != OpAMD64TESTL { 66979 break 66980 } 66981 _ = v.Args[1] 66982 z1 := v.Args[0] 66983 if z1.Op != OpAMD64SHRLconst { 66984 break 66985 } 66986 if z1.AuxInt != 31 { 66987 break 66988 } 66989 z1_0 := z1.Args[0] 66990 if z1_0.Op != OpAMD64SHLLconst { 66991 break 66992 } 66993 if z1_0.AuxInt != 31 { 66994 break 66995 } 66996 x := z1_0.Args[0] 66997 z2 := v.Args[1] 66998 if !(z1 == z2 && !config.nacl) { 66999 break 67000 } 67001 b.Kind = BlockAMD64UGE 67002 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 67003 v0.AuxInt = 0 67004 v0.AddArg(x) 67005 b.SetControl(v0) 67006 b.Aux = nil 67007 return true 67008 } 67009 // match: (EQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) 67010 // cond: z1==z2 && !config.nacl 67011 // result: (UGE (BTLconst [0] x)) 67012 for { 67013 v := b.Control 67014 if v.Op != OpAMD64TESTL { 67015 break 67016 } 67017 _ = v.Args[1] 67018 z2 := v.Args[0] 67019 z1 := v.Args[1] 67020 if z1.Op != OpAMD64SHRLconst { 67021 break 67022 } 67023 if z1.AuxInt != 31 { 67024 break 67025 } 67026 z1_0 := z1.Args[0] 67027 if z1_0.Op != OpAMD64SHLLconst { 67028 break 67029 } 67030 if z1_0.AuxInt != 31 { 67031 break 67032 } 67033 x := z1_0.Args[0] 67034 if !(z1 == z2 && !config.nacl) { 67035 break 67036 } 67037 b.Kind = BlockAMD64UGE 67038 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 67039 v0.AuxInt = 0 67040 v0.AddArg(x) 67041 b.SetControl(v0) 67042 b.Aux = nil 67043 return true 67044 } 67045 // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2)) 67046 // cond: z1==z2 && !config.nacl 67047 // result: (UGE (BTQconst [63] x)) 67048 for { 67049 v := b.Control 67050 if v.Op != OpAMD64TESTQ { 67051 break 67052 } 67053 _ = v.Args[1] 67054 z1 := v.Args[0] 67055 if z1.Op != OpAMD64SHRQconst { 67056 break 
67057 } 67058 if z1.AuxInt != 63 { 67059 break 67060 } 67061 x := z1.Args[0] 67062 z2 := v.Args[1] 67063 if !(z1 == z2 && !config.nacl) { 67064 break 67065 } 67066 b.Kind = BlockAMD64UGE 67067 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 67068 v0.AuxInt = 63 67069 v0.AddArg(x) 67070 b.SetControl(v0) 67071 b.Aux = nil 67072 return true 67073 } 67074 // match: (EQ (TESTQ z2 z1:(SHRQconst [63] x))) 67075 // cond: z1==z2 && !config.nacl 67076 // result: (UGE (BTQconst [63] x)) 67077 for { 67078 v := b.Control 67079 if v.Op != OpAMD64TESTQ { 67080 break 67081 } 67082 _ = v.Args[1] 67083 z2 := v.Args[0] 67084 z1 := v.Args[1] 67085 if z1.Op != OpAMD64SHRQconst { 67086 break 67087 } 67088 if z1.AuxInt != 63 { 67089 break 67090 } 67091 x := z1.Args[0] 67092 if !(z1 == z2 && !config.nacl) { 67093 break 67094 } 67095 b.Kind = BlockAMD64UGE 67096 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 67097 v0.AuxInt = 63 67098 v0.AddArg(x) 67099 b.SetControl(v0) 67100 b.Aux = nil 67101 return true 67102 } 67103 // match: (EQ (TESTL z1:(SHRLconst [31] x) z2)) 67104 // cond: z1==z2 && !config.nacl 67105 // result: (UGE (BTLconst [31] x)) 67106 for { 67107 v := b.Control 67108 if v.Op != OpAMD64TESTL { 67109 break 67110 } 67111 _ = v.Args[1] 67112 z1 := v.Args[0] 67113 if z1.Op != OpAMD64SHRLconst { 67114 break 67115 } 67116 if z1.AuxInt != 31 { 67117 break 67118 } 67119 x := z1.Args[0] 67120 z2 := v.Args[1] 67121 if !(z1 == z2 && !config.nacl) { 67122 break 67123 } 67124 b.Kind = BlockAMD64UGE 67125 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 67126 v0.AuxInt = 31 67127 v0.AddArg(x) 67128 b.SetControl(v0) 67129 b.Aux = nil 67130 return true 67131 } 67132 // match: (EQ (TESTL z2 z1:(SHRLconst [31] x))) 67133 // cond: z1==z2 && !config.nacl 67134 // result: (UGE (BTLconst [31] x)) 67135 for { 67136 v := b.Control 67137 if v.Op != OpAMD64TESTL { 67138 break 67139 } 67140 _ = v.Args[1] 67141 z2 := v.Args[0] 67142 z1 := v.Args[1] 67143 if z1.Op != OpAMD64SHRLconst { 67144 break 67145 } 67146 if z1.AuxInt != 31 { 67147 break 67148 } 67149 x := z1.Args[0] 67150 if !(z1 == z2 && !config.nacl) { 67151 break 67152 } 67153 b.Kind = BlockAMD64UGE 67154 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 67155 v0.AuxInt = 31 67156 v0.AddArg(x) 67157 b.SetControl(v0) 67158 b.Aux = nil 67159 return true 67160 } 67161 // match: (EQ (InvertFlags cmp) yes no) 67162 // cond: 67163 // result: (EQ cmp yes no) 67164 for { 67165 v := b.Control 67166 if v.Op != OpAMD64InvertFlags { 67167 break 67168 } 67169 cmp := v.Args[0] 67170 b.Kind = BlockAMD64EQ 67171 b.SetControl(cmp) 67172 b.Aux = nil 67173 return true 67174 } 67175 // match: (EQ (FlagEQ) yes no) 67176 // cond: 67177 // result: (First nil yes no) 67178 for { 67179 v := b.Control 67180 if v.Op != OpAMD64FlagEQ { 67181 break 67182 } 67183 b.Kind = BlockFirst 67184 b.SetControl(nil) 67185 b.Aux = nil 67186 return true 67187 } 67188 // match: (EQ (FlagLT_ULT) yes no) 67189 // cond: 67190 // result: (First nil no yes) 67191 for { 67192 v := b.Control 67193 if v.Op != OpAMD64FlagLT_ULT { 67194 break 67195 } 67196 b.Kind = BlockFirst 67197 b.SetControl(nil) 67198 b.Aux = nil 67199 b.swapSuccessors() 67200 return true 67201 } 67202 // match: (EQ (FlagLT_UGT) yes no) 67203 // cond: 67204 // result: (First nil no yes) 67205 for { 67206 v := b.Control 67207 if v.Op != OpAMD64FlagLT_UGT { 67208 break 67209 } 67210 b.Kind = BlockFirst 67211 b.SetControl(nil) 67212 b.Aux = nil 67213 b.swapSuccessors() 67214 return true 67215 } 67216 // match: 
(EQ (FlagGT_ULT) yes no) 67217 // cond: 67218 // result: (First nil no yes) 67219 for { 67220 v := b.Control 67221 if v.Op != OpAMD64FlagGT_ULT { 67222 break 67223 } 67224 b.Kind = BlockFirst 67225 b.SetControl(nil) 67226 b.Aux = nil 67227 b.swapSuccessors() 67228 return true 67229 } 67230 // match: (EQ (FlagGT_UGT) yes no) 67231 // cond: 67232 // result: (First nil no yes) 67233 for { 67234 v := b.Control 67235 if v.Op != OpAMD64FlagGT_UGT { 67236 break 67237 } 67238 b.Kind = BlockFirst 67239 b.SetControl(nil) 67240 b.Aux = nil 67241 b.swapSuccessors() 67242 return true 67243 } 67244 case BlockAMD64GE: 67245 // match: (GE (InvertFlags cmp) yes no) 67246 // cond: 67247 // result: (LE cmp yes no) 67248 for { 67249 v := b.Control 67250 if v.Op != OpAMD64InvertFlags { 67251 break 67252 } 67253 cmp := v.Args[0] 67254 b.Kind = BlockAMD64LE 67255 b.SetControl(cmp) 67256 b.Aux = nil 67257 return true 67258 } 67259 // match: (GE (FlagEQ) yes no) 67260 // cond: 67261 // result: (First nil yes no) 67262 for { 67263 v := b.Control 67264 if v.Op != OpAMD64FlagEQ { 67265 break 67266 } 67267 b.Kind = BlockFirst 67268 b.SetControl(nil) 67269 b.Aux = nil 67270 return true 67271 } 67272 // match: (GE (FlagLT_ULT) yes no) 67273 // cond: 67274 // result: (First nil no yes) 67275 for { 67276 v := b.Control 67277 if v.Op != OpAMD64FlagLT_ULT { 67278 break 67279 } 67280 b.Kind = BlockFirst 67281 b.SetControl(nil) 67282 b.Aux = nil 67283 b.swapSuccessors() 67284 return true 67285 } 67286 // match: (GE (FlagLT_UGT) yes no) 67287 // cond: 67288 // result: (First nil no yes) 67289 for { 67290 v := b.Control 67291 if v.Op != OpAMD64FlagLT_UGT { 67292 break 67293 } 67294 b.Kind = BlockFirst 67295 b.SetControl(nil) 67296 b.Aux = nil 67297 b.swapSuccessors() 67298 return true 67299 } 67300 // match: (GE (FlagGT_ULT) yes no) 67301 // cond: 67302 // result: (First nil yes no) 67303 for { 67304 v := b.Control 67305 if v.Op != OpAMD64FlagGT_ULT { 67306 break 67307 } 67308 b.Kind = BlockFirst 67309 b.SetControl(nil) 67310 b.Aux = nil 67311 return true 67312 } 67313 // match: (GE (FlagGT_UGT) yes no) 67314 // cond: 67315 // result: (First nil yes no) 67316 for { 67317 v := b.Control 67318 if v.Op != OpAMD64FlagGT_UGT { 67319 break 67320 } 67321 b.Kind = BlockFirst 67322 b.SetControl(nil) 67323 b.Aux = nil 67324 return true 67325 } 67326 case BlockAMD64GT: 67327 // match: (GT (InvertFlags cmp) yes no) 67328 // cond: 67329 // result: (LT cmp yes no) 67330 for { 67331 v := b.Control 67332 if v.Op != OpAMD64InvertFlags { 67333 break 67334 } 67335 cmp := v.Args[0] 67336 b.Kind = BlockAMD64LT 67337 b.SetControl(cmp) 67338 b.Aux = nil 67339 return true 67340 } 67341 // match: (GT (FlagEQ) yes no) 67342 // cond: 67343 // result: (First nil no yes) 67344 for { 67345 v := b.Control 67346 if v.Op != OpAMD64FlagEQ { 67347 break 67348 } 67349 b.Kind = BlockFirst 67350 b.SetControl(nil) 67351 b.Aux = nil 67352 b.swapSuccessors() 67353 return true 67354 } 67355 // match: (GT (FlagLT_ULT) yes no) 67356 // cond: 67357 // result: (First nil no yes) 67358 for { 67359 v := b.Control 67360 if v.Op != OpAMD64FlagLT_ULT { 67361 break 67362 } 67363 b.Kind = BlockFirst 67364 b.SetControl(nil) 67365 b.Aux = nil 67366 b.swapSuccessors() 67367 return true 67368 } 67369 // match: (GT (FlagLT_UGT) yes no) 67370 // cond: 67371 // result: (First nil no yes) 67372 for { 67373 v := b.Control 67374 if v.Op != OpAMD64FlagLT_UGT { 67375 break 67376 } 67377 b.Kind = BlockFirst 67378 b.SetControl(nil) 67379 b.Aux = nil 67380 b.swapSuccessors() 67381 return true 
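		// Editorial note (hedged): the Flag* rules above fold branches whose
		// comparison result is already known. FlagEQ, FlagLT_ULT, FlagLT_UGT,
		// FlagGT_ULT and FlagGT_UGT record the signed and unsigned orderings
		// separately, so each conditional block collapses to BlockFirst,
		// calling swapSuccessors() whenever the known outcome selects the
		// 'no' edge — e.g. for GT (FlagLT_UGT), signed "less" is known, so
		// the GT branch can never be taken and the successors are swapped.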
67382 } 67383 // match: (GT (FlagGT_ULT) yes no) 67384 // cond: 67385 // result: (First nil yes no) 67386 for { 67387 v := b.Control 67388 if v.Op != OpAMD64FlagGT_ULT { 67389 break 67390 } 67391 b.Kind = BlockFirst 67392 b.SetControl(nil) 67393 b.Aux = nil 67394 return true 67395 } 67396 // match: (GT (FlagGT_UGT) yes no) 67397 // cond: 67398 // result: (First nil yes no) 67399 for { 67400 v := b.Control 67401 if v.Op != OpAMD64FlagGT_UGT { 67402 break 67403 } 67404 b.Kind = BlockFirst 67405 b.SetControl(nil) 67406 b.Aux = nil 67407 return true 67408 } 67409 case BlockIf: 67410 // match: (If (SETL cmp) yes no) 67411 // cond: 67412 // result: (LT cmp yes no) 67413 for { 67414 v := b.Control 67415 if v.Op != OpAMD64SETL { 67416 break 67417 } 67418 cmp := v.Args[0] 67419 b.Kind = BlockAMD64LT 67420 b.SetControl(cmp) 67421 b.Aux = nil 67422 return true 67423 } 67424 // match: (If (SETLE cmp) yes no) 67425 // cond: 67426 // result: (LE cmp yes no) 67427 for { 67428 v := b.Control 67429 if v.Op != OpAMD64SETLE { 67430 break 67431 } 67432 cmp := v.Args[0] 67433 b.Kind = BlockAMD64LE 67434 b.SetControl(cmp) 67435 b.Aux = nil 67436 return true 67437 } 67438 // match: (If (SETG cmp) yes no) 67439 // cond: 67440 // result: (GT cmp yes no) 67441 for { 67442 v := b.Control 67443 if v.Op != OpAMD64SETG { 67444 break 67445 } 67446 cmp := v.Args[0] 67447 b.Kind = BlockAMD64GT 67448 b.SetControl(cmp) 67449 b.Aux = nil 67450 return true 67451 } 67452 // match: (If (SETGE cmp) yes no) 67453 // cond: 67454 // result: (GE cmp yes no) 67455 for { 67456 v := b.Control 67457 if v.Op != OpAMD64SETGE { 67458 break 67459 } 67460 cmp := v.Args[0] 67461 b.Kind = BlockAMD64GE 67462 b.SetControl(cmp) 67463 b.Aux = nil 67464 return true 67465 } 67466 // match: (If (SETEQ cmp) yes no) 67467 // cond: 67468 // result: (EQ cmp yes no) 67469 for { 67470 v := b.Control 67471 if v.Op != OpAMD64SETEQ { 67472 break 67473 } 67474 cmp := v.Args[0] 67475 b.Kind = BlockAMD64EQ 67476 b.SetControl(cmp) 67477 b.Aux = nil 67478 return true 67479 } 67480 // match: (If (SETNE cmp) yes no) 67481 // cond: 67482 // result: (NE cmp yes no) 67483 for { 67484 v := b.Control 67485 if v.Op != OpAMD64SETNE { 67486 break 67487 } 67488 cmp := v.Args[0] 67489 b.Kind = BlockAMD64NE 67490 b.SetControl(cmp) 67491 b.Aux = nil 67492 return true 67493 } 67494 // match: (If (SETB cmp) yes no) 67495 // cond: 67496 // result: (ULT cmp yes no) 67497 for { 67498 v := b.Control 67499 if v.Op != OpAMD64SETB { 67500 break 67501 } 67502 cmp := v.Args[0] 67503 b.Kind = BlockAMD64ULT 67504 b.SetControl(cmp) 67505 b.Aux = nil 67506 return true 67507 } 67508 // match: (If (SETBE cmp) yes no) 67509 // cond: 67510 // result: (ULE cmp yes no) 67511 for { 67512 v := b.Control 67513 if v.Op != OpAMD64SETBE { 67514 break 67515 } 67516 cmp := v.Args[0] 67517 b.Kind = BlockAMD64ULE 67518 b.SetControl(cmp) 67519 b.Aux = nil 67520 return true 67521 } 67522 // match: (If (SETA cmp) yes no) 67523 // cond: 67524 // result: (UGT cmp yes no) 67525 for { 67526 v := b.Control 67527 if v.Op != OpAMD64SETA { 67528 break 67529 } 67530 cmp := v.Args[0] 67531 b.Kind = BlockAMD64UGT 67532 b.SetControl(cmp) 67533 b.Aux = nil 67534 return true 67535 } 67536 // match: (If (SETAE cmp) yes no) 67537 // cond: 67538 // result: (UGE cmp yes no) 67539 for { 67540 v := b.Control 67541 if v.Op != OpAMD64SETAE { 67542 break 67543 } 67544 cmp := v.Args[0] 67545 b.Kind = BlockAMD64UGE 67546 b.SetControl(cmp) 67547 b.Aux = nil 67548 return true 67549 } 67550 // match: (If (SETO cmp) yes no) 67551 // cond: 
		// match: (If (SETO cmp) yes no)
		// cond:
		// result: (OS cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETO {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
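	// Two folds recur throughout the flag-branch cases here.
	// (InvertFlags cmp) marks flags computed with the comparison operands
	// swapped, so the branch condition is mirrored (LT becomes GT, ULE
	// becomes UGE, and so on). A Flag* control (FlagEQ, FlagLT_ULT,
	// FlagLT_UGT, FlagGT_ULT, FlagGT_UGT) is a statically known flag state:
	// the block degenerates to BlockFirst, which always takes its first
	// successor, and swapSuccessors() is called exactly when the known state
	// makes the condition false, so that the "no" edge becomes the one taken.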
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
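		// The (NE (TESTB (SETcc cmp) (SETcc cmp))) rules undo the generic If
		// lowering above: when the boolean being tested is itself a
		// materialized condition code, the TESTB and SET* ops are dropped and
		// the block branches on the original flags. Each rule appears twice
		// because the rule generator emits both argument orders for the
		// commutative TESTB.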
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// cond:
		// result: (OS cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETO {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// cond:
		// result: (OS cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETO {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
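		// The rules below recognize single-bit tests. Testing against
		// (SHLL/SHLQ (MOVconst [1]) x) is a test of bit x, so the shifted
		// mask is never materialized and the BT instruction is used instead;
		// BT copies the selected bit into CF, hence the branch becomes an
		// unsigned-less-than (ULT) on the carry flag. All of these rewrites
		// carry a !config.nacl condition, which keeps them disabled on the
		// NaCl port.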
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
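		// A TEST against a constant with exactly one bit set is likewise a
		// single-bit test: log2/log2uint32 of the constant yields the bit
		// index for BTLconst/BTQconst.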
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTLconst [log2uint32(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint32PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2uint32(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
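		// Shift pairs that isolate a single bit fold the same way: a
		// right-then-left shift by 63 (or 31) isolates the sign bit, a
		// left-then-right shift isolates bit 0, and a bare right shift by the
		// width minus one leaves only the sign bit, so each test becomes a
		// BTQconst/BTLconst on the original value.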
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] x)))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] x)))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
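		// As in the If lowering, a TESTB of a floating-point SET* pair folds
		// to the matching block: SETGF/SETGEF to the unsigned UGT/UGE, and
		// SETEQF/SETNEF to EQF/NEF, which additionally account for the
		// parity flag set by unordered (NaN) comparisons.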
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}