github.com/miolini/go@v0.0.0-20160405192216-fca68c8cb408/src/cmd/compile/internal/ssa/rewriteAMD64.go (about) 1 // autogenerated from gen/AMD64.rules: do not edit! 2 // generated with: cd gen; go run *.go 3 4 package ssa 5 6 import "math" 7 8 var _ = math.MinInt8 // in case not otherwise used 9 func rewriteValueAMD64(v *Value, config *Config) bool { 10 switch v.Op { 11 case OpAMD64ADDB: 12 return rewriteValueAMD64_OpAMD64ADDB(v, config) 13 case OpAMD64ADDBconst: 14 return rewriteValueAMD64_OpAMD64ADDBconst(v, config) 15 case OpAMD64ADDL: 16 return rewriteValueAMD64_OpAMD64ADDL(v, config) 17 case OpAMD64ADDLconst: 18 return rewriteValueAMD64_OpAMD64ADDLconst(v, config) 19 case OpAMD64ADDQ: 20 return rewriteValueAMD64_OpAMD64ADDQ(v, config) 21 case OpAMD64ADDQconst: 22 return rewriteValueAMD64_OpAMD64ADDQconst(v, config) 23 case OpAMD64ADDW: 24 return rewriteValueAMD64_OpAMD64ADDW(v, config) 25 case OpAMD64ADDWconst: 26 return rewriteValueAMD64_OpAMD64ADDWconst(v, config) 27 case OpAMD64ANDB: 28 return rewriteValueAMD64_OpAMD64ANDB(v, config) 29 case OpAMD64ANDBconst: 30 return rewriteValueAMD64_OpAMD64ANDBconst(v, config) 31 case OpAMD64ANDL: 32 return rewriteValueAMD64_OpAMD64ANDL(v, config) 33 case OpAMD64ANDLconst: 34 return rewriteValueAMD64_OpAMD64ANDLconst(v, config) 35 case OpAMD64ANDQ: 36 return rewriteValueAMD64_OpAMD64ANDQ(v, config) 37 case OpAMD64ANDQconst: 38 return rewriteValueAMD64_OpAMD64ANDQconst(v, config) 39 case OpAMD64ANDW: 40 return rewriteValueAMD64_OpAMD64ANDW(v, config) 41 case OpAMD64ANDWconst: 42 return rewriteValueAMD64_OpAMD64ANDWconst(v, config) 43 case OpAdd16: 44 return rewriteValueAMD64_OpAdd16(v, config) 45 case OpAdd32: 46 return rewriteValueAMD64_OpAdd32(v, config) 47 case OpAdd32F: 48 return rewriteValueAMD64_OpAdd32F(v, config) 49 case OpAdd64: 50 return rewriteValueAMD64_OpAdd64(v, config) 51 case OpAdd64F: 52 return rewriteValueAMD64_OpAdd64F(v, config) 53 case OpAdd8: 54 return rewriteValueAMD64_OpAdd8(v, config) 55 case OpAddPtr: 56 return rewriteValueAMD64_OpAddPtr(v, config) 57 case OpAddr: 58 return rewriteValueAMD64_OpAddr(v, config) 59 case OpAnd16: 60 return rewriteValueAMD64_OpAnd16(v, config) 61 case OpAnd32: 62 return rewriteValueAMD64_OpAnd32(v, config) 63 case OpAnd64: 64 return rewriteValueAMD64_OpAnd64(v, config) 65 case OpAnd8: 66 return rewriteValueAMD64_OpAnd8(v, config) 67 case OpAvg64u: 68 return rewriteValueAMD64_OpAvg64u(v, config) 69 case OpBswap32: 70 return rewriteValueAMD64_OpBswap32(v, config) 71 case OpBswap64: 72 return rewriteValueAMD64_OpBswap64(v, config) 73 case OpAMD64CMOVLEQconst: 74 return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config) 75 case OpAMD64CMOVQEQconst: 76 return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config) 77 case OpAMD64CMOVWEQconst: 78 return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config) 79 case OpAMD64CMPB: 80 return rewriteValueAMD64_OpAMD64CMPB(v, config) 81 case OpAMD64CMPBconst: 82 return rewriteValueAMD64_OpAMD64CMPBconst(v, config) 83 case OpAMD64CMPL: 84 return rewriteValueAMD64_OpAMD64CMPL(v, config) 85 case OpAMD64CMPLconst: 86 return rewriteValueAMD64_OpAMD64CMPLconst(v, config) 87 case OpAMD64CMPQ: 88 return rewriteValueAMD64_OpAMD64CMPQ(v, config) 89 case OpAMD64CMPQconst: 90 return rewriteValueAMD64_OpAMD64CMPQconst(v, config) 91 case OpAMD64CMPW: 92 return rewriteValueAMD64_OpAMD64CMPW(v, config) 93 case OpAMD64CMPWconst: 94 return rewriteValueAMD64_OpAMD64CMPWconst(v, config) 95 case OpClosureCall: 96 return rewriteValueAMD64_OpClosureCall(v, config) 97 case OpCom16: 98 return rewriteValueAMD64_OpCom16(v, config) 99 case OpCom32: 100 return rewriteValueAMD64_OpCom32(v, config) 101 case OpCom64: 102 return rewriteValueAMD64_OpCom64(v, config) 103 case OpCom8: 104 return rewriteValueAMD64_OpCom8(v, config) 105 case OpConst16: 106 return rewriteValueAMD64_OpConst16(v, config) 107 case OpConst32: 108 return rewriteValueAMD64_OpConst32(v, config) 109 case OpConst32F: 110 return rewriteValueAMD64_OpConst32F(v, config) 111 case OpConst64: 112 return rewriteValueAMD64_OpConst64(v, config) 113 case OpConst64F: 114 return rewriteValueAMD64_OpConst64F(v, config) 115 case OpConst8: 116 return rewriteValueAMD64_OpConst8(v, config) 117 case OpConstBool: 118 return rewriteValueAMD64_OpConstBool(v, config) 119 case OpConstNil: 120 return rewriteValueAMD64_OpConstNil(v, config) 121 case OpConvert: 122 return rewriteValueAMD64_OpConvert(v, config) 123 case OpCtz16: 124 return rewriteValueAMD64_OpCtz16(v, config) 125 case OpCtz32: 126 return rewriteValueAMD64_OpCtz32(v, config) 127 case OpCtz64: 128 return rewriteValueAMD64_OpCtz64(v, config) 129 case OpCvt32Fto32: 130 return rewriteValueAMD64_OpCvt32Fto32(v, config) 131 case OpCvt32Fto64: 132 return rewriteValueAMD64_OpCvt32Fto64(v, config) 133 case OpCvt32Fto64F: 134 return rewriteValueAMD64_OpCvt32Fto64F(v, config) 135 case OpCvt32to32F: 136 return rewriteValueAMD64_OpCvt32to32F(v, config) 137 case OpCvt32to64F: 138 return rewriteValueAMD64_OpCvt32to64F(v, config) 139 case OpCvt64Fto32: 140 return rewriteValueAMD64_OpCvt64Fto32(v, config) 141 case OpCvt64Fto32F: 142 return rewriteValueAMD64_OpCvt64Fto32F(v, config) 143 case OpCvt64Fto64: 144 return rewriteValueAMD64_OpCvt64Fto64(v, config) 145 case OpCvt64to32F: 146 return rewriteValueAMD64_OpCvt64to32F(v, config) 147 case OpCvt64to64F: 148 return rewriteValueAMD64_OpCvt64to64F(v, config) 149 case OpDeferCall: 150 return rewriteValueAMD64_OpDeferCall(v, config) 151 case OpDiv16: 152 return rewriteValueAMD64_OpDiv16(v, config) 153 case OpDiv16u: 154 return rewriteValueAMD64_OpDiv16u(v, config) 155 case OpDiv32: 156 return rewriteValueAMD64_OpDiv32(v, config) 157 case OpDiv32F: 158 return rewriteValueAMD64_OpDiv32F(v, config) 159 case OpDiv32u: 160 return rewriteValueAMD64_OpDiv32u(v, config) 161 case OpDiv64: 162 return rewriteValueAMD64_OpDiv64(v, config) 163 case OpDiv64F: 164 return rewriteValueAMD64_OpDiv64F(v, config) 165 case OpDiv64u: 166 return rewriteValueAMD64_OpDiv64u(v, config) 167 case OpDiv8: 168 return rewriteValueAMD64_OpDiv8(v, config) 169 case OpDiv8u: 170 return rewriteValueAMD64_OpDiv8u(v, config) 171 case OpEq16: 172 return rewriteValueAMD64_OpEq16(v, config) 173 case OpEq32: 174 return rewriteValueAMD64_OpEq32(v, config) 175 case OpEq32F: 176 return rewriteValueAMD64_OpEq32F(v, config) 177 case OpEq64: 178 return rewriteValueAMD64_OpEq64(v, config) 179 case OpEq64F: 180 return rewriteValueAMD64_OpEq64F(v, config) 181 case OpEq8: 182 return rewriteValueAMD64_OpEq8(v, config) 183 case OpEqPtr: 184 return rewriteValueAMD64_OpEqPtr(v, config) 185 case OpGeq16: 186 return rewriteValueAMD64_OpGeq16(v, config) 187 case OpGeq16U: 188 return rewriteValueAMD64_OpGeq16U(v, config) 189 case OpGeq32: 190 return rewriteValueAMD64_OpGeq32(v, config) 191 case OpGeq32F: 192 return rewriteValueAMD64_OpGeq32F(v, config) 193 case OpGeq32U: 194 return rewriteValueAMD64_OpGeq32U(v, config) 195 case OpGeq64: 196 return rewriteValueAMD64_OpGeq64(v, config) 197 case OpGeq64F: 198 return rewriteValueAMD64_OpGeq64F(v, config) 199 case OpGeq64U: 200 return rewriteValueAMD64_OpGeq64U(v, config) 201 case OpGeq8: 202 return rewriteValueAMD64_OpGeq8(v, config) 203 case OpGeq8U: 204 return rewriteValueAMD64_OpGeq8U(v, config) 205 case OpGetClosurePtr: 206 return rewriteValueAMD64_OpGetClosurePtr(v, config) 207 case OpGetG: 208 return rewriteValueAMD64_OpGetG(v, config) 209 case OpGoCall: 210 return rewriteValueAMD64_OpGoCall(v, config) 211 case OpGreater16: 212 return rewriteValueAMD64_OpGreater16(v, config) 213 case OpGreater16U: 214 return rewriteValueAMD64_OpGreater16U(v, config) 215 case OpGreater32: 216 return rewriteValueAMD64_OpGreater32(v, config) 217 case OpGreater32F: 218 return rewriteValueAMD64_OpGreater32F(v, config) 219 case OpGreater32U: 220 return rewriteValueAMD64_OpGreater32U(v, config) 221 case OpGreater64: 222 return rewriteValueAMD64_OpGreater64(v, config) 223 case OpGreater64F: 224 return rewriteValueAMD64_OpGreater64F(v, config) 225 case OpGreater64U: 226 return rewriteValueAMD64_OpGreater64U(v, config) 227 case OpGreater8: 228 return rewriteValueAMD64_OpGreater8(v, config) 229 case OpGreater8U: 230 return rewriteValueAMD64_OpGreater8U(v, config) 231 case OpHmul16: 232 return rewriteValueAMD64_OpHmul16(v, config) 233 case OpHmul16u: 234 return rewriteValueAMD64_OpHmul16u(v, config) 235 case OpHmul32: 236 return rewriteValueAMD64_OpHmul32(v, config) 237 case OpHmul32u: 238 return rewriteValueAMD64_OpHmul32u(v, config) 239 case OpHmul64: 240 return rewriteValueAMD64_OpHmul64(v, config) 241 case OpHmul64u: 242 return rewriteValueAMD64_OpHmul64u(v, config) 243 case OpHmul8: 244 return rewriteValueAMD64_OpHmul8(v, config) 245 case OpHmul8u: 246 return rewriteValueAMD64_OpHmul8u(v, config) 247 case OpITab: 248 return rewriteValueAMD64_OpITab(v, config) 249 case OpInterCall: 250 return rewriteValueAMD64_OpInterCall(v, config) 251 case OpIsInBounds: 252 return rewriteValueAMD64_OpIsInBounds(v, config) 253 case OpIsNonNil: 254 return rewriteValueAMD64_OpIsNonNil(v, config) 255 case OpIsSliceInBounds: 256 return rewriteValueAMD64_OpIsSliceInBounds(v, config) 257 case OpAMD64LEAQ: 258 return rewriteValueAMD64_OpAMD64LEAQ(v, config) 259 case OpAMD64LEAQ1: 260 return rewriteValueAMD64_OpAMD64LEAQ1(v, config) 261 case OpAMD64LEAQ2: 262 return rewriteValueAMD64_OpAMD64LEAQ2(v, config) 263 case OpAMD64LEAQ4: 264 return rewriteValueAMD64_OpAMD64LEAQ4(v, config) 265 case OpAMD64LEAQ8: 266 return rewriteValueAMD64_OpAMD64LEAQ8(v, config) 267 case OpLeq16: 268 return rewriteValueAMD64_OpLeq16(v, config) 269 case OpLeq16U: 270 return rewriteValueAMD64_OpLeq16U(v, config) 271 case OpLeq32: 272 return rewriteValueAMD64_OpLeq32(v, config) 273 case OpLeq32F: 274 return rewriteValueAMD64_OpLeq32F(v, config) 275 case OpLeq32U: 276 return rewriteValueAMD64_OpLeq32U(v, config) 277 case OpLeq64: 278 return rewriteValueAMD64_OpLeq64(v, config) 279 case OpLeq64F: 280 return rewriteValueAMD64_OpLeq64F(v, config) 281 case OpLeq64U: 282 return rewriteValueAMD64_OpLeq64U(v, config) 283 case OpLeq8: 284 return rewriteValueAMD64_OpLeq8(v, config) 285 case OpLeq8U: 286 return rewriteValueAMD64_OpLeq8U(v, config) 287 case OpLess16: 288 return rewriteValueAMD64_OpLess16(v, config) 289 case OpLess16U: 290 return rewriteValueAMD64_OpLess16U(v, config) 291 case OpLess32: 292 return rewriteValueAMD64_OpLess32(v, config) 293 case OpLess32F: 294 return rewriteValueAMD64_OpLess32F(v, config) 295 case OpLess32U: 296 return rewriteValueAMD64_OpLess32U(v, config) 297 case OpLess64: 298 return rewriteValueAMD64_OpLess64(v, config) 299 case OpLess64F: 300 return rewriteValueAMD64_OpLess64F(v, config) 301 case OpLess64U: 302 return rewriteValueAMD64_OpLess64U(v, config) 303 case OpLess8: 304 return rewriteValueAMD64_OpLess8(v, config) 305 case OpLess8U: 306 return rewriteValueAMD64_OpLess8U(v, config) 307 case OpLoad: 308 return rewriteValueAMD64_OpLoad(v, config) 309 case OpLrot16: 310 return rewriteValueAMD64_OpLrot16(v, config) 311 case OpLrot32: 312 return rewriteValueAMD64_OpLrot32(v, config) 313 case OpLrot64: 314 return rewriteValueAMD64_OpLrot64(v, config) 315 case OpLrot8: 316 return rewriteValueAMD64_OpLrot8(v, config) 317 case OpLsh16x16: 318 return rewriteValueAMD64_OpLsh16x16(v, config) 319 case OpLsh16x32: 320 return rewriteValueAMD64_OpLsh16x32(v, config) 321 case OpLsh16x64: 322 return rewriteValueAMD64_OpLsh16x64(v, config) 323 case OpLsh16x8: 324 return rewriteValueAMD64_OpLsh16x8(v, config) 325 case OpLsh32x16: 326 return rewriteValueAMD64_OpLsh32x16(v, config) 327 case OpLsh32x32: 328 return rewriteValueAMD64_OpLsh32x32(v, config) 329 case OpLsh32x64: 330 return rewriteValueAMD64_OpLsh32x64(v, config) 331 case OpLsh32x8: 332 return rewriteValueAMD64_OpLsh32x8(v, config) 333 case OpLsh64x16: 334 return rewriteValueAMD64_OpLsh64x16(v, config) 335 case OpLsh64x32: 336 return rewriteValueAMD64_OpLsh64x32(v, config) 337 case OpLsh64x64: 338 return rewriteValueAMD64_OpLsh64x64(v, config) 339 case OpLsh64x8: 340 return rewriteValueAMD64_OpLsh64x8(v, config) 341 case OpLsh8x16: 342 return rewriteValueAMD64_OpLsh8x16(v, config) 343 case OpLsh8x32: 344 return rewriteValueAMD64_OpLsh8x32(v, config) 345 case OpLsh8x64: 346 return rewriteValueAMD64_OpLsh8x64(v, config) 347 case OpLsh8x8: 348 return rewriteValueAMD64_OpLsh8x8(v, config) 349 case OpAMD64MOVBQSX: 350 return rewriteValueAMD64_OpAMD64MOVBQSX(v, config) 351 case OpAMD64MOVBQSXload: 352 return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config) 353 case OpAMD64MOVBQZX: 354 return rewriteValueAMD64_OpAMD64MOVBQZX(v, config) 355 case OpAMD64MOVBload: 356 return rewriteValueAMD64_OpAMD64MOVBload(v, config) 357 case OpAMD64MOVBloadidx1: 358 return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config) 359 case OpAMD64MOVBstore: 360 return rewriteValueAMD64_OpAMD64MOVBstore(v, config) 361 case OpAMD64MOVBstoreconst: 362 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config) 363 case OpAMD64MOVBstoreconstidx1: 364 return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config) 365 case OpAMD64MOVBstoreidx1: 366 return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config) 367 case OpAMD64MOVLQSX: 368 return rewriteValueAMD64_OpAMD64MOVLQSX(v, config) 369 case OpAMD64MOVLQSXload: 370 return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config) 371 case OpAMD64MOVLQZX: 372 return rewriteValueAMD64_OpAMD64MOVLQZX(v, config) 373 case OpAMD64MOVLload: 374 return rewriteValueAMD64_OpAMD64MOVLload(v, config) 375 case OpAMD64MOVLloadidx1: 376 return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config) 377 case OpAMD64MOVLloadidx4: 378 return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config) 379 case OpAMD64MOVLstore: 380 return rewriteValueAMD64_OpAMD64MOVLstore(v, config) 381 case OpAMD64MOVLstoreconst: 382 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config) 383 case OpAMD64MOVLstoreconstidx1: 384 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config) 385 case OpAMD64MOVLstoreconstidx4: 386 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config) 387 case OpAMD64MOVLstoreidx1: 388 return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config) 389 case OpAMD64MOVLstoreidx4: 390 return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config) 391 case OpAMD64MOVOload: 392 return rewriteValueAMD64_OpAMD64MOVOload(v, config) 393 case OpAMD64MOVOstore: 394 return rewriteValueAMD64_OpAMD64MOVOstore(v, config) 395 case OpAMD64MOVQload: 396 return rewriteValueAMD64_OpAMD64MOVQload(v, config) 397 case OpAMD64MOVQloadidx1: 398 return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config) 399 case OpAMD64MOVQloadidx8: 400 return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config) 401 case OpAMD64MOVQstore: 402 return rewriteValueAMD64_OpAMD64MOVQstore(v, config) 403 case OpAMD64MOVQstoreconst: 404 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config) 405 case OpAMD64MOVQstoreconstidx1: 406 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config) 407 case OpAMD64MOVQstoreconstidx8: 408 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config) 409 case OpAMD64MOVQstoreidx1: 410 return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config) 411 case OpAMD64MOVQstoreidx8: 412 return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config) 413 case OpAMD64MOVSDload: 414 return rewriteValueAMD64_OpAMD64MOVSDload(v, config) 415 case OpAMD64MOVSDloadidx1: 416 return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config) 417 case OpAMD64MOVSDloadidx8: 418 return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config) 419 case OpAMD64MOVSDstore: 420 return rewriteValueAMD64_OpAMD64MOVSDstore(v, config) 421 case OpAMD64MOVSDstoreidx1: 422 return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config) 423 case OpAMD64MOVSDstoreidx8: 424 return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config) 425 case OpAMD64MOVSSload: 426 return rewriteValueAMD64_OpAMD64MOVSSload(v, config) 427 case OpAMD64MOVSSloadidx1: 428 return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config) 429 case OpAMD64MOVSSloadidx4: 430 return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config) 431 case OpAMD64MOVSSstore: 432 return rewriteValueAMD64_OpAMD64MOVSSstore(v, config) 433 case OpAMD64MOVSSstoreidx1: 434 return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config) 435 case OpAMD64MOVSSstoreidx4: 436 return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config) 437 case OpAMD64MOVWQSX: 438 return rewriteValueAMD64_OpAMD64MOVWQSX(v, config) 439 case OpAMD64MOVWQSXload: 440 return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config) 441 case OpAMD64MOVWQZX: 442 return rewriteValueAMD64_OpAMD64MOVWQZX(v, config) 443 case OpAMD64MOVWload: 444 return rewriteValueAMD64_OpAMD64MOVWload(v, config) 445 case OpAMD64MOVWloadidx1: 446 return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config) 447 case OpAMD64MOVWloadidx2: 448 return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config) 449 case OpAMD64MOVWstore: 450 return rewriteValueAMD64_OpAMD64MOVWstore(v, config) 451 case OpAMD64MOVWstoreconst: 452 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config) 453 case OpAMD64MOVWstoreconstidx1: 454 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config) 455 case OpAMD64MOVWstoreconstidx2: 456 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config) 457 case OpAMD64MOVWstoreidx1: 458 return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) 459 case OpAMD64MOVWstoreidx2: 460 return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) 461 case OpAMD64MULB: 462 return rewriteValueAMD64_OpAMD64MULB(v, config) 463 case OpAMD64MULBconst: 464 return rewriteValueAMD64_OpAMD64MULBconst(v, config) 465 case OpAMD64MULL: 466 return rewriteValueAMD64_OpAMD64MULL(v, config) 467 case OpAMD64MULLconst: 468 return rewriteValueAMD64_OpAMD64MULLconst(v, config) 469 case OpAMD64MULQ: 470 return rewriteValueAMD64_OpAMD64MULQ(v, config) 471 case OpAMD64MULQconst: 472 return rewriteValueAMD64_OpAMD64MULQconst(v, config) 473 case OpAMD64MULW: 474 return rewriteValueAMD64_OpAMD64MULW(v, config) 475 case OpAMD64MULWconst: 476 return rewriteValueAMD64_OpAMD64MULWconst(v, config) 477 case OpMod16: 478 return rewriteValueAMD64_OpMod16(v, config) 479 case OpMod16u: 480 return rewriteValueAMD64_OpMod16u(v, config) 481 case OpMod32: 482 return rewriteValueAMD64_OpMod32(v, config) 483 case OpMod32u: 484 return rewriteValueAMD64_OpMod32u(v, config) 485 case OpMod64: 486 return rewriteValueAMD64_OpMod64(v, config) 487 case OpMod64u: 488 return rewriteValueAMD64_OpMod64u(v, config) 489 case OpMod8: 490 return rewriteValueAMD64_OpMod8(v, config) 491 case OpMod8u: 492 return rewriteValueAMD64_OpMod8u(v, config) 493 case OpMove: 494 return rewriteValueAMD64_OpMove(v, config) 495 case OpMul16: 496 return rewriteValueAMD64_OpMul16(v, config) 497 case OpMul32: 498 return rewriteValueAMD64_OpMul32(v, config) 499 case OpMul32F: 500 return rewriteValueAMD64_OpMul32F(v, config) 501 case OpMul64: 502 return rewriteValueAMD64_OpMul64(v, config) 503 case OpMul64F: 504 return rewriteValueAMD64_OpMul64F(v, config) 505 case OpMul8: 506 return rewriteValueAMD64_OpMul8(v, config) 507 case OpAMD64NEGB: 508 return rewriteValueAMD64_OpAMD64NEGB(v, config) 509 case OpAMD64NEGL: 510 return rewriteValueAMD64_OpAMD64NEGL(v, config) 511 case OpAMD64NEGQ: 512 return rewriteValueAMD64_OpAMD64NEGQ(v, config) 513 case OpAMD64NEGW: 514 return rewriteValueAMD64_OpAMD64NEGW(v, config) 515 case OpAMD64NOTB: 516 return rewriteValueAMD64_OpAMD64NOTB(v, config) 517 case OpAMD64NOTL: 518 return rewriteValueAMD64_OpAMD64NOTL(v, config) 519 case OpAMD64NOTQ: 520 return rewriteValueAMD64_OpAMD64NOTQ(v, config) 521 case OpAMD64NOTW: 522 return rewriteValueAMD64_OpAMD64NOTW(v, config) 523 case OpNeg16: 524 return rewriteValueAMD64_OpNeg16(v, config) 525 case OpNeg32: 526 return rewriteValueAMD64_OpNeg32(v, config) 527 case OpNeg32F: 528 return rewriteValueAMD64_OpNeg32F(v, config) 529 case OpNeg64: 530 return rewriteValueAMD64_OpNeg64(v, config) 531 case OpNeg64F: 532 return rewriteValueAMD64_OpNeg64F(v, config) 533 case OpNeg8: 534 return rewriteValueAMD64_OpNeg8(v, config) 535 case OpNeq16: 536 return rewriteValueAMD64_OpNeq16(v, config) 537 case OpNeq32: 538 return rewriteValueAMD64_OpNeq32(v, config) 539 case OpNeq32F: 540 return rewriteValueAMD64_OpNeq32F(v, config) 541 case OpNeq64: 542 return rewriteValueAMD64_OpNeq64(v, config) 543 case OpNeq64F: 544 return rewriteValueAMD64_OpNeq64F(v, config) 545 case OpNeq8: 546 return rewriteValueAMD64_OpNeq8(v, config) 547 case OpNeqPtr: 548 return rewriteValueAMD64_OpNeqPtr(v, config) 549 case OpNilCheck: 550 return rewriteValueAMD64_OpNilCheck(v, config) 551 case OpNot: 552 return rewriteValueAMD64_OpNot(v, config) 553 case OpAMD64ORB: 554 return rewriteValueAMD64_OpAMD64ORB(v, config) 555 case OpAMD64ORBconst: 556 return rewriteValueAMD64_OpAMD64ORBconst(v, config) 557 case OpAMD64ORL: 558 return rewriteValueAMD64_OpAMD64ORL(v, config) 559 case OpAMD64ORLconst: 560 return rewriteValueAMD64_OpAMD64ORLconst(v, config) 561 case OpAMD64ORQ: 562 return rewriteValueAMD64_OpAMD64ORQ(v, config) 563 case OpAMD64ORQconst: 564 return rewriteValueAMD64_OpAMD64ORQconst(v, config) 565 case OpAMD64ORW: 566 return rewriteValueAMD64_OpAMD64ORW(v, config) 567 case OpAMD64ORWconst: 568 return rewriteValueAMD64_OpAMD64ORWconst(v, config) 569 case OpOffPtr: 570 return rewriteValueAMD64_OpOffPtr(v, config) 571 case OpOr16: 572 return rewriteValueAMD64_OpOr16(v, config) 573 case OpOr32: 574 return rewriteValueAMD64_OpOr32(v, config) 575 case OpOr64: 576 return rewriteValueAMD64_OpOr64(v, config) 577 case OpOr8: 578 return rewriteValueAMD64_OpOr8(v, config) 579 case OpRsh16Ux16: 580 return rewriteValueAMD64_OpRsh16Ux16(v, config) 581 case OpRsh16Ux32: 582 return rewriteValueAMD64_OpRsh16Ux32(v, config) 583 case OpRsh16Ux64: 584 return rewriteValueAMD64_OpRsh16Ux64(v, config) 585 case OpRsh16Ux8: 586 return rewriteValueAMD64_OpRsh16Ux8(v, config) 587 case OpRsh16x16: 588 return rewriteValueAMD64_OpRsh16x16(v, config) 589 case OpRsh16x32: 590 return rewriteValueAMD64_OpRsh16x32(v, config) 591 case OpRsh16x64: 592 return rewriteValueAMD64_OpRsh16x64(v, config) 593 case OpRsh16x8: 594 return rewriteValueAMD64_OpRsh16x8(v, config) 595 case OpRsh32Ux16: 596 return rewriteValueAMD64_OpRsh32Ux16(v, config) 597 case OpRsh32Ux32: 598 return rewriteValueAMD64_OpRsh32Ux32(v, config) 599 case OpRsh32Ux64: 600 return rewriteValueAMD64_OpRsh32Ux64(v, config) 601 case OpRsh32Ux8: 602 return rewriteValueAMD64_OpRsh32Ux8(v, config) 603 case OpRsh32x16: 604 return rewriteValueAMD64_OpRsh32x16(v, config) 605 case OpRsh32x32: 606 return rewriteValueAMD64_OpRsh32x32(v, config) 607 case OpRsh32x64: 608 return rewriteValueAMD64_OpRsh32x64(v, config) 609 case OpRsh32x8: 610 return rewriteValueAMD64_OpRsh32x8(v, config) 611 case OpRsh64Ux16: 612 return rewriteValueAMD64_OpRsh64Ux16(v, config) 613 case OpRsh64Ux32: 614 return rewriteValueAMD64_OpRsh64Ux32(v, config) 615 case OpRsh64Ux64: 616 return rewriteValueAMD64_OpRsh64Ux64(v, config) 617 case OpRsh64Ux8: 618 return rewriteValueAMD64_OpRsh64Ux8(v, config) 619 case OpRsh64x16: 620 return rewriteValueAMD64_OpRsh64x16(v, config) 621 case OpRsh64x32: 622 return rewriteValueAMD64_OpRsh64x32(v, config) 623 case OpRsh64x64: 624 return rewriteValueAMD64_OpRsh64x64(v, config) 625 case OpRsh64x8: 626 return rewriteValueAMD64_OpRsh64x8(v, config) 627 case OpRsh8Ux16: 628 return rewriteValueAMD64_OpRsh8Ux16(v, config) 629 case OpRsh8Ux32: 630 return rewriteValueAMD64_OpRsh8Ux32(v, config) 631 case OpRsh8Ux64: 632 return rewriteValueAMD64_OpRsh8Ux64(v, config) 633 case OpRsh8Ux8: 634 return rewriteValueAMD64_OpRsh8Ux8(v, config) 635 case OpRsh8x16: 636 return rewriteValueAMD64_OpRsh8x16(v, config) 637 case OpRsh8x32: 638 return rewriteValueAMD64_OpRsh8x32(v, config) 639 case OpRsh8x64: 640 return rewriteValueAMD64_OpRsh8x64(v, config) 641 case OpRsh8x8: 642 return rewriteValueAMD64_OpRsh8x8(v, config) 643 case OpAMD64SARB: 644 return rewriteValueAMD64_OpAMD64SARB(v, config) 645 case OpAMD64SARBconst: 646 return rewriteValueAMD64_OpAMD64SARBconst(v, config) 647 case OpAMD64SARL: 648 return rewriteValueAMD64_OpAMD64SARL(v, config) 649 case OpAMD64SARLconst: 650 return rewriteValueAMD64_OpAMD64SARLconst(v, config) 651 case OpAMD64SARQ: 652 return rewriteValueAMD64_OpAMD64SARQ(v, config) 653 case OpAMD64SARQconst: 654 return rewriteValueAMD64_OpAMD64SARQconst(v, config) 655 case OpAMD64SARW: 656 return rewriteValueAMD64_OpAMD64SARW(v, config) 657 case OpAMD64SARWconst: 658 return rewriteValueAMD64_OpAMD64SARWconst(v, config) 659 case OpAMD64SBBLcarrymask: 660 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config) 661 case OpAMD64SBBQcarrymask: 662 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config) 663 case OpAMD64SETA: 664 return rewriteValueAMD64_OpAMD64SETA(v, config) 665 case OpAMD64SETAE: 666 return rewriteValueAMD64_OpAMD64SETAE(v, config) 667 case OpAMD64SETB: 668 return rewriteValueAMD64_OpAMD64SETB(v, config) 669 case OpAMD64SETBE: 670 return rewriteValueAMD64_OpAMD64SETBE(v, config) 671 case OpAMD64SETEQ: 672 return rewriteValueAMD64_OpAMD64SETEQ(v, config) 673 case OpAMD64SETG: 674 return rewriteValueAMD64_OpAMD64SETG(v, config) 675 case OpAMD64SETGE: 676 return rewriteValueAMD64_OpAMD64SETGE(v, config) 677 case OpAMD64SETL: 678 return rewriteValueAMD64_OpAMD64SETL(v, config) 679 case OpAMD64SETLE: 680 return rewriteValueAMD64_OpAMD64SETLE(v, config) 681 case OpAMD64SETNE: 682 return rewriteValueAMD64_OpAMD64SETNE(v, config) 683 case OpAMD64SHLB: 684 return rewriteValueAMD64_OpAMD64SHLB(v, config) 685 case OpAMD64SHLL: 686 return rewriteValueAMD64_OpAMD64SHLL(v, config) 687 case OpAMD64SHLQ: 688 return rewriteValueAMD64_OpAMD64SHLQ(v, config) 689 case OpAMD64SHLW: 690 return rewriteValueAMD64_OpAMD64SHLW(v, config) 691 case OpAMD64SHRB: 692 return rewriteValueAMD64_OpAMD64SHRB(v, config) 693 case OpAMD64SHRL: 694 return rewriteValueAMD64_OpAMD64SHRL(v, config) 695 case OpAMD64SHRQ: 696 return rewriteValueAMD64_OpAMD64SHRQ(v, config) 697 case OpAMD64SHRW: 698 return rewriteValueAMD64_OpAMD64SHRW(v, config) 699 case OpAMD64SUBB: 700 return rewriteValueAMD64_OpAMD64SUBB(v, config) 701 case OpAMD64SUBBconst: 702 return rewriteValueAMD64_OpAMD64SUBBconst(v, config) 703 case OpAMD64SUBL: 704 return rewriteValueAMD64_OpAMD64SUBL(v, config) 705 case OpAMD64SUBLconst: 706 return rewriteValueAMD64_OpAMD64SUBLconst(v, config) 707 case OpAMD64SUBQ: 708 return rewriteValueAMD64_OpAMD64SUBQ(v, config) 709 case OpAMD64SUBQconst: 710 return rewriteValueAMD64_OpAMD64SUBQconst(v, config) 711 case OpAMD64SUBW: 712 return rewriteValueAMD64_OpAMD64SUBW(v, config) 713 case OpAMD64SUBWconst: 714 return rewriteValueAMD64_OpAMD64SUBWconst(v, config) 715 case OpSignExt16to32: 716 return rewriteValueAMD64_OpSignExt16to32(v, config) 717 case OpSignExt16to64: 718 return rewriteValueAMD64_OpSignExt16to64(v, config) 719 case OpSignExt32to64: 720 return rewriteValueAMD64_OpSignExt32to64(v, config) 721 case OpSignExt8to16: 722 return rewriteValueAMD64_OpSignExt8to16(v, config) 723 case OpSignExt8to32: 724 return rewriteValueAMD64_OpSignExt8to32(v, config) 725 case OpSignExt8to64: 726 return rewriteValueAMD64_OpSignExt8to64(v, config) 727 case OpSqrt: 728 return rewriteValueAMD64_OpSqrt(v, config) 729 case OpStaticCall: 730 return rewriteValueAMD64_OpStaticCall(v, config) 731 case OpStore: 732 return rewriteValueAMD64_OpStore(v, config) 733 case OpSub16: 734 return rewriteValueAMD64_OpSub16(v, config) 735 case OpSub32: 736 return rewriteValueAMD64_OpSub32(v, config) 737 case OpSub32F: 738 return rewriteValueAMD64_OpSub32F(v, config) 739 case OpSub64: 740 return rewriteValueAMD64_OpSub64(v, config) 741 case OpSub64F: 742 return rewriteValueAMD64_OpSub64F(v, config) 743 case OpSub8: 744 return rewriteValueAMD64_OpSub8(v, config) 745 case OpSubPtr: 746 return rewriteValueAMD64_OpSubPtr(v, config) 747 case OpTrunc16to8: 748 return rewriteValueAMD64_OpTrunc16to8(v, config) 749 case OpTrunc32to16: 750 return rewriteValueAMD64_OpTrunc32to16(v, config) 751 case OpTrunc32to8: 752 return rewriteValueAMD64_OpTrunc32to8(v, config) 753 case OpTrunc64to16: 754 return rewriteValueAMD64_OpTrunc64to16(v, config) 755 case OpTrunc64to32: 756 return rewriteValueAMD64_OpTrunc64to32(v, config) 757 case OpTrunc64to8: 758 return rewriteValueAMD64_OpTrunc64to8(v, config) 759 case OpAMD64XORB: 760 return rewriteValueAMD64_OpAMD64XORB(v, config) 761 case OpAMD64XORBconst: 762 return rewriteValueAMD64_OpAMD64XORBconst(v, config) 763 case OpAMD64XORL: 764 return rewriteValueAMD64_OpAMD64XORL(v, config) 765 case OpAMD64XORLconst: 766 return rewriteValueAMD64_OpAMD64XORLconst(v, config) 767 case OpAMD64XORQ: 768 return rewriteValueAMD64_OpAMD64XORQ(v, config) 769 case OpAMD64XORQconst: 770 return rewriteValueAMD64_OpAMD64XORQconst(v, config) 771 case OpAMD64XORW: 772 return rewriteValueAMD64_OpAMD64XORW(v, config) 773 case OpAMD64XORWconst: 774 return rewriteValueAMD64_OpAMD64XORWconst(v, config) 775 case OpXor16: 776 return rewriteValueAMD64_OpXor16(v, config) 777 case OpXor32: 778 return rewriteValueAMD64_OpXor32(v, config) 779 case OpXor64: 780 return rewriteValueAMD64_OpXor64(v, config) 781 case OpXor8: 782 return rewriteValueAMD64_OpXor8(v, config) 783 case OpZero: 784 return rewriteValueAMD64_OpZero(v, config) 785 case OpZeroExt16to32: 786 return rewriteValueAMD64_OpZeroExt16to32(v, config) 787 case OpZeroExt16to64: 788 return rewriteValueAMD64_OpZeroExt16to64(v, config) 789 case OpZeroExt32to64: 790 return rewriteValueAMD64_OpZeroExt32to64(v, config) 791 case OpZeroExt8to16: 792 return rewriteValueAMD64_OpZeroExt8to16(v, config) 793 case OpZeroExt8to32: 794 return rewriteValueAMD64_OpZeroExt8to32(v, config) 795 case OpZeroExt8to64: 796 return rewriteValueAMD64_OpZeroExt8to64(v, config) 797 } 798 return false 799 } 800 func rewriteValueAMD64_OpAMD64ADDB(v *Value, config *Config) bool { 801 b := v.Block 802 _ = b 803 // match: (ADDB x (MOVBconst [c])) 804 // cond: 805 // result: (ADDBconst [c] x) 806 for { 807 x := v.Args[0] 808 v_1 := v.Args[1] 809 if v_1.Op != OpAMD64MOVBconst { 810 break 811 } 812 c := v_1.AuxInt 813 v.reset(OpAMD64ADDBconst) 814 v.AuxInt = c 815 v.AddArg(x) 816 return true 817 } 818 // match: (ADDB (MOVBconst [c]) x) 819 // cond: 820 // result: (ADDBconst [c] x) 821 for { 822 v_0 := v.Args[0] 823 if v_0.Op != OpAMD64MOVBconst { 824 break 825 } 826 c := v_0.AuxInt 827 x := v.Args[1] 828 v.reset(OpAMD64ADDBconst) 829 v.AuxInt = c 830 v.AddArg(x) 831 return true 832 } 833 // match: (ADDB x (NEGB y)) 834 // cond: 835 // result: (SUBB x y) 836 for { 837 x := v.Args[0] 838 v_1 := v.Args[1] 839 if v_1.Op != OpAMD64NEGB { 840 break 841 } 842 y := v_1.Args[0] 843 v.reset(OpAMD64SUBB) 844 v.AddArg(x) 845 v.AddArg(y) 846 return true 847 } 848 return false 849 } 850 func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool { 851 b := v.Block 852 _ = b 853 // match: (ADDBconst [c] x) 854 // cond: int8(c)==0 855 // result: x 856 for { 857 c := v.AuxInt 858 x := v.Args[0] 859 if !(int8(c) == 0) { 860 break 861 } 862 v.reset(OpCopy) 863 v.Type = x.Type 864 v.AddArg(x) 865 return true 866 } 867 // match: (ADDBconst [c] (MOVBconst [d])) 868 // cond: 869 // result: (MOVBconst [int64(int8(c+d))]) 870 for { 871 c := v.AuxInt 872 v_0 := v.Args[0] 873 if v_0.Op != OpAMD64MOVBconst { 874 break 875 } 876 d := v_0.AuxInt 877 v.reset(OpAMD64MOVBconst) 878 v.AuxInt = int64(int8(c + d)) 879 return true 880 } 881 // match: (ADDBconst [c] (ADDBconst [d] x)) 882 // cond: 883 // result: (ADDBconst [int64(int8(c+d))] x) 884 for { 885 c := v.AuxInt 886 v_0 := v.Args[0] 887 if v_0.Op != OpAMD64ADDBconst { 888 break 889 } 890 d := v_0.AuxInt 891 x := v_0.Args[0] 892 v.reset(OpAMD64ADDBconst) 893 v.AuxInt = int64(int8(c + d)) 894 v.AddArg(x) 895 return true 896 } 897 return false 898 } 899 func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { 900 b := v.Block 901 _ = b 902 // match: (ADDL x (MOVLconst [c])) 903 // cond: 904 // result: (ADDLconst [c] x) 905 for { 906 x := v.Args[0] 907 v_1 := v.Args[1] 908 if v_1.Op != OpAMD64MOVLconst { 909 break 910 } 911 c := v_1.AuxInt 912 v.reset(OpAMD64ADDLconst) 913 v.AuxInt = c 914 v.AddArg(x) 915 return true 916 } 917 // match: (ADDL (MOVLconst [c]) x) 918 // cond: 919 // result: (ADDLconst [c] x) 920 for { 921 v_0 := v.Args[0] 922 if v_0.Op != OpAMD64MOVLconst { 923 break 924 } 925 c := v_0.AuxInt 926 x := v.Args[1] 927 v.reset(OpAMD64ADDLconst) 928 v.AuxInt = c 929 v.AddArg(x) 930 return true 931 } 932 // match: (ADDL x (NEGL y)) 933 // cond: 934 // result: (SUBL x y) 935 for { 936 x := v.Args[0] 937 v_1 := v.Args[1] 938 if v_1.Op != OpAMD64NEGL { 939 break 940 } 941 y := v_1.Args[0] 942 v.reset(OpAMD64SUBL) 943 v.AddArg(x) 944 v.AddArg(y) 945 return true 946 } 947 return false 948 } 949 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool { 950 b := v.Block 951 _ = b 952 // match: (ADDLconst [c] x) 953 // cond: int32(c)==0 954 // result: x 955 for { 956 c := v.AuxInt 957 x := v.Args[0] 958 if !(int32(c) == 0) { 959 break 960 } 961 v.reset(OpCopy) 962 v.Type = x.Type 963 v.AddArg(x) 964 return true 965 } 966 // match: (ADDLconst [c] (MOVLconst [d])) 967 // cond: 968 // result: (MOVLconst [int64(int32(c+d))]) 969 for { 970 c := v.AuxInt 971 v_0 := v.Args[0] 972 if v_0.Op != OpAMD64MOVLconst { 973 break 974 } 975 d := v_0.AuxInt 976 v.reset(OpAMD64MOVLconst) 977 v.AuxInt = int64(int32(c + d)) 978 return true 979 } 980 // match: (ADDLconst [c] (ADDLconst [d] x)) 981 // cond: 982 // result: (ADDLconst [int64(int32(c+d))] x) 983 for { 984 c := v.AuxInt 985 v_0 := v.Args[0] 986 if v_0.Op != OpAMD64ADDLconst { 987 break 988 } 989 d := v_0.AuxInt 990 x := v_0.Args[0] 991 v.reset(OpAMD64ADDLconst) 992 v.AuxInt = int64(int32(c + d)) 993 v.AddArg(x) 994 return true 995 } 996 return false 997 } 998 func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool { 999 b := v.Block 1000 _ = b 1001 // match: (ADDQ x (MOVQconst [c])) 1002 // cond: is32Bit(c) 1003 // result: (ADDQconst [c] x) 1004 for { 1005 x := v.Args[0] 1006 v_1 := v.Args[1] 1007 if v_1.Op != OpAMD64MOVQconst { 1008 break 1009 } 1010 c := v_1.AuxInt 1011 if !(is32Bit(c)) { 1012 break 1013 } 1014 v.reset(OpAMD64ADDQconst) 1015 v.AuxInt = c 1016 v.AddArg(x) 1017 return true 1018 } 1019 // match: (ADDQ (MOVQconst [c]) x) 1020 // cond: is32Bit(c) 1021 // result: (ADDQconst [c] x) 1022 for { 1023 v_0 := v.Args[0] 1024 if v_0.Op != OpAMD64MOVQconst { 1025 break 1026 } 1027 c := v_0.AuxInt 1028 x := v.Args[1] 1029 if !(is32Bit(c)) { 1030 break 1031 } 1032 v.reset(OpAMD64ADDQconst) 1033 v.AuxInt = c 1034 v.AddArg(x) 1035 return true 1036 } 1037 // match: (ADDQ x (SHLQconst [3] y)) 1038 // cond: 1039 // result: (LEAQ8 x y) 1040 for { 1041 x := v.Args[0] 1042 v_1 := v.Args[1] 1043 if v_1.Op != OpAMD64SHLQconst { 1044 break 1045 } 1046 if v_1.AuxInt != 3 { 1047 break 1048 } 1049 y := v_1.Args[0] 1050 v.reset(OpAMD64LEAQ8) 1051 v.AddArg(x) 1052 v.AddArg(y) 1053 return true 1054 } 1055 // match: (ADDQ x (SHLQconst [2] y)) 1056 // cond: 1057 // result: (LEAQ4 x y) 1058 for { 1059 x := v.Args[0] 1060 v_1 := v.Args[1] 1061 if v_1.Op != OpAMD64SHLQconst { 1062 break 1063 } 1064 if v_1.AuxInt != 2 { 1065 break 1066 } 1067 y := v_1.Args[0] 1068 v.reset(OpAMD64LEAQ4) 1069 v.AddArg(x) 1070 v.AddArg(y) 1071 return true 1072 } 1073 // match: (ADDQ x (SHLQconst [1] y)) 1074 // cond: 1075 // result: (LEAQ2 x y) 1076 for { 1077 x := v.Args[0] 1078 v_1 := v.Args[1] 1079 if v_1.Op != OpAMD64SHLQconst { 1080 break 1081 } 1082 if v_1.AuxInt != 1 { 1083 break 1084 } 1085 y := v_1.Args[0] 1086 v.reset(OpAMD64LEAQ2) 1087 v.AddArg(x) 1088 v.AddArg(y) 1089 return true 1090 } 1091 // match: (ADDQ x (ADDQ y y)) 1092 // cond: 1093 // result: (LEAQ2 x y) 1094 for { 1095 x := v.Args[0] 1096 v_1 := v.Args[1] 1097 if v_1.Op != OpAMD64ADDQ { 1098 break 1099 } 1100 y := v_1.Args[0] 1101 if y != v_1.Args[1] { 1102 break 1103 } 1104 v.reset(OpAMD64LEAQ2) 1105 v.AddArg(x) 1106 v.AddArg(y) 1107 return true 1108 } 1109 // match: (ADDQ x (ADDQ x y)) 1110 // cond: 1111 // result: (LEAQ2 y x) 1112 for { 1113 x := v.Args[0] 1114 v_1 := v.Args[1] 1115 if v_1.Op != OpAMD64ADDQ { 1116 break 1117 } 1118 if x != v_1.Args[0] { 1119 break 1120 } 1121 y := v_1.Args[1] 1122 v.reset(OpAMD64LEAQ2) 1123 v.AddArg(y) 1124 v.AddArg(x) 1125 return true 1126 } 1127 // match: (ADDQ x (ADDQ y x)) 1128 // cond: 1129 // result: (LEAQ2 y x) 1130 for { 1131 x := v.Args[0] 1132 v_1 := v.Args[1] 1133 if v_1.Op != OpAMD64ADDQ { 1134 break 1135 } 1136 y := v_1.Args[0] 1137 if x != v_1.Args[1] { 1138 break 1139 } 1140 v.reset(OpAMD64LEAQ2) 1141 v.AddArg(y) 1142 v.AddArg(x) 1143 return true 1144 } 1145 // match: (ADDQ (ADDQconst [c] x) y) 1146 // cond: 1147 // result: (LEAQ1 [c] x y) 1148 for { 1149 v_0 := v.Args[0] 1150 if v_0.Op != OpAMD64ADDQconst { 1151 break 1152 } 1153 c := v_0.AuxInt 1154 x := v_0.Args[0] 1155 y := v.Args[1] 1156 v.reset(OpAMD64LEAQ1) 1157 v.AuxInt = c 1158 v.AddArg(x) 1159 v.AddArg(y) 1160 return true 1161 } 1162 // match: (ADDQ x (ADDQconst [c] y)) 1163 // cond: 1164 // result: (LEAQ1 [c] x y) 1165 for { 1166 x := v.Args[0] 1167 v_1 := v.Args[1] 1168 if v_1.Op != OpAMD64ADDQconst { 1169 break 1170 } 1171 c := v_1.AuxInt 1172 y := v_1.Args[0] 1173 v.reset(OpAMD64LEAQ1) 1174 v.AuxInt = c 1175 v.AddArg(x) 1176 v.AddArg(y) 1177 return true 1178 } 1179 // match: (ADDQ x (LEAQ [c] {s} y)) 1180 // cond: x.Op != OpSB && y.Op != OpSB 1181 // result: (LEAQ1 [c] {s} x y) 1182 for { 1183 x := v.Args[0] 1184 v_1 := v.Args[1] 1185 if v_1.Op != OpAMD64LEAQ { 1186 break 1187 } 1188 c := v_1.AuxInt 1189 s := v_1.Aux 1190 y := v_1.Args[0] 1191 if !(x.Op != OpSB && y.Op != OpSB) { 1192 break 1193 } 1194 v.reset(OpAMD64LEAQ1) 1195 v.AuxInt = c 1196 v.Aux = s 1197 v.AddArg(x) 1198 v.AddArg(y) 1199 return true 1200 } 1201 // match: (ADDQ (LEAQ [c] {s} x) y) 1202 // cond: x.Op != OpSB && y.Op != OpSB 1203 // result: (LEAQ1 [c] {s} x y) 1204 for { 1205 v_0 := v.Args[0] 1206 if v_0.Op != OpAMD64LEAQ { 1207 break 1208 } 1209 c := v_0.AuxInt 1210 s := v_0.Aux 1211 x := v_0.Args[0] 1212 y := v.Args[1] 1213 if !(x.Op != OpSB && y.Op != OpSB) { 1214 break 1215 } 1216 v.reset(OpAMD64LEAQ1) 1217 v.AuxInt = c 1218 v.Aux = s 1219 v.AddArg(x) 1220 v.AddArg(y) 1221 return true 1222 } 1223 // match: (ADDQ x (NEGQ y)) 1224 // cond: 1225 // result: (SUBQ x y) 1226 for { 1227 x := v.Args[0] 1228 v_1 := v.Args[1] 1229 if v_1.Op != OpAMD64NEGQ { 1230 break 1231 } 1232 y := v_1.Args[0] 1233 v.reset(OpAMD64SUBQ) 1234 v.AddArg(x) 1235 v.AddArg(y) 1236 return true 1237 } 1238 return false 1239 } 1240 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { 1241 b := v.Block 1242 _ = b 1243 // match: (ADDQconst [c] (ADDQ x y)) 1244 // cond: 1245 // result: (LEAQ1 [c] x y) 1246 for { 1247 c := v.AuxInt 1248 v_0 := v.Args[0] 1249 if v_0.Op != OpAMD64ADDQ { 1250 break 1251 } 1252 x := v_0.Args[0] 1253 y := v_0.Args[1] 1254 v.reset(OpAMD64LEAQ1) 1255 v.AuxInt = c 1256 v.AddArg(x) 1257 v.AddArg(y) 1258 return true 1259 } 1260 // match: (ADDQconst [c] (LEAQ [d] {s} x)) 1261 // cond: is32Bit(c+d) 1262 // result: (LEAQ [c+d] {s} x) 1263 for { 1264 c := v.AuxInt 1265 v_0 := v.Args[0] 1266 if v_0.Op != OpAMD64LEAQ { 1267 break 1268 } 1269 d := v_0.AuxInt 1270 s := v_0.Aux 1271 x := v_0.Args[0] 1272 if !(is32Bit(c + d)) { 1273 break 1274 } 1275 v.reset(OpAMD64LEAQ) 1276 v.AuxInt = c + d 1277 v.Aux = s 1278 v.AddArg(x) 1279 return true 1280 } 1281 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) 1282 // cond: is32Bit(c+d) 1283 // result: (LEAQ1 [c+d] {s} x y) 1284 for { 1285 c := v.AuxInt 1286 v_0 := v.Args[0] 1287 if v_0.Op != OpAMD64LEAQ1 { 1288 break 1289 } 1290 d := v_0.AuxInt 1291 s := v_0.Aux 1292 x := v_0.Args[0] 1293 y := v_0.Args[1] 1294 if !(is32Bit(c + d)) { 1295 break 1296 } 1297 v.reset(OpAMD64LEAQ1) 1298 v.AuxInt = c + d 1299 v.Aux = s 1300 v.AddArg(x) 1301 v.AddArg(y) 1302 return true 1303 } 1304 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) 1305 // cond: is32Bit(c+d) 1306 // result: (LEAQ2 [c+d] {s} x y) 1307 for { 1308 c := v.AuxInt 1309 v_0 := v.Args[0] 1310 if v_0.Op != OpAMD64LEAQ2 { 1311 break 1312 } 1313 d := v_0.AuxInt 1314 s := v_0.Aux 1315 x := v_0.Args[0] 1316 y := v_0.Args[1] 1317 if !(is32Bit(c + d)) { 1318 break 1319 } 1320 v.reset(OpAMD64LEAQ2) 1321 v.AuxInt = c + d 1322 v.Aux = s 1323 v.AddArg(x) 1324 v.AddArg(y) 1325 return true 1326 } 1327 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) 1328 // cond: is32Bit(c+d) 1329 // result: (LEAQ4 [c+d] {s} x y) 1330 for { 1331 c := v.AuxInt 1332 v_0 := v.Args[0] 1333 if v_0.Op != OpAMD64LEAQ4 { 1334 break 1335 } 1336 d := v_0.AuxInt 1337 s := v_0.Aux 1338 x := v_0.Args[0] 1339 y := v_0.Args[1] 1340 if !(is32Bit(c + d)) { 1341 break 1342 } 1343 v.reset(OpAMD64LEAQ4) 1344 v.AuxInt = c + d 1345 v.Aux = s 1346 v.AddArg(x) 1347 v.AddArg(y) 1348 return true 1349 } 1350 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) 1351 // cond: is32Bit(c+d) 1352 // result: (LEAQ8 [c+d] {s} x y) 1353 for { 1354 c := v.AuxInt 1355 v_0 := v.Args[0] 1356 if v_0.Op != OpAMD64LEAQ8 { 1357 break 1358 } 1359 d := v_0.AuxInt 1360 s := v_0.Aux 1361 x := v_0.Args[0] 1362 y := v_0.Args[1] 1363 if !(is32Bit(c + d)) { 1364 break 1365 } 1366 v.reset(OpAMD64LEAQ8) 1367 v.AuxInt = c + d 1368 v.Aux = s 1369 v.AddArg(x) 1370 v.AddArg(y) 1371 return true 1372 } 1373 // match: (ADDQconst [0] x) 1374 // cond: 1375 // result: x 1376 for { 1377 if v.AuxInt != 0 { 1378 break 1379 } 1380 x := v.Args[0] 1381 v.reset(OpCopy) 1382 v.Type = x.Type 1383 v.AddArg(x) 1384 return true 1385 } 1386 // match: (ADDQconst [c] (MOVQconst [d])) 1387 // cond: 1388 // result: (MOVQconst [c+d]) 1389 for { 1390 c := v.AuxInt 1391 v_0 := v.Args[0] 1392 if v_0.Op != OpAMD64MOVQconst { 1393 break 1394 } 1395 d := v_0.AuxInt 1396 v.reset(OpAMD64MOVQconst) 1397 v.AuxInt = c + d 1398 return true 1399 } 1400 // match: (ADDQconst [c] (ADDQconst [d] x)) 1401 // cond: is32Bit(c+d) 1402 // result: (ADDQconst [c+d] x) 1403 for { 1404 c := v.AuxInt 1405 v_0 := v.Args[0] 1406 if v_0.Op != OpAMD64ADDQconst { 1407 break 1408 } 1409 d := v_0.AuxInt 1410 x := v_0.Args[0] 1411 if !(is32Bit(c + d)) { 1412 break 1413 } 1414 v.reset(OpAMD64ADDQconst) 1415 v.AuxInt = c + d 1416 v.AddArg(x) 1417 return true 1418 } 1419 return false 1420 } 1421 func rewriteValueAMD64_OpAMD64ADDW(v *Value, config *Config) bool { 1422 b := v.Block 1423 _ = b 1424 // match: (ADDW x (MOVWconst [c])) 1425 // cond: 1426 // result: (ADDWconst [c] x) 1427 for { 1428 x := v.Args[0] 1429 v_1 := v.Args[1] 1430 if v_1.Op != OpAMD64MOVWconst { 1431 break 1432 } 1433 c := v_1.AuxInt 1434 v.reset(OpAMD64ADDWconst) 1435 v.AuxInt = c 1436 v.AddArg(x) 1437 return true 1438 } 1439 // match: (ADDW (MOVWconst [c]) x) 1440 // cond: 1441 // result: (ADDWconst [c] x) 1442 for { 1443 v_0 := v.Args[0] 1444 if v_0.Op != OpAMD64MOVWconst { 1445 break 1446 } 1447 c := v_0.AuxInt 1448 x := v.Args[1] 1449 v.reset(OpAMD64ADDWconst) 1450 v.AuxInt = c 1451 v.AddArg(x) 1452 return true 1453 } 1454 // match: (ADDW x (NEGW y)) 1455 // cond: 1456 // result: (SUBW x y) 1457 for { 1458 x := v.Args[0] 1459 v_1 := v.Args[1] 1460 if v_1.Op != OpAMD64NEGW { 1461 break 1462 } 1463 y := v_1.Args[0] 1464 v.reset(OpAMD64SUBW) 1465 v.AddArg(x) 1466 v.AddArg(y) 1467 return true 1468 } 1469 return false 1470 } 1471 func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool { 1472 b := v.Block 1473 _ = b 1474 // match: (ADDWconst [c] x) 1475 // cond: int16(c)==0 1476 // result: x 1477 for { 1478 c := v.AuxInt 1479 x := v.Args[0] 1480 if !(int16(c) == 0) { 1481 break 1482 } 1483 v.reset(OpCopy) 1484 v.Type = x.Type 1485 v.AddArg(x) 1486 return true 1487 } 1488 // match: (ADDWconst [c] (MOVWconst [d])) 1489 // cond: 1490 // result: (MOVWconst [int64(int16(c+d))]) 1491 for { 1492 c := v.AuxInt 1493 v_0 := v.Args[0] 1494 if v_0.Op != OpAMD64MOVWconst { 1495 break 1496 } 1497 d := v_0.AuxInt 1498 v.reset(OpAMD64MOVWconst) 1499 v.AuxInt = int64(int16(c + d)) 1500 return true 1501 } 1502 // match: (ADDWconst [c] (ADDWconst [d] x)) 1503 // cond: 1504 // result: (ADDWconst [int64(int16(c+d))] x) 1505 for { 1506 c := v.AuxInt 1507 v_0 := v.Args[0] 1508 if v_0.Op != OpAMD64ADDWconst { 1509 break 1510 } 1511 d := v_0.AuxInt 1512 x := v_0.Args[0] 1513 v.reset(OpAMD64ADDWconst) 1514 v.AuxInt = int64(int16(c + d)) 1515 v.AddArg(x) 1516 return true 1517 } 1518 return false 1519 } 1520 func rewriteValueAMD64_OpAMD64ANDB(v *Value, config *Config) bool { 1521 b := v.Block 1522 _ = b 1523 // match: (ANDB x (MOVLconst [c])) 1524 // cond: 1525 // result: (ANDBconst [c] x) 1526 for { 1527 x := v.Args[0] 1528 v_1 := v.Args[1] 1529 if v_1.Op != OpAMD64MOVLconst { 1530 break 1531 } 1532 c := v_1.AuxInt 1533 v.reset(OpAMD64ANDBconst) 1534 v.AuxInt = c 1535 v.AddArg(x) 1536 return true 1537 } 1538 // match: (ANDB (MOVLconst [c]) x) 1539 // cond: 1540 // result: (ANDBconst [c] x) 1541 for { 1542 v_0 := v.Args[0] 1543 if v_0.Op != OpAMD64MOVLconst { 1544 break 1545 } 1546 c := v_0.AuxInt 1547 x := v.Args[1] 1548 v.reset(OpAMD64ANDBconst) 1549 v.AuxInt = c 1550 v.AddArg(x) 1551 return true 1552 } 1553 // match: (ANDB x (MOVBconst [c])) 1554 // cond: 1555 // result: (ANDBconst [c] x) 1556 for { 1557 x := v.Args[0] 1558 v_1 := v.Args[1] 1559 if v_1.Op != OpAMD64MOVBconst { 1560 break 1561 } 1562 c := v_1.AuxInt 1563 v.reset(OpAMD64ANDBconst) 1564 v.AuxInt = c 1565 v.AddArg(x) 1566 return true 1567 } 1568 // match: (ANDB (MOVBconst [c]) x) 1569 // cond: 1570 // result: (ANDBconst [c] x) 1571 for { 1572 v_0 := v.Args[0] 1573 if v_0.Op != OpAMD64MOVBconst { 1574 break 1575 } 1576 c := v_0.AuxInt 1577 x := v.Args[1] 1578 v.reset(OpAMD64ANDBconst) 1579 v.AuxInt = c 1580 v.AddArg(x) 1581 return true 1582 } 1583 // match: (ANDB x x) 1584 // cond: 1585 // result: x 1586 for { 1587 x := v.Args[0] 1588 if x != v.Args[1] { 1589 break 1590 } 1591 v.reset(OpCopy) 1592 v.Type = x.Type 1593 v.AddArg(x) 1594 return true 1595 } 1596 return false 1597 } 1598 func rewriteValueAMD64_OpAMD64ANDBconst(v *Value, config *Config) bool { 1599 b := v.Block 1600 _ = b 1601 // match: (ANDBconst [c] (ANDBconst [d] x)) 1602 // cond: 1603 // result: (ANDBconst [c & d] x) 1604 for { 1605 c := v.AuxInt 1606 v_0 := v.Args[0] 1607 if v_0.Op != OpAMD64ANDBconst { 1608 break 1609 } 1610 d := v_0.AuxInt 1611 x := v_0.Args[0] 1612 v.reset(OpAMD64ANDBconst) 1613 v.AuxInt = c & d 1614 v.AddArg(x) 1615 return true 1616 } 1617 // match: (ANDBconst [c] _) 1618 // cond: int8(c)==0 1619 // result: (MOVBconst [0]) 1620 for { 1621 c := v.AuxInt 1622 if !(int8(c) == 0) { 1623 break 1624 } 1625 v.reset(OpAMD64MOVBconst) 1626 v.AuxInt = 0 1627 return true 1628 } 1629 // match: (ANDBconst [c] x) 1630 // cond: int8(c)==-1 1631 // result: x 1632 for { 1633 c := v.AuxInt 1634 x := v.Args[0] 1635 if !(int8(c) == -1) { 1636 break 1637 } 1638 v.reset(OpCopy) 1639 v.Type = x.Type 1640 v.AddArg(x) 1641 return true 1642 } 1643 // match: (ANDBconst [c] (MOVBconst [d])) 1644 // cond: 1645 // result: (MOVBconst [c&d]) 1646 for { 1647 c := v.AuxInt 1648 v_0 := v.Args[0] 1649 if v_0.Op != OpAMD64MOVBconst { 1650 break 1651 } 1652 d := v_0.AuxInt 1653 v.reset(OpAMD64MOVBconst) 1654 v.AuxInt = c & d 1655 return true 1656 } 1657 return false 1658 } 1659 func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { 1660 b := v.Block 1661 _ = b 1662 // match: (ANDL x (MOVLconst [c])) 1663 // cond: 1664 // result: (ANDLconst [c] x) 1665 for { 1666 x := v.Args[0] 1667 v_1 := v.Args[1] 1668 if v_1.Op != OpAMD64MOVLconst { 1669 break 1670 } 1671 c := v_1.AuxInt 1672 v.reset(OpAMD64ANDLconst) 1673 v.AuxInt = c 1674 v.AddArg(x) 1675 return true 1676 } 1677 // match: (ANDL (MOVLconst [c]) x) 1678 // cond: 1679 // result: (ANDLconst [c] x) 1680 for { 1681 v_0 := v.Args[0] 1682 if v_0.Op != OpAMD64MOVLconst { 1683 break 1684 } 1685 c := v_0.AuxInt 1686 x := v.Args[1] 1687 v.reset(OpAMD64ANDLconst) 1688 v.AuxInt = c 1689 v.AddArg(x) 1690 return true 1691 } 1692 // match: (ANDL x x) 1693 // cond: 1694 // result: x 1695 for { 1696 x := v.Args[0] 1697 if x != v.Args[1] { 1698 break 1699 } 1700 v.reset(OpCopy) 1701 v.Type = x.Type 1702 v.AddArg(x) 1703 return true 1704 } 1705 return false 1706 } 1707 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool { 1708 b := v.Block 1709 _ = b 1710 // match: (ANDLconst [c] (ANDLconst [d] x)) 1711 // cond: 1712 // result: (ANDLconst [c & d] x) 1713 for { 1714 c := v.AuxInt 1715 v_0 := v.Args[0] 1716 if v_0.Op != OpAMD64ANDLconst { 1717 break 1718 } 1719 d := v_0.AuxInt 1720 x := v_0.Args[0] 1721 v.reset(OpAMD64ANDLconst) 1722 v.AuxInt = c & d 1723 v.AddArg(x) 1724 return true 1725 } 1726 // match: (ANDLconst [c] _) 1727 // cond: int32(c)==0 1728 // result: (MOVLconst [0]) 1729 for { 1730 c := v.AuxInt 1731 if !(int32(c) == 0) { 1732 break 1733 } 1734 v.reset(OpAMD64MOVLconst) 1735 v.AuxInt = 0 1736 return true 1737 } 1738 // match: (ANDLconst [c] x) 1739 // cond: int32(c)==-1 1740 // result: x 1741 for { 1742 c := v.AuxInt 1743 x := v.Args[0] 1744 if !(int32(c) == -1) { 1745 break 1746 } 1747 v.reset(OpCopy) 1748 v.Type = x.Type 1749 v.AddArg(x) 1750 return true 1751 } 1752 // match: (ANDLconst [c] (MOVLconst [d])) 1753 // cond: 1754 // result: (MOVLconst [c&d]) 1755 for { 1756 c := v.AuxInt 1757 v_0 := v.Args[0] 1758 if v_0.Op != OpAMD64MOVLconst { 1759 break 1760 } 1761 d := v_0.AuxInt 1762 v.reset(OpAMD64MOVLconst) 1763 v.AuxInt = c & d 1764 return true 1765 } 1766 return false 1767 } 1768 func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool { 1769 b := v.Block 1770 _ = b 1771 // match: (ANDQ x (MOVQconst [c])) 1772 // cond: is32Bit(c) 1773 // result: (ANDQconst [c] x) 1774 for { 1775 x := v.Args[0] 1776 v_1 := v.Args[1] 1777 if v_1.Op != OpAMD64MOVQconst { 1778 break 1779 } 1780 c := v_1.AuxInt 1781 if !(is32Bit(c)) { 1782 break 1783 } 1784 v.reset(OpAMD64ANDQconst) 1785 v.AuxInt = c 1786 v.AddArg(x) 1787 return true 1788 } 1789 // match: (ANDQ (MOVQconst [c]) x) 1790 // cond: is32Bit(c) 1791 // result: (ANDQconst [c] x) 1792 for { 1793 v_0 := v.Args[0] 1794 if v_0.Op != OpAMD64MOVQconst { 1795 break 1796 } 1797 c := v_0.AuxInt 1798 x := v.Args[1] 1799 if !(is32Bit(c)) { 1800 break 1801 } 1802 v.reset(OpAMD64ANDQconst) 1803 v.AuxInt = c 1804 v.AddArg(x) 1805 return true 1806 } 1807 // match: (ANDQ x x) 1808 // cond: 1809 // result: x 1810 for { 1811 x := v.Args[0] 1812 if x != v.Args[1] { 1813 break 1814 } 1815 v.reset(OpCopy) 1816 v.Type = x.Type 1817 v.AddArg(x) 1818 return true 1819 } 1820 return false 1821 } 1822 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { 1823 b := v.Block 1824 _ = b 1825 // match: (ANDQconst [c] (ANDQconst [d] x)) 1826 // cond: 1827 // result: (ANDQconst [c & d] x) 1828 for { 1829 c := v.AuxInt 1830 v_0 := v.Args[0] 1831 if v_0.Op != OpAMD64ANDQconst { 1832 break 1833 } 1834 d := v_0.AuxInt 1835 x := v_0.Args[0] 1836 v.reset(OpAMD64ANDQconst) 1837 v.AuxInt = c & d 1838 v.AddArg(x) 1839 return true 1840 } 1841 // match: (ANDQconst [0] _) 1842 // cond: 1843 // result: (MOVQconst [0]) 1844 for { 1845 if v.AuxInt != 0 { 1846 break 1847 } 1848 v.reset(OpAMD64MOVQconst) 1849 v.AuxInt = 0 1850 return true 1851 } 1852 // match: (ANDQconst [-1] x) 1853 // cond: 1854 // result: x 1855 for { 1856 if v.AuxInt != -1 { 1857 break 1858 } 1859 x := v.Args[0] 1860 v.reset(OpCopy) 1861 v.Type = x.Type 1862 v.AddArg(x) 1863 return true 1864 } 1865 // match: (ANDQconst [c] (MOVQconst [d])) 1866 // cond: 1867 // result: (MOVQconst [c&d]) 1868 for { 1869 c := v.AuxInt 1870 v_0 := v.Args[0] 1871 if v_0.Op != OpAMD64MOVQconst { 1872 break 1873 } 1874 d := v_0.AuxInt 1875 v.reset(OpAMD64MOVQconst) 1876 v.AuxInt = c & d 1877 return true 1878 } 1879 return false 1880 } 1881 func rewriteValueAMD64_OpAMD64ANDW(v *Value, config *Config) bool { 1882 b := v.Block 1883 _ = b 1884 // match: (ANDW x (MOVLconst [c])) 1885 // cond: 1886 // result: (ANDWconst [c] x) 1887 for { 1888 x := v.Args[0] 1889 v_1 := v.Args[1] 1890 if v_1.Op != OpAMD64MOVLconst { 1891 break 1892 } 1893 c := v_1.AuxInt 1894 v.reset(OpAMD64ANDWconst) 1895 v.AuxInt = c 1896 v.AddArg(x) 1897 return true 1898 } 1899 // match: (ANDW (MOVLconst [c]) x) 1900 // cond: 1901 // result: (ANDWconst [c] x) 1902 for { 1903 v_0 := v.Args[0] 1904 if v_0.Op != OpAMD64MOVLconst { 1905 break 1906 } 1907 c := v_0.AuxInt 1908 x := v.Args[1] 1909 v.reset(OpAMD64ANDWconst) 1910 v.AuxInt = c 1911 v.AddArg(x) 1912 return true 1913 } 1914 // match: (ANDW x (MOVWconst [c])) 1915 // cond: 1916 // result: (ANDWconst [c] x) 1917 for { 1918 x := v.Args[0] 1919 v_1 := v.Args[1] 1920 if v_1.Op != OpAMD64MOVWconst { 1921 break 1922 } 1923 c := v_1.AuxInt 1924 v.reset(OpAMD64ANDWconst) 1925 v.AuxInt = c 1926 v.AddArg(x) 1927 return true 1928 } 1929 // match: (ANDW (MOVWconst [c]) x) 1930 // cond: 1931 // result: (ANDWconst [c] x) 1932 for { 1933 v_0 := v.Args[0] 1934 if v_0.Op != OpAMD64MOVWconst { 1935 break 1936 } 1937 c := v_0.AuxInt 1938 x := v.Args[1] 1939 v.reset(OpAMD64ANDWconst) 1940 v.AuxInt = c 1941 v.AddArg(x) 1942 return true 1943 } 1944 // match: (ANDW x x) 1945 // cond: 1946 // result: x 1947 for { 1948 x := v.Args[0] 1949 if x != v.Args[1] { 1950 break 1951 } 1952 v.reset(OpCopy) 1953 v.Type = x.Type 1954 v.AddArg(x) 1955 return true 1956 } 1957 return false 1958 } 1959 func rewriteValueAMD64_OpAMD64ANDWconst(v *Value, config *Config) bool { 1960 b := v.Block 1961 _ = b 1962 // match: (ANDWconst [c] (ANDWconst [d] x)) 1963 // cond: 1964 // result: (ANDWconst [c & d] x) 1965 for { 1966 c := v.AuxInt 1967 v_0 := v.Args[0] 1968 if v_0.Op != OpAMD64ANDWconst { 1969 break 1970 } 1971 d := v_0.AuxInt 1972 x := v_0.Args[0] 1973 v.reset(OpAMD64ANDWconst) 1974 v.AuxInt = c & d 1975 v.AddArg(x) 1976 return true 1977 } 1978 // match: (ANDWconst [c] _) 1979 // cond: int16(c)==0 1980 // result: (MOVWconst [0]) 1981 for { 1982 c := v.AuxInt 1983 if !(int16(c) == 0) { 1984 break 1985 } 1986 v.reset(OpAMD64MOVWconst) 1987 v.AuxInt = 0 1988 return true 1989 } 1990 // match: (ANDWconst [c] x) 1991 // cond: int16(c)==-1 1992 // result: x 1993 for { 1994 c := v.AuxInt 1995 x := v.Args[0] 1996 if !(int16(c) == -1) { 1997 break 1998 } 1999 v.reset(OpCopy) 2000 v.Type = x.Type 2001 v.AddArg(x) 2002 return true 2003 } 2004 // match: (ANDWconst [c] (MOVWconst [d])) 2005 // cond: 2006 // result: (MOVWconst [c&d]) 2007 for { 2008 c := v.AuxInt 2009 v_0 := v.Args[0] 2010 if v_0.Op != OpAMD64MOVWconst { 2011 break 2012 } 2013 d := v_0.AuxInt 2014 v.reset(OpAMD64MOVWconst) 2015 v.AuxInt = c & d 2016 return true 2017 } 2018 return false 2019 } 2020 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { 2021 b := v.Block 2022 _ = b 2023 // match: (Add16 x y) 2024 // cond: 2025 // result: (ADDW x y) 2026 for { 2027 x := v.Args[0] 2028 y := v.Args[1] 2029 v.reset(OpAMD64ADDW) 2030 v.AddArg(x) 2031 v.AddArg(y) 2032 return true 2033 } 2034 return false 2035 } 2036 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool { 2037 b := v.Block 2038 _ = b 2039 // match: (Add32 x y) 2040 // cond: 2041 // result: (ADDL x y) 2042 for { 2043 x := v.Args[0] 2044 y := v.Args[1] 2045 v.reset(OpAMD64ADDL) 2046 v.AddArg(x) 2047 v.AddArg(y) 2048 return true 2049 } 2050 return false 2051 } 2052 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool { 2053 b := v.Block 2054 _ = b 2055 // match: (Add32F x y) 2056 // cond: 2057 // result: (ADDSS x y) 2058 for { 2059 x := v.Args[0] 2060 y := v.Args[1] 2061 v.reset(OpAMD64ADDSS) 2062 v.AddArg(x) 2063 v.AddArg(y) 2064 return true 2065 } 2066 return false 2067 } 2068 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool { 2069 b := v.Block 2070 _ = b 2071 // match: (Add64 x y) 2072 // cond: 2073 // result: (ADDQ x y) 2074 for { 2075 x := v.Args[0] 2076 y := v.Args[1] 2077 v.reset(OpAMD64ADDQ) 2078 v.AddArg(x) 2079 v.AddArg(y) 2080 return true 2081 } 2082 return false 2083 } 2084 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool { 2085 b := v.Block 2086 _ = b 2087 // match: (Add64F x y) 2088 // cond: 2089 // result: (ADDSD x y) 2090 for { 2091 x := v.Args[0] 2092 y := v.Args[1] 2093 v.reset(OpAMD64ADDSD) 2094 v.AddArg(x) 2095 v.AddArg(y) 2096 return true 2097 } 2098 return false 2099 } 2100 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { 2101 b := v.Block 2102 _ = b 2103 // match: (Add8 x y) 2104 // cond: 2105 // result: (ADDB x y) 2106 for { 2107 x := v.Args[0] 2108 y := v.Args[1] 2109 v.reset(OpAMD64ADDB) 2110 v.AddArg(x) 2111 v.AddArg(y) 2112 return true 2113 } 2114 return false 2115 } 2116 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool { 2117 b := v.Block 2118 _ = b 2119 // match: (AddPtr x y) 2120 // cond: 2121 // result: (ADDQ x y) 2122 for { 2123 x := v.Args[0] 2124 y := v.Args[1] 2125 v.reset(OpAMD64ADDQ) 2126 v.AddArg(x) 2127 v.AddArg(y) 2128 return true 2129 } 2130 return false 2131 } 2132 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool { 2133 b := v.Block 2134 _ = b 2135 // match: (Addr {sym} base) 2136 // cond: 2137 // result: (LEAQ {sym} base) 2138 for { 2139 sym := v.Aux 2140 base := v.Args[0] 2141 v.reset(OpAMD64LEAQ) 2142 v.Aux = sym 2143 v.AddArg(base) 2144 return true 2145 } 2146 return false 2147 } 2148 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { 2149 b := v.Block 2150 _ = b 2151 // match: (And16 x y) 2152 // cond: 2153 // result: (ANDW x y) 2154 for { 2155 x := v.Args[0] 2156 y := v.Args[1] 2157 v.reset(OpAMD64ANDW) 2158 v.AddArg(x) 2159 v.AddArg(y) 2160 return true 2161 } 2162 return false 2163 } 2164 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool { 2165 b := v.Block 2166 _ = b 2167 // match: (And32 x y) 2168 // cond: 2169 // result: (ANDL x y) 2170 for { 2171 x := v.Args[0] 2172 y := v.Args[1] 2173 v.reset(OpAMD64ANDL) 2174 v.AddArg(x) 2175 v.AddArg(y) 2176 return true 2177 } 2178 return false 2179 } 2180 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool { 2181 b := v.Block 2182 _ = b 2183 // match: (And64 x y) 2184 // cond: 2185 // result: (ANDQ x y) 2186 for { 2187 x := v.Args[0] 2188 y := v.Args[1] 2189 v.reset(OpAMD64ANDQ) 2190 v.AddArg(x) 2191 v.AddArg(y) 2192 return true 2193 } 2194 return false 2195 } 2196 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { 2197 b := v.Block 2198 _ = b 2199 // match: (And8 x y) 2200 // cond: 2201 // result: (ANDB x y) 2202 for { 2203 x := v.Args[0] 2204 y := v.Args[1] 2205 v.reset(OpAMD64ANDB) 2206 v.AddArg(x) 2207 v.AddArg(y) 2208 return true 2209 } 2210 return false 2211 } 2212 func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { 2213 b := v.Block 2214 _ = b 2215 // match: (Avg64u x y) 2216 // cond: 2217 // result: (AVGQU x y) 2218 for { 2219 x := v.Args[0] 2220 y := v.Args[1] 2221 v.reset(OpAMD64AVGQU) 2222 v.AddArg(x) 2223 v.AddArg(y) 2224 return true 2225 } 2226 return false 2227 } 2228 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool { 2229 b := v.Block 2230 _ = b 2231 // match: (Bswap32 x) 2232 // cond: 2233 // result: (BSWAPL x) 2234 for { 2235 x := v.Args[0] 2236 v.reset(OpAMD64BSWAPL) 2237 v.AddArg(x) 2238 return true 2239 } 2240 return false 2241 } 2242 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool { 2243 b := v.Block 2244 _ = b 2245 // match: (Bswap64 x) 2246 // cond: 2247 // result: (BSWAPQ x) 2248 for { 2249 x := v.Args[0] 2250 v.reset(OpAMD64BSWAPQ) 2251 v.AddArg(x) 2252 return true 2253 } 2254 return false 2255 } 2256 func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool { 2257 b := v.Block 2258 _ = b 2259 // match: (CMOVLEQconst x (InvertFlags y) [c]) 2260 // cond: 2261 // result: (CMOVLNEconst x y [c]) 2262 for { 2263 x := v.Args[0] 2264 v_1 := v.Args[1] 2265 if v_1.Op != OpAMD64InvertFlags { 2266 break 2267 } 2268 y := v_1.Args[0] 2269 c := v.AuxInt 2270 v.reset(OpAMD64CMOVLNEconst) 2271 v.AddArg(x) 2272 v.AddArg(y) 2273 v.AuxInt = c 2274 return true 2275 } 2276 // match: (CMOVLEQconst _ (FlagEQ) [c]) 2277 // cond: 2278 // result: (Const32 [c]) 2279 for { 2280 v_1 := v.Args[1] 2281 if v_1.Op != OpAMD64FlagEQ { 2282 break 2283 } 2284 c := v.AuxInt 2285 v.reset(OpConst32) 2286 v.AuxInt = c 2287 return true 2288 } 2289 // match: (CMOVLEQconst x (FlagLT_ULT)) 2290 // cond: 2291 // result: x 2292 for { 2293 x := v.Args[0] 2294 v_1 := v.Args[1] 2295 if v_1.Op != OpAMD64FlagLT_ULT { 2296 break 2297 } 2298 v.reset(OpCopy) 2299 v.Type = x.Type 2300 v.AddArg(x) 2301 return true 2302 } 2303 // match: (CMOVLEQconst x (FlagLT_UGT)) 2304 // cond: 2305 // result: x 2306 for { 2307 x := v.Args[0] 2308 v_1 := v.Args[1] 2309 if v_1.Op != OpAMD64FlagLT_UGT { 2310 break 2311 } 2312 v.reset(OpCopy) 2313 v.Type = x.Type 2314 v.AddArg(x) 2315 return true 2316 } 2317 // match: (CMOVLEQconst x (FlagGT_ULT)) 2318 // cond: 2319 // result: x 2320 for { 2321 x := v.Args[0] 2322 v_1 := v.Args[1] 2323 if v_1.Op != OpAMD64FlagGT_ULT { 2324 break 2325 } 2326 v.reset(OpCopy) 2327 v.Type = x.Type 2328 v.AddArg(x) 2329 return true 2330 } 2331 // match: (CMOVLEQconst x (FlagGT_UGT)) 2332 // cond: 2333 // result: x 2334 for { 2335 x := v.Args[0] 2336 v_1 := v.Args[1] 2337 if v_1.Op != OpAMD64FlagGT_UGT { 2338 break 2339 } 2340 v.reset(OpCopy) 2341 v.Type = x.Type 2342 v.AddArg(x) 2343 return true 2344 } 2345 return false 2346 } 2347 func rewriteValueAMD64_OpAMD64CMOVQEQconst(v *Value, config *Config) bool { 2348 b := v.Block 2349 _ = b 2350 // match: (CMOVQEQconst x (InvertFlags y) [c]) 2351 // cond: 2352 // result: (CMOVQNEconst x y [c]) 2353 for { 2354 x := v.Args[0] 2355 v_1 := v.Args[1] 2356 if v_1.Op != OpAMD64InvertFlags { 2357 break 2358 } 2359 y := v_1.Args[0] 2360 c := v.AuxInt 2361 v.reset(OpAMD64CMOVQNEconst) 2362 v.AddArg(x) 2363 v.AddArg(y) 2364 v.AuxInt = c 2365 return true 2366 } 2367 // match: (CMOVQEQconst _ (FlagEQ) [c]) 2368 // cond: 2369 // result: (Const64 [c]) 2370 for { 2371 v_1 := v.Args[1] 2372 if v_1.Op != OpAMD64FlagEQ { 2373 break 2374 } 2375 c := v.AuxInt 2376 v.reset(OpConst64) 2377 v.AuxInt = c 2378 return true 2379 } 2380 // match: (CMOVQEQconst x (FlagLT_ULT)) 2381 // cond: 2382 // result: x 2383 for { 2384 x := v.Args[0] 2385 v_1 := v.Args[1] 2386 if v_1.Op != OpAMD64FlagLT_ULT { 2387 break 2388 } 2389 v.reset(OpCopy) 2390 v.Type = x.Type 2391 v.AddArg(x) 2392 return true 2393 } 2394 // match: (CMOVQEQconst x (FlagLT_UGT)) 2395 // cond: 2396 // result: x 2397 for { 2398 x := v.Args[0] 2399 v_1 := v.Args[1] 2400 if v_1.Op != OpAMD64FlagLT_UGT { 2401 break 2402 } 2403 v.reset(OpCopy) 2404 v.Type = x.Type 2405 v.AddArg(x) 2406 return true 2407 } 2408 // match: (CMOVQEQconst x (FlagGT_ULT)) 2409 // cond: 2410 // result: x 2411 for { 2412 x := v.Args[0] 2413 v_1 := v.Args[1] 2414 if v_1.Op != OpAMD64FlagGT_ULT { 2415 break 2416 } 2417 v.reset(OpCopy) 2418 v.Type = x.Type 2419 v.AddArg(x) 2420 return true 2421 } 2422 // match: (CMOVQEQconst x (FlagGT_UGT)) 2423 // cond: 2424 // result: x 2425 for { 2426 x := v.Args[0] 2427 v_1 := v.Args[1] 2428 if v_1.Op != OpAMD64FlagGT_UGT { 2429 break 2430 } 2431 v.reset(OpCopy) 2432 v.Type = x.Type 2433 v.AddArg(x) 2434 return true 2435 } 2436 return false 2437 } 2438 func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool { 2439 b := v.Block 2440 _ = b 2441 // match: (CMOVWEQconst x (InvertFlags y) [c]) 2442 // cond: 2443 // result: (CMOVWNEconst x y [c]) 2444 for { 2445 x := v.Args[0] 2446 v_1 := v.Args[1] 2447 if v_1.Op != OpAMD64InvertFlags { 2448 break 2449 } 2450 y := v_1.Args[0] 2451 c := v.AuxInt 2452 v.reset(OpAMD64CMOVWNEconst) 2453 v.AddArg(x) 2454 v.AddArg(y) 2455 v.AuxInt = c 2456 return true 2457 } 2458 // match: (CMOVWEQconst _ (FlagEQ) [c]) 2459 // cond: 2460 // result: (Const16 [c]) 2461 for { 2462 v_1 := v.Args[1] 2463 if v_1.Op != OpAMD64FlagEQ { 2464 break 2465 } 2466 c := v.AuxInt 2467 v.reset(OpConst16) 2468 v.AuxInt = c 2469 return true 2470 } 2471 // match: (CMOVWEQconst x (FlagLT_ULT)) 2472 // cond: 2473 // result: x 2474 for { 2475 x := v.Args[0] 2476 v_1 := v.Args[1] 2477 if v_1.Op != OpAMD64FlagLT_ULT { 2478 break 2479 } 2480 v.reset(OpCopy) 2481 v.Type = x.Type 2482 v.AddArg(x) 2483 return true 2484 } 2485 // match: (CMOVWEQconst x (FlagLT_UGT)) 2486 // cond: 2487 // result: x 2488 for { 2489 x := v.Args[0] 2490 v_1 := v.Args[1] 2491 if v_1.Op != OpAMD64FlagLT_UGT { 2492 break 2493 } 2494 v.reset(OpCopy) 2495 v.Type = x.Type 2496 v.AddArg(x) 2497 return true 2498 } 2499 // match: (CMOVWEQconst x (FlagGT_ULT)) 2500 // cond: 2501 // result: x 2502 for { 2503 x := v.Args[0] 2504 v_1 := v.Args[1] 2505 if v_1.Op != OpAMD64FlagGT_ULT { 2506 break 2507 } 2508 v.reset(OpCopy) 2509 v.Type = x.Type 2510 v.AddArg(x) 2511 return true 2512 } 2513 // match: (CMOVWEQconst x (FlagGT_UGT)) 2514 // cond: 2515 // result: x 2516 for { 2517 x := v.Args[0] 2518 v_1 := v.Args[1] 2519 if v_1.Op != OpAMD64FlagGT_UGT { 2520 break 2521 } 2522 v.reset(OpCopy) 2523 v.Type = x.Type 2524 v.AddArg(x) 2525 return true 2526 } 2527 return false 2528 } 2529 func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { 2530 b := v.Block 2531 _ = b 2532 // match: (CMPB x (MOVBconst [c])) 2533 // cond: 2534 // result: (CMPBconst x [c]) 2535 for { 2536 x := v.Args[0] 2537 v_1 := v.Args[1] 2538 if v_1.Op != OpAMD64MOVBconst { 2539 break 2540 } 2541 c := v_1.AuxInt 2542 v.reset(OpAMD64CMPBconst) 2543 v.AddArg(x) 2544 v.AuxInt = c 2545 return true 2546 } 2547 // match: (CMPB (MOVBconst [c]) x) 2548 // cond: 2549 // result: (InvertFlags (CMPBconst x [c])) 2550 for { 2551 v_0 := v.Args[0] 2552 if v_0.Op != OpAMD64MOVBconst { 2553 break 2554 } 2555 c := v_0.AuxInt 2556 x := v.Args[1] 2557 v.reset(OpAMD64InvertFlags) 2558 v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 2559 v0.AddArg(x) 2560 v0.AuxInt = c 2561 v.AddArg(v0) 2562 return true 2563 } 2564 return false 2565 } 2566 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { 2567 b := v.Block 2568 _ = b 2569 // match: (CMPBconst (MOVBconst [x]) [y]) 2570 // cond: int8(x)==int8(y) 2571 // result: (FlagEQ) 2572 for { 2573 v_0 := v.Args[0] 2574 if v_0.Op != OpAMD64MOVBconst { 2575 break 2576 } 2577 x := v_0.AuxInt 2578 y := v.AuxInt 2579 if !(int8(x) == int8(y)) { 2580 break 2581 } 2582 v.reset(OpAMD64FlagEQ) 2583 return true 2584 } 2585 // match: (CMPBconst (MOVBconst [x]) [y]) 2586 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 2587 // result: (FlagLT_ULT) 2588 for { 2589 v_0 := v.Args[0] 2590 if v_0.Op != OpAMD64MOVBconst { 2591 break 2592 } 2593 x := v_0.AuxInt 2594 y := v.AuxInt 2595 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 2596 break 2597 } 2598 v.reset(OpAMD64FlagLT_ULT) 2599 return true 2600 } 2601 // match: (CMPBconst (MOVBconst [x]) [y]) 2602 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 2603 // result: (FlagLT_UGT) 2604 for { 2605 v_0 := v.Args[0] 2606 if v_0.Op != OpAMD64MOVBconst { 2607 break 2608 } 2609 x := v_0.AuxInt 2610 y := v.AuxInt 2611 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 2612 break 2613 } 2614 v.reset(OpAMD64FlagLT_UGT) 2615 return true 2616 } 2617 // match: (CMPBconst (MOVBconst [x]) [y]) 2618 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 2619 // result: (FlagGT_ULT) 2620 for { 2621 v_0 := v.Args[0] 2622 if v_0.Op != OpAMD64MOVBconst { 2623 break 2624 } 2625 x := v_0.AuxInt 2626 y := v.AuxInt 2627 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 2628 break 2629 } 2630 v.reset(OpAMD64FlagGT_ULT) 2631 return true 2632 } 2633 // match: (CMPBconst (MOVBconst [x]) [y]) 2634 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 2635 // result: (FlagGT_UGT) 2636 for { 2637 v_0 := v.Args[0] 2638 if v_0.Op != OpAMD64MOVBconst { 2639 break 2640 } 2641 x := v_0.AuxInt 2642 y := v.AuxInt 2643 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 2644 break 2645 } 2646 v.reset(OpAMD64FlagGT_UGT) 2647 return true 2648 } 2649 // match: (CMPBconst (ANDBconst _ [m]) [n]) 2650 // cond: 0 <= int8(m) && int8(m) < int8(n) 2651 // result: (FlagLT_ULT) 2652 for { 2653 v_0 := v.Args[0] 2654 if v_0.Op != OpAMD64ANDBconst { 2655 break 2656 } 2657 m := v_0.AuxInt 2658 n := v.AuxInt 2659 if !(0 <= int8(m) && int8(m) < int8(n)) { 2660 break 2661 } 2662 v.reset(OpAMD64FlagLT_ULT) 2663 return true 2664 } 2665 // match: (CMPBconst (ANDB x y) [0]) 2666 // cond: 2667 // result: (TESTB x y) 2668 for { 2669 v_0 := v.Args[0] 2670 if v_0.Op != OpAMD64ANDB { 2671 break 2672 } 2673 x := v_0.Args[0] 2674 y := v_0.Args[1] 2675 if v.AuxInt != 0 { 2676 break 2677 } 2678 v.reset(OpAMD64TESTB) 2679 v.AddArg(x) 2680 v.AddArg(y) 2681 return true 2682 } 2683 // match: (CMPBconst (ANDBconst [c] x) [0]) 2684 // cond: 2685 // result: (TESTBconst [c] x) 2686 for { 2687 v_0 := v.Args[0] 2688 if v_0.Op != OpAMD64ANDBconst { 2689 break 2690 } 2691 c := v_0.AuxInt 2692 x := v_0.Args[0] 2693 if v.AuxInt != 0 { 2694 break 2695 } 2696 v.reset(OpAMD64TESTBconst) 2697 v.AuxInt = c 2698 v.AddArg(x) 2699 return true 2700 } 2701 // match: (CMPBconst x [0]) 2702 // cond: 2703 // result: (TESTB x x) 2704 for { 2705 x := v.Args[0] 2706 if v.AuxInt != 0 { 2707 break 2708 } 2709 v.reset(OpAMD64TESTB) 2710 v.AddArg(x) 2711 v.AddArg(x) 2712 return true 2713 } 2714 return false 2715 } 2716 func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool { 2717 b := v.Block 2718 _ = b 2719 // match: (CMPL x (MOVLconst [c])) 2720 // cond: 2721 // result: (CMPLconst x [c]) 2722 for { 2723 x := v.Args[0] 2724 v_1 := v.Args[1] 2725 if v_1.Op != OpAMD64MOVLconst { 2726 break 2727 } 2728 c := v_1.AuxInt 2729 v.reset(OpAMD64CMPLconst) 2730 v.AddArg(x) 2731 v.AuxInt = c 2732 return true 2733 } 2734 // match: (CMPL (MOVLconst [c]) x) 2735 // cond: 2736 // result: (InvertFlags (CMPLconst x [c])) 2737 for { 2738 v_0 := v.Args[0] 2739 if v_0.Op != OpAMD64MOVLconst { 2740 break 2741 } 2742 c := v_0.AuxInt 2743 x := v.Args[1] 2744 v.reset(OpAMD64InvertFlags) 2745 v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 2746 v0.AddArg(x) 2747 v0.AuxInt = c 2748 v.AddArg(v0) 2749 return true 2750 } 2751 return false 2752 } 2753 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { 2754 b := v.Block 2755 _ = b 2756 // match: (CMPLconst (MOVLconst [x]) [y]) 2757 // cond: int32(x)==int32(y) 2758 // result: (FlagEQ) 2759 for { 2760 v_0 := v.Args[0] 2761 if v_0.Op != OpAMD64MOVLconst { 2762 break 2763 } 2764 x := v_0.AuxInt 2765 y := v.AuxInt 2766 if !(int32(x) == int32(y)) { 2767 break 2768 } 2769 v.reset(OpAMD64FlagEQ) 2770 return true 2771 } 2772 // match: (CMPLconst (MOVLconst [x]) [y]) 2773 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 2774 // result: (FlagLT_ULT) 2775 for { 2776 v_0 := v.Args[0] 2777 if v_0.Op != OpAMD64MOVLconst { 2778 break 2779 } 2780 x := v_0.AuxInt 2781 y := v.AuxInt 2782 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 2783 break 2784 } 2785 v.reset(OpAMD64FlagLT_ULT) 2786 return true 2787 } 2788 // match: (CMPLconst (MOVLconst [x]) [y]) 2789 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 2790 // result: (FlagLT_UGT) 2791 for { 2792 v_0 := v.Args[0] 2793 if v_0.Op != OpAMD64MOVLconst { 2794 break 2795 } 2796 x := v_0.AuxInt 2797 y := v.AuxInt 2798 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 2799 break 2800 } 2801 v.reset(OpAMD64FlagLT_UGT) 2802 return true 2803 } 2804 // match: (CMPLconst (MOVLconst [x]) [y]) 2805 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 2806 // result: (FlagGT_ULT) 2807 for { 2808 v_0 := v.Args[0] 2809 if v_0.Op != OpAMD64MOVLconst { 2810 break 2811 } 2812 x := v_0.AuxInt 2813 y := v.AuxInt 2814 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 2815 break 2816 } 2817 v.reset(OpAMD64FlagGT_ULT) 2818 return true 2819 } 2820 // match: (CMPLconst (MOVLconst [x]) [y]) 2821 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 2822 // result: (FlagGT_UGT) 2823 for { 2824 v_0 := v.Args[0] 2825 if v_0.Op != OpAMD64MOVLconst { 2826 break 2827 } 2828 x := v_0.AuxInt 2829 y := v.AuxInt 2830 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 2831 break 2832 } 2833 v.reset(OpAMD64FlagGT_UGT) 2834 return true 2835 } 2836 // match: (CMPLconst (ANDLconst _ [m]) [n]) 2837 // cond: 0 <= int32(m) && int32(m) < int32(n) 2838 // result: (FlagLT_ULT) 2839 for { 2840 v_0 := v.Args[0] 2841 if v_0.Op != OpAMD64ANDLconst { 2842 break 2843 } 2844 m := v_0.AuxInt 2845 n := v.AuxInt 2846 if !(0 <= int32(m) && int32(m) < int32(n)) { 2847 break 2848 } 2849 v.reset(OpAMD64FlagLT_ULT) 2850 return true 2851 } 2852 // match: (CMPLconst (ANDL x y) [0]) 2853 // cond: 2854 // result: (TESTL x y) 2855 for { 2856 v_0 := v.Args[0] 2857 if v_0.Op != OpAMD64ANDL { 2858 break 2859 } 2860 x := v_0.Args[0] 2861 y := v_0.Args[1] 2862 if v.AuxInt != 0 { 2863 break 2864 } 2865 v.reset(OpAMD64TESTL) 2866 v.AddArg(x) 2867 v.AddArg(y) 2868 return true 2869 } 2870 // match: (CMPLconst (ANDLconst [c] x) [0]) 2871 // cond: 2872 // result: (TESTLconst [c] x) 2873 for { 2874 v_0 := v.Args[0] 2875 if v_0.Op != OpAMD64ANDLconst { 2876 break 2877 } 2878 c := v_0.AuxInt 2879 x := v_0.Args[0] 2880 if v.AuxInt != 0 { 2881 break 2882 } 2883 v.reset(OpAMD64TESTLconst) 2884 v.AuxInt = c 2885 v.AddArg(x) 2886 return true 2887 } 2888 // match: (CMPLconst x [0]) 2889 // cond: 2890 // result: (TESTL x x) 2891 for { 2892 x := v.Args[0] 2893 if v.AuxInt != 0 { 2894 break 2895 } 2896 v.reset(OpAMD64TESTL) 2897 v.AddArg(x) 2898 v.AddArg(x) 2899 return true 2900 } 2901 return false 2902 } 2903 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool { 2904 b := v.Block 2905 _ = b 2906 // match: (CMPQ x (MOVQconst [c])) 2907 // cond: is32Bit(c) 2908 // result: (CMPQconst x [c]) 2909 for { 2910 x := v.Args[0] 2911 v_1 := v.Args[1] 2912 if v_1.Op != OpAMD64MOVQconst { 2913 break 2914 } 2915 c := v_1.AuxInt 2916 if !(is32Bit(c)) { 2917 break 2918 } 2919 v.reset(OpAMD64CMPQconst) 2920 v.AddArg(x) 2921 v.AuxInt = c 2922 return true 2923 } 2924 // match: (CMPQ (MOVQconst [c]) x) 2925 // cond: is32Bit(c) 2926 // result: (InvertFlags (CMPQconst x [c])) 2927 for { 2928 v_0 := v.Args[0] 2929 if v_0.Op != OpAMD64MOVQconst { 2930 break 2931 } 2932 c := v_0.AuxInt 2933 x := v.Args[1] 2934 if !(is32Bit(c)) { 2935 break 2936 } 2937 v.reset(OpAMD64InvertFlags) 2938 v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 2939 v0.AddArg(x) 2940 v0.AuxInt = c 2941 v.AddArg(v0) 2942 return true 2943 } 2944 return false 2945 } 2946 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { 2947 b := v.Block 2948 _ = b 2949 // match: (CMPQconst (MOVQconst [x]) [y]) 2950 // cond: x==y 2951 // result: (FlagEQ) 2952 for { 2953 v_0 := v.Args[0] 2954 if v_0.Op != OpAMD64MOVQconst { 2955 break 2956 } 2957 x := v_0.AuxInt 2958 y := v.AuxInt 2959 if !(x == y) { 2960 break 2961 } 2962 v.reset(OpAMD64FlagEQ) 2963 return true 2964 } 2965 // match: (CMPQconst (MOVQconst [x]) [y]) 2966 // cond: x<y && uint64(x)<uint64(y) 2967 // result: (FlagLT_ULT) 2968 for { 2969 v_0 := v.Args[0] 2970 if v_0.Op != OpAMD64MOVQconst { 2971 break 2972 } 2973 x := v_0.AuxInt 2974 y := v.AuxInt 2975 if !(x < y && uint64(x) < uint64(y)) { 2976 break 2977 } 2978 v.reset(OpAMD64FlagLT_ULT) 2979 return true 2980 } 2981 // match: (CMPQconst (MOVQconst [x]) [y]) 2982 // cond: x<y && uint64(x)>uint64(y) 2983 // result: (FlagLT_UGT) 2984 for { 2985 v_0 := v.Args[0] 2986 if v_0.Op != OpAMD64MOVQconst { 2987 break 2988 } 2989 x := v_0.AuxInt 2990 y := v.AuxInt 2991 if !(x < y && uint64(x) > uint64(y)) { 2992 break 2993 } 2994 v.reset(OpAMD64FlagLT_UGT) 2995 return true 2996 } 2997 // match: (CMPQconst (MOVQconst [x]) [y]) 2998 // cond: x>y && uint64(x)<uint64(y) 2999 // result: (FlagGT_ULT) 3000 for { 3001 v_0 := v.Args[0] 3002 if v_0.Op != OpAMD64MOVQconst { 3003 break 3004 } 3005 x := v_0.AuxInt 3006 y := v.AuxInt 3007 if !(x > y && uint64(x) < uint64(y)) { 3008 break 3009 } 3010 v.reset(OpAMD64FlagGT_ULT) 3011 return true 3012 } 3013 // match: (CMPQconst (MOVQconst [x]) [y]) 3014 // cond: x>y && uint64(x)>uint64(y) 3015 // result: (FlagGT_UGT) 3016 for { 3017 v_0 := v.Args[0] 3018 if v_0.Op != OpAMD64MOVQconst { 3019 break 3020 } 3021 x := v_0.AuxInt 3022 y := v.AuxInt 3023 if !(x > y && uint64(x) > uint64(y)) { 3024 break 3025 } 3026 v.reset(OpAMD64FlagGT_UGT) 3027 return true 3028 } 3029 // match: (CMPQconst (ANDQconst _ [m]) [n]) 3030 // cond: 0 <= m && m < n 3031 // result: (FlagLT_ULT) 3032 for { 3033 v_0 := v.Args[0] 3034 if v_0.Op != OpAMD64ANDQconst { 3035 break 3036 } 3037 m := v_0.AuxInt 3038 n := v.AuxInt 3039 if !(0 <= m && m < n) { 3040 break 3041 } 3042 v.reset(OpAMD64FlagLT_ULT) 3043 return true 3044 } 3045 // match: (CMPQconst (ANDQ x y) [0]) 3046 // cond: 3047 // result: (TESTQ x y) 3048 for { 3049 v_0 := v.Args[0] 3050 if v_0.Op != OpAMD64ANDQ { 3051 break 3052 } 3053 x := v_0.Args[0] 3054 y := v_0.Args[1] 3055 if v.AuxInt != 0 { 3056 break 3057 } 3058 v.reset(OpAMD64TESTQ) 3059 v.AddArg(x) 3060 v.AddArg(y) 3061 return true 3062 } 3063 // match: (CMPQconst (ANDQconst [c] x) [0]) 3064 // cond: 3065 // result: (TESTQconst [c] x) 3066 for { 3067 v_0 := v.Args[0] 3068 if v_0.Op != OpAMD64ANDQconst { 3069 break 3070 } 3071 c := v_0.AuxInt 3072 x := v_0.Args[0] 3073 if v.AuxInt != 0 { 3074 break 3075 } 3076 v.reset(OpAMD64TESTQconst) 3077 v.AuxInt = c 3078 v.AddArg(x) 3079 return true 3080 } 3081 // match: (CMPQconst x [0]) 3082 // cond: 3083 // result: (TESTQ x x) 3084 for { 3085 x := v.Args[0] 3086 if v.AuxInt != 0 { 3087 break 3088 } 3089 v.reset(OpAMD64TESTQ) 3090 v.AddArg(x) 3091 v.AddArg(x) 3092 return true 3093 } 3094 return false 3095 } 3096 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { 3097 b := v.Block 3098 _ = b 3099 // match: (CMPW x (MOVWconst [c])) 3100 // cond: 3101 // result: (CMPWconst x [c]) 3102 for { 3103 x := v.Args[0] 3104 v_1 := v.Args[1] 3105 if v_1.Op != OpAMD64MOVWconst { 3106 break 3107 } 3108 c := v_1.AuxInt 3109 v.reset(OpAMD64CMPWconst) 3110 v.AddArg(x) 3111 v.AuxInt = c 3112 return true 3113 } 3114 // match: (CMPW (MOVWconst [c]) x) 3115 // cond: 3116 // result: (InvertFlags (CMPWconst x [c])) 3117 for { 3118 v_0 := v.Args[0] 3119 if v_0.Op != OpAMD64MOVWconst { 3120 break 3121 } 3122 c := v_0.AuxInt 3123 x := v.Args[1] 3124 v.reset(OpAMD64InvertFlags) 3125 v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 3126 v0.AddArg(x) 3127 v0.AuxInt = c 3128 v.AddArg(v0) 3129 return true 3130 } 3131 return false 3132 } 3133 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { 3134 b := v.Block 3135 _ = b 3136 // match: (CMPWconst (MOVWconst [x]) [y]) 3137 // cond: int16(x)==int16(y) 3138 // result: (FlagEQ) 3139 for { 3140 v_0 := v.Args[0] 3141 if v_0.Op != OpAMD64MOVWconst { 3142 break 3143 } 3144 x := v_0.AuxInt 3145 y := v.AuxInt 3146 if !(int16(x) == int16(y)) { 3147 break 3148 } 3149 v.reset(OpAMD64FlagEQ) 3150 return true 3151 } 3152 // match: (CMPWconst (MOVWconst [x]) [y]) 3153 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 3154 // result: (FlagLT_ULT) 3155 for { 3156 v_0 := v.Args[0] 3157 if v_0.Op != OpAMD64MOVWconst { 3158 break 3159 } 3160 x := v_0.AuxInt 3161 y := v.AuxInt 3162 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 3163 break 3164 } 3165 v.reset(OpAMD64FlagLT_ULT) 3166 return true 3167 } 3168 // match: (CMPWconst (MOVWconst [x]) [y]) 3169 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 3170 // result: (FlagLT_UGT) 3171 for { 3172 v_0 := v.Args[0] 3173 if v_0.Op != OpAMD64MOVWconst { 3174 break 3175 } 3176 x := v_0.AuxInt 3177 y := v.AuxInt 3178 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 3179 break 3180 } 3181 v.reset(OpAMD64FlagLT_UGT) 3182 return true 3183 } 3184 // match: (CMPWconst (MOVWconst [x]) [y]) 3185 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 3186 // result: (FlagGT_ULT) 3187 for { 3188 v_0 := v.Args[0] 3189 if v_0.Op != OpAMD64MOVWconst { 3190 break 3191 } 3192 x := v_0.AuxInt 3193 y := v.AuxInt 3194 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 3195 break 3196 } 3197 v.reset(OpAMD64FlagGT_ULT) 3198 return true 3199 } 3200 // match: (CMPWconst (MOVWconst [x]) [y]) 3201 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 3202 // result: (FlagGT_UGT) 3203 for { 3204 v_0 := v.Args[0] 3205 if v_0.Op != OpAMD64MOVWconst { 3206 break 3207 } 3208 x := v_0.AuxInt 3209 y := v.AuxInt 3210 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 3211 break 3212 } 3213 v.reset(OpAMD64FlagGT_UGT) 3214 return true 3215 } 3216 // match: (CMPWconst (ANDWconst _ [m]) [n]) 3217 // cond: 0 <= int16(m) && int16(m) < int16(n) 3218 // result: (FlagLT_ULT) 3219 for { 3220 v_0 := v.Args[0] 3221 if v_0.Op != OpAMD64ANDWconst { 3222 break 3223 } 3224 m := v_0.AuxInt 3225 n := v.AuxInt 3226 if !(0 <= int16(m) && int16(m) < int16(n)) { 3227 break 3228 } 3229 v.reset(OpAMD64FlagLT_ULT) 3230 return true 3231 } 3232 // match: (CMPWconst (ANDW x y) [0]) 3233 // cond: 3234 // result: (TESTW x y) 3235 for { 3236 v_0 := v.Args[0] 3237 if v_0.Op != OpAMD64ANDW { 3238 break 3239 } 3240 x := v_0.Args[0] 3241 y := v_0.Args[1] 3242 if v.AuxInt != 0 { 3243 break 3244 } 3245 v.reset(OpAMD64TESTW) 3246 v.AddArg(x) 3247 v.AddArg(y) 3248 return true 3249 } 3250 // match: (CMPWconst (ANDWconst [c] x) [0]) 3251 // cond: 3252 // result: (TESTWconst [c] x) 3253 for { 3254 v_0 := v.Args[0] 3255 if v_0.Op != OpAMD64ANDWconst { 3256 break 3257 } 3258 c := v_0.AuxInt 3259 x := v_0.Args[0] 3260 if v.AuxInt != 0 { 3261 break 3262 } 3263 v.reset(OpAMD64TESTWconst) 3264 v.AuxInt = c 3265 v.AddArg(x) 3266 return true 3267 } 3268 // match: (CMPWconst x [0]) 3269 // cond: 3270 // result: (TESTW x x) 3271 for { 3272 x := v.Args[0] 3273 if v.AuxInt != 0 { 3274 break 3275 } 3276 v.reset(OpAMD64TESTW) 3277 v.AddArg(x) 3278 v.AddArg(x) 3279 return true 3280 } 3281 return false 3282 } 3283 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool { 3284 b := v.Block 3285 _ = b 3286 // match: (ClosureCall [argwid] entry closure mem) 3287 // cond: 3288 // result: (CALLclosure [argwid] entry closure mem) 3289 for { 3290 argwid := v.AuxInt 3291 entry := v.Args[0] 3292 closure := v.Args[1] 3293 mem := v.Args[2] 3294 v.reset(OpAMD64CALLclosure) 3295 v.AuxInt = argwid 3296 v.AddArg(entry) 3297 v.AddArg(closure) 3298 v.AddArg(mem) 3299 return true 3300 } 3301 return false 3302 } 3303 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { 3304 b := v.Block 3305 _ = b 3306 // match: (Com16 x) 3307 // cond: 3308 // result: (NOTW x) 3309 for { 3310 x := v.Args[0] 3311 v.reset(OpAMD64NOTW) 3312 v.AddArg(x) 3313 return true 3314 } 3315 return false 3316 } 3317 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool { 3318 b := v.Block 3319 _ = b 3320 // match: (Com32 x) 3321 // cond: 3322 // result: (NOTL x) 3323 for { 3324 x := v.Args[0] 3325 v.reset(OpAMD64NOTL) 3326 v.AddArg(x) 3327 return true 3328 } 3329 return false 3330 } 3331 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool { 3332 b := v.Block 3333 _ = b 3334 // match: (Com64 x) 3335 // cond: 3336 // result: (NOTQ x) 3337 for { 3338 x := v.Args[0] 3339 v.reset(OpAMD64NOTQ) 3340 v.AddArg(x) 3341 return true 3342 } 3343 return false 3344 } 3345 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { 3346 b := v.Block 3347 _ = b 3348 // match: (Com8 x) 3349 // cond: 3350 // result: (NOTB x) 3351 for { 3352 x := v.Args[0] 3353 v.reset(OpAMD64NOTB) 3354 v.AddArg(x) 3355 return true 3356 } 3357 return false 3358 } 3359 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { 3360 b := v.Block 3361 _ = b 3362 // match: (Const16 [val]) 3363 // cond: 3364 // result: (MOVWconst [val]) 3365 for { 3366 val := v.AuxInt 3367 v.reset(OpAMD64MOVWconst) 3368 v.AuxInt = val 3369 return true 3370 } 3371 return false 3372 } 3373 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool { 3374 b := v.Block 3375 _ = b 3376 // match: (Const32 [val]) 3377 // cond: 3378 // result: (MOVLconst [val]) 3379 for { 3380 val := v.AuxInt 3381 v.reset(OpAMD64MOVLconst) 3382 v.AuxInt = val 3383 return true 3384 } 3385 return false 3386 } 3387 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool { 3388 b := v.Block 3389 _ = b 3390 // match: (Const32F [val]) 3391 // cond: 3392 // result: (MOVSSconst [val]) 3393 for { 3394 val := v.AuxInt 3395 v.reset(OpAMD64MOVSSconst) 3396 v.AuxInt = val 3397 return true 3398 } 3399 return false 3400 } 3401 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool { 3402 b := v.Block 3403 _ = b 3404 // match: (Const64 [val]) 3405 // cond: 3406 // result: (MOVQconst [val]) 3407 for { 3408 val := v.AuxInt 3409 v.reset(OpAMD64MOVQconst) 3410 v.AuxInt = val 3411 return true 3412 } 3413 return false 3414 } 3415 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool { 3416 b := v.Block 3417 _ = b 3418 // match: (Const64F [val]) 3419 // cond: 3420 // result: (MOVSDconst [val]) 3421 for { 3422 val := v.AuxInt 3423 v.reset(OpAMD64MOVSDconst) 3424 v.AuxInt = val 3425 return true 3426 } 3427 return false 3428 } 3429 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { 3430 b := v.Block 3431 _ = b 3432 // match: (Const8 [val]) 3433 // cond: 3434 // result: (MOVBconst [val]) 3435 for { 3436 val := v.AuxInt 3437 v.reset(OpAMD64MOVBconst) 3438 v.AuxInt = val 3439 return true 3440 } 3441 return false 3442 } 3443 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { 3444 b := v.Block 3445 _ = b 3446 // match: (ConstBool [b]) 3447 // cond: 3448 // result: (MOVBconst [b]) 3449 for { 3450 b := v.AuxInt 3451 v.reset(OpAMD64MOVBconst) 3452 v.AuxInt = b 3453 return true 3454 } 3455 return false 3456 } 3457 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool { 3458 b := v.Block 3459 _ = b 3460 // match: (ConstNil) 3461 // cond: 3462 // result: (MOVQconst [0]) 3463 for { 3464 v.reset(OpAMD64MOVQconst) 3465 v.AuxInt = 0 3466 return true 3467 } 3468 return false 3469 } 3470 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool { 3471 b := v.Block 3472 _ = b 3473 // match: (Convert <t> x mem) 3474 // cond: 3475 // result: (MOVQconvert <t> x mem) 3476 for { 3477 t := v.Type 3478 x := v.Args[0] 3479 mem := v.Args[1] 3480 v.reset(OpAMD64MOVQconvert) 3481 v.Type = t 3482 v.AddArg(x) 3483 v.AddArg(mem) 3484 return true 3485 } 3486 return false 3487 } 3488 func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool { 3489 b := v.Block 3490 _ = b 3491 // match: (Ctz16 <t> x) 3492 // cond: 3493 // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16]) 3494 for { 3495 t := v.Type 3496 x := v.Args[0] 3497 v.reset(OpAMD64CMOVWEQconst) 3498 v0 := b.NewValue0(v.Line, OpAMD64BSFW, t) 3499 v0.AddArg(x) 3500 v.AddArg(v0) 3501 v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 3502 v1.AddArg(x) 3503 v1.AuxInt = 0 3504 v.AddArg(v1) 3505 v.AuxInt = 16 3506 return true 3507 } 3508 return false 3509 } 3510 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool { 3511 b := v.Block 3512 _ = b 3513 // match: (Ctz32 <t> x) 3514 // cond: 3515 // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32]) 3516 for { 3517 t := v.Type 3518 x := v.Args[0] 3519 v.reset(OpAMD64CMOVLEQconst) 3520 v0 := b.NewValue0(v.Line, OpAMD64BSFL, t) 3521 v0.AddArg(x) 3522 v.AddArg(v0) 3523 v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 3524 v1.AddArg(x) 3525 v1.AuxInt = 0 3526 v.AddArg(v1) 3527 v.AuxInt = 32 3528 return true 3529 } 3530 return false 3531 } 3532 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool { 3533 b := v.Block 3534 _ = b 3535 // match: (Ctz64 <t> x) 3536 // cond: 3537 // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64]) 3538 for { 3539 t := v.Type 3540 x := v.Args[0] 3541 v.reset(OpAMD64CMOVQEQconst) 3542 v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t) 3543 v0.AddArg(x) 3544 v.AddArg(v0) 3545 v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 3546 v1.AddArg(x) 3547 v1.AuxInt = 0 3548 v.AddArg(v1) 3549 v.AuxInt = 64 3550 return true 3551 } 3552 return false 3553 } 3554 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool { 3555 b := v.Block 3556 _ = b 3557 // match: (Cvt32Fto32 x) 3558 // cond: 3559 // result: (CVTTSS2SL x) 3560 for { 3561 x := v.Args[0] 3562 v.reset(OpAMD64CVTTSS2SL) 3563 v.AddArg(x) 3564 return true 3565 } 3566 return false 3567 } 3568 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool { 3569 b := v.Block 3570 _ = b 3571 // match: (Cvt32Fto64 x) 3572 // cond: 3573 // result: (CVTTSS2SQ x) 3574 for { 3575 x := v.Args[0] 3576 v.reset(OpAMD64CVTTSS2SQ) 3577 v.AddArg(x) 3578 return true 3579 } 3580 return false 3581 } 3582 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool { 3583 b := v.Block 3584 _ = b 3585 // match: (Cvt32Fto64F x) 3586 // cond: 3587 // result: (CVTSS2SD x) 3588 for { 3589 x := v.Args[0] 3590 v.reset(OpAMD64CVTSS2SD) 3591 v.AddArg(x) 3592 return true 3593 } 3594 return false 3595 } 3596 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool { 3597 b := v.Block 3598 _ = b 3599 // match: (Cvt32to32F x) 3600 // cond: 3601 // result: (CVTSL2SS x) 3602 for { 3603 x := v.Args[0] 3604 v.reset(OpAMD64CVTSL2SS) 3605 v.AddArg(x) 3606 return true 3607 } 3608 return false 3609 } 3610 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool { 3611 b := v.Block 3612 _ = b 3613 // match: (Cvt32to64F x) 3614 // cond: 3615 // result: (CVTSL2SD x) 3616 for { 3617 x := v.Args[0] 3618 v.reset(OpAMD64CVTSL2SD) 3619 v.AddArg(x) 3620 return true 3621 } 3622 return false 3623 } 3624 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool { 3625 b := v.Block 3626 _ = b 3627 // match: (Cvt64Fto32 x) 3628 // cond: 3629 // result: (CVTTSD2SL x) 3630 for { 3631 x := v.Args[0] 3632 v.reset(OpAMD64CVTTSD2SL) 3633 v.AddArg(x) 3634 return true 3635 } 3636 return false 3637 } 3638 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool { 3639 b := v.Block 3640 _ = b 3641 // match: (Cvt64Fto32F x) 3642 // cond: 3643 // result: (CVTSD2SS x) 3644 for { 3645 x := v.Args[0] 3646 v.reset(OpAMD64CVTSD2SS) 3647 v.AddArg(x) 3648 return true 3649 } 3650 return false 3651 } 3652 func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool { 3653 b := v.Block 3654 _ = b 3655 // match: (Cvt64Fto64 x) 3656 // cond: 3657 // result: (CVTTSD2SQ x) 3658 for { 3659 x := v.Args[0] 3660 v.reset(OpAMD64CVTTSD2SQ) 3661 v.AddArg(x) 3662 return true 3663 } 3664 return false 3665 } 3666 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool { 3667 b := v.Block 3668 _ = b 3669 // match: (Cvt64to32F x) 3670 // cond: 3671 // result: (CVTSQ2SS x) 3672 for { 3673 x := v.Args[0] 3674 v.reset(OpAMD64CVTSQ2SS) 3675 v.AddArg(x) 3676 return true 3677 } 3678 return false 3679 } 3680 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool { 3681 b := v.Block 3682 _ = b 3683 // match: (Cvt64to64F x) 3684 // cond: 3685 // result: (CVTSQ2SD x) 3686 for { 3687 x := v.Args[0] 3688 v.reset(OpAMD64CVTSQ2SD) 3689 v.AddArg(x) 3690 return true 3691 } 3692 return false 3693 } 3694 func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool { 3695 b := v.Block 3696 _ = b 3697 // match: (DeferCall [argwid] mem) 3698 // cond: 3699 // result: (CALLdefer [argwid] mem) 3700 for { 3701 argwid := v.AuxInt 3702 mem := v.Args[0] 3703 v.reset(OpAMD64CALLdefer) 3704 v.AuxInt = argwid 3705 v.AddArg(mem) 3706 return true 3707 } 3708 return false 3709 } 3710 func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool { 3711 b := v.Block 3712 _ = b 3713 // match: (Div16 x y) 3714 // cond: 3715 // result: (DIVW x y) 3716 for { 3717 x := v.Args[0] 3718 y := v.Args[1] 3719 v.reset(OpAMD64DIVW) 3720 v.AddArg(x) 3721 v.AddArg(y) 3722 return true 3723 } 3724 return false 3725 } 3726 func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool { 3727 b := v.Block 3728 _ = b 3729 // match: (Div16u x y) 3730 // cond: 3731 // result: (DIVWU x y) 3732 for { 3733 x := v.Args[0] 3734 y := v.Args[1] 3735 v.reset(OpAMD64DIVWU) 3736 v.AddArg(x) 3737 v.AddArg(y) 3738 return true 3739 } 3740 return false 3741 } 3742 func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool { 3743 b := v.Block 3744 _ = b 3745 // match: (Div32 x y) 3746 // cond: 3747 // result: (DIVL x y) 3748 for { 3749 x := v.Args[0] 3750 y := v.Args[1] 3751 v.reset(OpAMD64DIVL) 3752 v.AddArg(x) 3753 v.AddArg(y) 3754 return true 3755 } 3756 return false 3757 } 3758 func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool { 3759 b := v.Block 3760 _ = b 3761 // match: (Div32F x y) 3762 // cond: 3763 // result: (DIVSS x y) 3764 for { 3765 x := v.Args[0] 3766 y := v.Args[1] 3767 v.reset(OpAMD64DIVSS) 3768 v.AddArg(x) 3769 v.AddArg(y) 3770 return true 3771 } 3772 return false 3773 } 3774 func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool { 3775 b := v.Block 3776 _ = b 3777 // match: (Div32u x y) 3778 // cond: 3779 // result: (DIVLU x y) 3780 for { 3781 x := v.Args[0] 3782 y := v.Args[1] 3783 v.reset(OpAMD64DIVLU) 3784 v.AddArg(x) 3785 v.AddArg(y) 3786 return true 3787 } 3788 return false 3789 } 3790 func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool { 3791 b := v.Block 3792 _ = b 3793 // match: (Div64 x y) 3794 // cond: 3795 // result: (DIVQ x y) 3796 for { 3797 x := v.Args[0] 3798 y := v.Args[1] 3799 v.reset(OpAMD64DIVQ) 3800 v.AddArg(x) 3801 v.AddArg(y) 3802 return true 3803 } 3804 return false 3805 } 3806 func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool { 3807 b := v.Block 3808 _ = b 3809 // match: (Div64F x y) 3810 // cond: 3811 // result: (DIVSD x y) 3812 for { 3813 x := v.Args[0] 3814 y := v.Args[1] 3815 v.reset(OpAMD64DIVSD) 3816 v.AddArg(x) 3817 v.AddArg(y) 3818 return true 3819 } 3820 return false 3821 } 3822 func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool { 3823 b := v.Block 3824 _ = b 3825 // match: (Div64u x y) 3826 // cond: 3827 // result: (DIVQU x y) 3828 for { 3829 x := v.Args[0] 3830 y := v.Args[1] 3831 v.reset(OpAMD64DIVQU) 3832 v.AddArg(x) 3833 v.AddArg(y) 3834 return true 3835 } 3836 return false 3837 } 3838 func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool { 3839 b := v.Block 3840 _ = b 3841 // match: (Div8 x y) 3842 // cond: 3843 // result: (DIVW (SignExt8to16 x) (SignExt8to16 y)) 3844 for { 3845 x := v.Args[0] 3846 y := v.Args[1] 3847 v.reset(OpAMD64DIVW) 3848 v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 3849 v0.AddArg(x) 3850 v.AddArg(v0) 3851 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 3852 v1.AddArg(y) 3853 v.AddArg(v1) 3854 return true 3855 } 3856 return false 3857 } 3858 func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool { 3859 b := v.Block 3860 _ = b 3861 // match: (Div8u x y) 3862 // cond: 3863 // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)) 3864 for { 3865 x := v.Args[0] 3866 y := v.Args[1] 3867 v.reset(OpAMD64DIVWU) 3868 v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 3869 v0.AddArg(x) 3870 v.AddArg(v0) 3871 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 3872 v1.AddArg(y) 3873 v.AddArg(v1) 3874 return true 3875 } 3876 return false 3877 } 3878 func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool { 3879 b := v.Block 3880 _ = b 3881 // match: (Eq16 x y) 3882 // cond: 3883 // result: (SETEQ (CMPW x y)) 3884 for { 3885 x := v.Args[0] 3886 y := v.Args[1] 3887 v.reset(OpAMD64SETEQ) 3888 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 3889 v0.AddArg(x) 3890 v0.AddArg(y) 3891 v.AddArg(v0) 3892 return true 3893 } 3894 return false 3895 } 3896 func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool { 3897 b := v.Block 3898 _ = b 3899 // match: (Eq32 x y) 3900 // cond: 3901 // result: (SETEQ (CMPL x y)) 3902 for { 3903 x := v.Args[0] 3904 y := v.Args[1] 3905 v.reset(OpAMD64SETEQ) 3906 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 3907 v0.AddArg(x) 3908 v0.AddArg(y) 3909 v.AddArg(v0) 3910 return true 3911 } 3912 return false 3913 } 3914 func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool { 3915 b := v.Block 3916 _ = b 3917 // match: (Eq32F x y) 3918 // cond: 3919 // result: (SETEQF (UCOMISS x y)) 3920 for { 3921 x := v.Args[0] 3922 y := v.Args[1] 3923 v.reset(OpAMD64SETEQF) 3924 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 3925 v0.AddArg(x) 3926 v0.AddArg(y) 3927 v.AddArg(v0) 3928 return true 3929 } 3930 return false 3931 } 3932 func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool { 3933 b := v.Block 3934 _ = b 3935 // match: (Eq64 x y) 3936 // cond: 3937 // result: (SETEQ (CMPQ x y)) 3938 for { 3939 x := v.Args[0] 3940 y := v.Args[1] 3941 v.reset(OpAMD64SETEQ) 3942 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 3943 v0.AddArg(x) 3944 v0.AddArg(y) 3945 v.AddArg(v0) 3946 return true 3947 } 3948 return false 3949 } 3950 func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool { 3951 b := v.Block 3952 _ = b 3953 // match: (Eq64F x y) 3954 // cond: 3955 // result: (SETEQF (UCOMISD x y)) 3956 for { 3957 x := v.Args[0] 3958 y := v.Args[1] 3959 v.reset(OpAMD64SETEQF) 3960 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 3961 v0.AddArg(x) 3962 v0.AddArg(y) 3963 v.AddArg(v0) 3964 return true 3965 } 3966 return false 3967 } 3968 func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { 3969 b := v.Block 3970 _ = b 3971 // match: (Eq8 x y) 3972 // cond: 3973 // result: (SETEQ (CMPB x y)) 3974 for { 3975 x := v.Args[0] 3976 y := v.Args[1] 3977 v.reset(OpAMD64SETEQ) 3978 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 3979 v0.AddArg(x) 3980 v0.AddArg(y) 3981 v.AddArg(v0) 3982 return true 3983 } 3984 return false 3985 } 3986 func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { 3987 b := v.Block 3988 _ = b 3989 // match: (EqPtr x y) 3990 // cond: 3991 // result: (SETEQ (CMPQ x y)) 3992 for { 3993 x := v.Args[0] 3994 y := v.Args[1] 3995 v.reset(OpAMD64SETEQ) 3996 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 3997 v0.AddArg(x) 3998 v0.AddArg(y) 3999 v.AddArg(v0) 4000 return true 4001 } 4002 return false 4003 } 4004 func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool { 4005 b := v.Block 4006 _ = b 4007 // match: (Geq16 x y) 4008 // cond: 4009 // result: (SETGE (CMPW x y)) 4010 for { 4011 x := v.Args[0] 4012 y := v.Args[1] 4013 v.reset(OpAMD64SETGE) 4014 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 4015 v0.AddArg(x) 4016 v0.AddArg(y) 4017 v.AddArg(v0) 4018 return true 4019 } 4020 return false 4021 } 4022 func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool { 4023 b := v.Block 4024 _ = b 4025 // match: (Geq16U x y) 4026 // cond: 4027 // result: (SETAE (CMPW x y)) 4028 for { 4029 x := v.Args[0] 4030 y := v.Args[1] 4031 v.reset(OpAMD64SETAE) 4032 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 4033 v0.AddArg(x) 4034 v0.AddArg(y) 4035 v.AddArg(v0) 4036 return true 4037 } 4038 return false 4039 } 4040 func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool { 4041 b := v.Block 4042 _ = b 4043 // match: (Geq32 x y) 4044 // cond: 4045 // result: (SETGE (CMPL x y)) 4046 for { 4047 x := v.Args[0] 4048 y := v.Args[1] 4049 v.reset(OpAMD64SETGE) 4050 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 4051 v0.AddArg(x) 4052 v0.AddArg(y) 4053 v.AddArg(v0) 4054 return true 4055 } 4056 return false 4057 } 4058 func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool { 4059 b := v.Block 4060 _ = b 4061 // match: (Geq32F x y) 4062 // cond: 4063 // result: (SETGEF (UCOMISS x y)) 4064 for { 4065 x := v.Args[0] 4066 y := v.Args[1] 4067 v.reset(OpAMD64SETGEF) 4068 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 4069 v0.AddArg(x) 4070 v0.AddArg(y) 4071 v.AddArg(v0) 4072 return true 4073 } 4074 return false 4075 } 4076 func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool { 4077 b := v.Block 4078 _ = b 4079 // match: (Geq32U x y) 4080 // cond: 4081 // result: (SETAE (CMPL x y)) 4082 for { 4083 x := v.Args[0] 4084 y := v.Args[1] 4085 v.reset(OpAMD64SETAE) 4086 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 4087 v0.AddArg(x) 4088 v0.AddArg(y) 4089 v.AddArg(v0) 4090 return true 4091 } 4092 return false 4093 } 4094 func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool { 4095 b := v.Block 4096 _ = b 4097 // match: (Geq64 x y) 4098 // cond: 4099 // result: (SETGE (CMPQ x y)) 4100 for { 4101 x := v.Args[0] 4102 y := v.Args[1] 4103 v.reset(OpAMD64SETGE) 4104 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 4105 v0.AddArg(x) 4106 v0.AddArg(y) 4107 v.AddArg(v0) 4108 return true 4109 } 4110 return false 4111 } 4112 func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool { 4113 b := v.Block 4114 _ = b 4115 // match: (Geq64F x y) 4116 // cond: 4117 // result: (SETGEF (UCOMISD x y)) 4118 for { 4119 x := v.Args[0] 4120 y := v.Args[1] 4121 v.reset(OpAMD64SETGEF) 4122 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 4123 v0.AddArg(x) 4124 v0.AddArg(y) 4125 v.AddArg(v0) 4126 return true 4127 } 4128 return false 4129 } 4130 func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool { 4131 b := v.Block 4132 _ = b 4133 // match: (Geq64U x y) 4134 // cond: 4135 // result: (SETAE (CMPQ x y)) 4136 for { 4137 x := v.Args[0] 4138 y := v.Args[1] 4139 v.reset(OpAMD64SETAE) 4140 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 4141 v0.AddArg(x) 4142 v0.AddArg(y) 4143 v.AddArg(v0) 4144 return true 4145 } 4146 return false 4147 } 4148 func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool { 4149 b := v.Block 4150 _ = b 4151 // match: (Geq8 x y) 4152 // cond: 4153 // result: (SETGE (CMPB x y)) 4154 for { 4155 x := v.Args[0] 4156 y := v.Args[1] 4157 v.reset(OpAMD64SETGE) 4158 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 4159 v0.AddArg(x) 4160 v0.AddArg(y) 4161 v.AddArg(v0) 4162 return true 4163 } 4164 return false 4165 } 4166 func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool { 4167 b := v.Block 4168 _ = b 4169 // match: (Geq8U x y) 4170 // cond: 4171 // result: (SETAE (CMPB x y)) 4172 for { 4173 x := v.Args[0] 4174 y := v.Args[1] 4175 v.reset(OpAMD64SETAE) 4176 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 4177 v0.AddArg(x) 4178 v0.AddArg(y) 4179 v.AddArg(v0) 4180 return true 4181 } 4182 return false 4183 } 4184 func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool { 4185 b := v.Block 4186 _ = b 4187 // match: (GetClosurePtr) 4188 // cond: 4189 // result: (LoweredGetClosurePtr) 4190 for { 4191 v.reset(OpAMD64LoweredGetClosurePtr) 4192 return true 4193 } 4194 return false 4195 } 4196 func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool { 4197 b := v.Block 4198 _ = b 4199 // match: (GetG mem) 4200 // cond: 4201 // result: (LoweredGetG mem) 4202 for { 4203 mem := v.Args[0] 4204 v.reset(OpAMD64LoweredGetG) 4205 v.AddArg(mem) 4206 return true 4207 } 4208 return false 4209 } 4210 func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool { 4211 b := v.Block 4212 _ = b 4213 // match: (GoCall [argwid] mem) 4214 // cond: 4215 // result: (CALLgo [argwid] mem) 4216 for { 4217 argwid := v.AuxInt 4218 mem := v.Args[0] 4219 v.reset(OpAMD64CALLgo) 4220 v.AuxInt = argwid 4221 v.AddArg(mem) 4222 return true 4223 } 4224 return false 4225 } 4226 func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool { 4227 b := v.Block 4228 _ = b 4229 // match: (Greater16 x y) 4230 // cond: 4231 // result: (SETG (CMPW x y)) 4232 for { 4233 x := v.Args[0] 4234 y := v.Args[1] 4235 v.reset(OpAMD64SETG) 4236 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 4237 v0.AddArg(x) 4238 v0.AddArg(y) 4239 v.AddArg(v0) 4240 return true 4241 } 4242 return false 4243 } 4244 func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool { 4245 b := v.Block 4246 _ = b 4247 // match: (Greater16U x y) 4248 // cond: 4249 // result: (SETA (CMPW x y)) 4250 for { 4251 x := v.Args[0] 4252 y := v.Args[1] 4253 v.reset(OpAMD64SETA) 4254 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 4255 v0.AddArg(x) 4256 v0.AddArg(y) 4257 v.AddArg(v0) 4258 return true 4259 } 4260 return false 4261 } 4262 func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool { 4263 b := v.Block 4264 _ = b 4265 // match: (Greater32 x y) 4266 // cond: 4267 // result: (SETG (CMPL x y)) 4268 for { 4269 x := v.Args[0] 4270 y := v.Args[1] 4271 v.reset(OpAMD64SETG) 4272 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 4273 v0.AddArg(x) 4274 v0.AddArg(y) 4275 v.AddArg(v0) 4276 return true 4277 } 4278 return false 4279 } 4280 func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool { 4281 b := v.Block 4282 _ = b 4283 // match: (Greater32F x y) 4284 // cond: 4285 // result: (SETGF (UCOMISS x y)) 4286 for { 4287 x := v.Args[0] 4288 y := v.Args[1] 4289 v.reset(OpAMD64SETGF) 4290 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 4291 v0.AddArg(x) 4292 v0.AddArg(y) 4293 v.AddArg(v0) 4294 return true 4295 } 4296 return false 4297 } 4298 func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool { 4299 b := v.Block 4300 _ = b 4301 // match: (Greater32U x y) 4302 // cond: 4303 // result: (SETA (CMPL x y)) 4304 for { 4305 x := v.Args[0] 4306 y := v.Args[1] 4307 v.reset(OpAMD64SETA) 4308 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 4309 v0.AddArg(x) 4310 v0.AddArg(y) 4311 v.AddArg(v0) 4312 return true 4313 } 4314 return false 4315 } 4316 func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool { 4317 b := v.Block 4318 _ = b 4319 // match: (Greater64 x y) 4320 // cond: 4321 // result: (SETG (CMPQ x y)) 4322 for { 4323 x := v.Args[0] 4324 y := v.Args[1] 4325 v.reset(OpAMD64SETG) 4326 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 4327 v0.AddArg(x) 4328 v0.AddArg(y) 4329 v.AddArg(v0) 4330 return true 4331 } 4332 return false 4333 } 4334 func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool { 4335 b := v.Block 4336 _ = b 4337 // match: (Greater64F x y) 4338 // cond: 4339 // result: (SETGF (UCOMISD x y)) 4340 for { 4341 x := v.Args[0] 4342 y := v.Args[1] 4343 v.reset(OpAMD64SETGF) 4344 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 4345 v0.AddArg(x) 4346 v0.AddArg(y) 4347 v.AddArg(v0) 4348 return true 4349 } 4350 return false 4351 } 4352 func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool { 4353 b := v.Block 4354 _ = b 4355 // match: (Greater64U x y) 4356 // cond: 4357 // result: (SETA (CMPQ x y)) 4358 for { 4359 x := v.Args[0] 4360 y := v.Args[1] 4361 v.reset(OpAMD64SETA) 4362 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 4363 v0.AddArg(x) 4364 v0.AddArg(y) 4365 v.AddArg(v0) 4366 return true 4367 } 4368 return false 4369 } 4370 func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 4371 b := v.Block 4372 _ = b 4373 // match: (Greater8 x y) 4374 // cond: 4375 // result: (SETG (CMPB x y)) 4376 for { 4377 x := v.Args[0] 4378 y := v.Args[1] 4379 v.reset(OpAMD64SETG) 4380 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 4381 v0.AddArg(x) 4382 v0.AddArg(y) 4383 v.AddArg(v0) 4384 return true 4385 } 4386 return false 4387 } 4388 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 4389 b := v.Block 4390 _ = b 4391 // match: (Greater8U x y) 4392 // cond: 4393 // result: (SETA (CMPB x y)) 4394 for { 4395 x := v.Args[0] 4396 y := v.Args[1] 4397 v.reset(OpAMD64SETA) 4398 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 4399 v0.AddArg(x) 4400 v0.AddArg(y) 4401 v.AddArg(v0) 4402 return true 4403 } 4404 return false 4405 } 4406 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 4407 b := v.Block 4408 _ = b 4409 // match: (Hmul16 x y) 4410 // cond: 4411 // result: (HMULW x y) 4412 for { 4413 x := v.Args[0] 4414 y := v.Args[1] 4415 v.reset(OpAMD64HMULW) 4416 v.AddArg(x) 4417 v.AddArg(y) 4418 return true 4419 } 4420 return false 4421 } 4422 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 4423 b := v.Block 4424 _ = b 4425 // match: (Hmul16u x y) 4426 // cond: 4427 // result: (HMULWU x y) 4428 for { 4429 x := v.Args[0] 4430 y := v.Args[1] 4431 v.reset(OpAMD64HMULWU) 4432 v.AddArg(x) 4433 v.AddArg(y) 4434 return true 4435 } 4436 return false 4437 } 4438 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 4439 b := v.Block 4440 _ = b 4441 // match: (Hmul32 x y) 4442 // cond: 4443 // result: (HMULL x y) 4444 for { 4445 x := v.Args[0] 4446 y := v.Args[1] 4447 v.reset(OpAMD64HMULL) 4448 v.AddArg(x) 4449 v.AddArg(y) 4450 return true 4451 } 4452 return false 4453 } 4454 func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 4455 b := v.Block 4456 _ = b 4457 // match: (Hmul32u x y) 4458 // cond: 4459 // result: (HMULLU x y) 4460 for { 4461 x := v.Args[0] 4462 y := v.Args[1] 4463 v.reset(OpAMD64HMULLU) 4464 v.AddArg(x) 4465 v.AddArg(y) 4466 return true 4467 } 4468 return false 4469 } 4470 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 4471 b := v.Block 4472 _ = b 4473 // match: (Hmul64 x y) 4474 // cond: 4475 // result: (HMULQ x y) 4476 for { 4477 x := v.Args[0] 4478 y := v.Args[1] 4479 v.reset(OpAMD64HMULQ) 4480 v.AddArg(x) 4481 v.AddArg(y) 4482 return true 4483 } 4484 return false 4485 } 4486 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 4487 b := v.Block 4488 _ = b 4489 // match: (Hmul64u x y) 4490 // cond: 4491 // result: (HMULQU x y) 4492 for { 4493 x := v.Args[0] 4494 y := v.Args[1] 4495 v.reset(OpAMD64HMULQU) 4496 v.AddArg(x) 4497 v.AddArg(y) 4498 return true 4499 } 4500 return false 4501 } 4502 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 4503 b := v.Block 4504 _ = b 4505 // match: (Hmul8 x y) 4506 // cond: 4507 // result: (HMULB x y) 4508 for { 4509 x := v.Args[0] 4510 y := v.Args[1] 4511 v.reset(OpAMD64HMULB) 4512 v.AddArg(x) 4513 v.AddArg(y) 4514 return true 4515 } 4516 return false 4517 } 4518 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 4519 b := v.Block 4520 _ = b 4521 // match: (Hmul8u x y) 4522 // cond: 4523 // result: (HMULBU x y) 4524 for { 4525 x := v.Args[0] 4526 y := v.Args[1] 4527 v.reset(OpAMD64HMULBU) 4528 v.AddArg(x) 4529 v.AddArg(y) 4530 return true 4531 } 4532 return false 4533 } 4534 func rewriteValueAMD64_OpITab(v *Value, config *Config) bool { 4535 b := v.Block 4536 _ = b 4537 // match: (ITab (Load ptr mem)) 4538 // cond: 4539 // result: (MOVQload ptr mem) 4540 for { 4541 v_0 := v.Args[0] 4542 if v_0.Op != OpLoad { 4543 break 4544 } 4545 ptr := v_0.Args[0] 4546 mem := v_0.Args[1] 4547 v.reset(OpAMD64MOVQload) 4548 v.AddArg(ptr) 4549 v.AddArg(mem) 4550 return true 4551 } 4552 return false 4553 } 4554 func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool { 4555 b := v.Block 4556 _ = b 4557 // match: (InterCall [argwid] entry mem) 4558 // cond: 4559 // result: (CALLinter [argwid] entry mem) 4560 for { 4561 argwid := v.AuxInt 4562 entry := v.Args[0] 4563 mem := v.Args[1] 4564 v.reset(OpAMD64CALLinter) 4565 v.AuxInt = argwid 4566 v.AddArg(entry) 4567 v.AddArg(mem) 4568 return true 4569 } 4570 return false 4571 } 4572 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 4573 b := v.Block 4574 _ = b 4575 // match: (IsInBounds idx len) 4576 // cond: 4577 // result: (SETB (CMPQ idx len)) 4578 for { 4579 idx := v.Args[0] 4580 len := v.Args[1] 4581 v.reset(OpAMD64SETB) 4582 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 4583 v0.AddArg(idx) 4584 v0.AddArg(len) 4585 v.AddArg(v0) 4586 return true 4587 } 4588 return false 4589 } 4590 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 4591 b := v.Block 4592 _ = b 4593 // match: (IsNonNil p) 4594 // cond: 4595 // result: (SETNE (TESTQ p p)) 4596 for { 4597 p := v.Args[0] 4598 v.reset(OpAMD64SETNE) 4599 v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags) 4600 v0.AddArg(p) 4601 v0.AddArg(p) 4602 v.AddArg(v0) 4603 return true 4604 } 4605 return false 4606 } 4607 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 4608 b := v.Block 4609 _ = b 4610 // match: (IsSliceInBounds idx len) 4611 // cond: 4612 // result: (SETBE (CMPQ idx len)) 4613 for { 4614 idx := v.Args[0] 4615 len := v.Args[1] 4616 v.reset(OpAMD64SETBE) 4617 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 4618 v0.AddArg(idx) 4619 v0.AddArg(len) 4620 v.AddArg(v0) 4621 return true 4622 } 4623 return false 4624 } 4625 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { 4626 b := v.Block 4627 _ = b 4628 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 4629 // cond: is32Bit(c+d) 4630 // result: (LEAQ [c+d] {s} x) 4631 for { 4632 c := v.AuxInt 4633 s := v.Aux 4634 v_0 := v.Args[0] 4635 if v_0.Op != OpAMD64ADDQconst { 4636 break 4637 } 4638 d := v_0.AuxInt 4639 x := v_0.Args[0] 4640 if !(is32Bit(c + d)) { 4641 break 4642 } 4643 v.reset(OpAMD64LEAQ) 4644 v.AuxInt = c + d 4645 v.Aux = s 4646 v.AddArg(x) 4647 return true 4648 } 4649 // match: (LEAQ [c] {s} (ADDQ x y)) 4650 // cond: x.Op != OpSB && y.Op != OpSB 4651 // result: (LEAQ1 [c] {s} x y) 4652 for { 4653 c := v.AuxInt 4654 s := v.Aux 4655 v_0 := v.Args[0] 4656 if v_0.Op != OpAMD64ADDQ { 4657 break 4658 } 4659 x := v_0.Args[0] 4660 y := v_0.Args[1] 4661 if !(x.Op != OpSB && y.Op != OpSB) { 4662 break 4663 } 4664 v.reset(OpAMD64LEAQ1) 4665 v.AuxInt = c 4666 v.Aux = s 4667 v.AddArg(x) 4668 v.AddArg(y) 4669 return true 4670 } 4671 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 4672 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4673 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 4674 for { 4675 off1 := v.AuxInt 4676 sym1 := v.Aux 4677 v_0 := v.Args[0] 4678 if v_0.Op != OpAMD64LEAQ { 4679 break 4680 } 4681 off2 := v_0.AuxInt 4682 sym2 := v_0.Aux 4683 x := v_0.Args[0] 4684 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4685 break 4686 } 4687 v.reset(OpAMD64LEAQ) 4688 v.AuxInt = off1 + off2 4689 v.Aux = mergeSym(sym1, sym2) 4690 v.AddArg(x) 4691 return true 4692 } 4693 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 4694 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4695 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4696 for { 4697 off1 := v.AuxInt 4698 sym1 := v.Aux 4699 v_0 := v.Args[0] 4700 if v_0.Op != OpAMD64LEAQ1 { 4701 break 4702 } 4703 off2 := v_0.AuxInt 4704 sym2 := v_0.Aux 4705 x := v_0.Args[0] 4706 y := v_0.Args[1] 4707 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4708 break 4709 } 4710 v.reset(OpAMD64LEAQ1) 4711 v.AuxInt = off1 + off2 4712 v.Aux = mergeSym(sym1, sym2) 4713 v.AddArg(x) 4714 v.AddArg(y) 4715 return true 4716 } 4717 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 4718 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4719 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 4720 for { 4721 off1 := v.AuxInt 4722 sym1 := v.Aux 4723 v_0 := v.Args[0] 4724 if v_0.Op != OpAMD64LEAQ2 { 4725 break 4726 } 4727 off2 := v_0.AuxInt 4728 sym2 := v_0.Aux 4729 x := v_0.Args[0] 4730 y := v_0.Args[1] 4731 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4732 break 4733 } 4734 v.reset(OpAMD64LEAQ2) 4735 v.AuxInt = off1 + off2 4736 v.Aux = mergeSym(sym1, sym2) 4737 v.AddArg(x) 4738 v.AddArg(y) 4739 return true 4740 } 4741 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 4742 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4743 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 4744 for { 4745 off1 := v.AuxInt 4746 sym1 := v.Aux 4747 v_0 := v.Args[0] 4748 if v_0.Op != OpAMD64LEAQ4 { 4749 break 4750 } 4751 off2 := v_0.AuxInt 4752 sym2 := v_0.Aux 4753 x := v_0.Args[0] 4754 y := v_0.Args[1] 4755 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4756 break 4757 } 4758 v.reset(OpAMD64LEAQ4) 4759 v.AuxInt = off1 + off2 4760 v.Aux = mergeSym(sym1, sym2) 4761 v.AddArg(x) 4762 v.AddArg(y) 4763 return true 4764 } 4765 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 4766 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4767 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4768 for { 4769 off1 := v.AuxInt 4770 sym1 := v.Aux 4771 v_0 := v.Args[0] 4772 if v_0.Op != OpAMD64LEAQ8 { 4773 break 4774 } 4775 off2 := v_0.AuxInt 4776 sym2 := v_0.Aux 4777 x := v_0.Args[0] 4778 y := v_0.Args[1] 4779 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4780 break 4781 } 4782 v.reset(OpAMD64LEAQ8) 4783 v.AuxInt = off1 + off2 4784 v.Aux = mergeSym(sym1, sym2) 4785 v.AddArg(x) 4786 v.AddArg(y) 4787 return true 4788 } 4789 return false 4790 } 4791 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { 4792 b := v.Block 4793 _ = b 4794 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 4795 // cond: is32Bit(c+d) && x.Op != OpSB 4796 // result: (LEAQ1 [c+d] {s} x y) 4797 for { 4798 c := v.AuxInt 4799 s := v.Aux 4800 v_0 := v.Args[0] 4801 if v_0.Op != OpAMD64ADDQconst { 4802 break 4803 } 4804 d := v_0.AuxInt 4805 x := v_0.Args[0] 4806 y := v.Args[1] 4807 if !(is32Bit(c+d) && x.Op != OpSB) { 4808 break 4809 } 4810 v.reset(OpAMD64LEAQ1) 4811 v.AuxInt = c + d 4812 v.Aux = s 4813 v.AddArg(x) 4814 v.AddArg(y) 4815 return true 4816 } 4817 // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) 4818 // cond: is32Bit(c+d) && y.Op != OpSB 4819 // result: (LEAQ1 [c+d] {s} x y) 4820 for { 4821 c := v.AuxInt 4822 s := v.Aux 4823 x := v.Args[0] 4824 v_1 := v.Args[1] 4825 if v_1.Op != OpAMD64ADDQconst { 4826 break 4827 } 4828 d := v_1.AuxInt 4829 y := v_1.Args[0] 4830 if !(is32Bit(c+d) && y.Op != OpSB) { 4831 break 4832 } 4833 v.reset(OpAMD64LEAQ1) 4834 v.AuxInt = c + d 4835 v.Aux = s 4836 v.AddArg(x) 4837 v.AddArg(y) 4838 return true 4839 } 4840 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 4841 // cond: 4842 // result: (LEAQ2 [c] {s} x y) 4843 for { 4844 c := v.AuxInt 4845 s := v.Aux 4846 x := v.Args[0] 4847 v_1 := v.Args[1] 4848 if v_1.Op != OpAMD64SHLQconst { 4849 break 4850 } 4851 if v_1.AuxInt != 1 { 4852 break 4853 } 4854 y := v_1.Args[0] 4855 v.reset(OpAMD64LEAQ2) 4856 v.AuxInt = c 4857 v.Aux = s 4858 v.AddArg(x) 4859 v.AddArg(y) 4860 return true 4861 } 4862 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 4863 // cond: 4864 // result: (LEAQ2 [c] {s} y x) 4865 for { 4866 c := v.AuxInt 4867 s := v.Aux 4868 v_0 := v.Args[0] 4869 if v_0.Op != OpAMD64SHLQconst { 4870 break 4871 } 4872 if v_0.AuxInt != 1 { 4873 break 4874 } 4875 x := v_0.Args[0] 4876 y := v.Args[1] 4877 v.reset(OpAMD64LEAQ2) 4878 v.AuxInt = c 4879 v.Aux = s 4880 v.AddArg(y) 4881 v.AddArg(x) 4882 return true 4883 } 4884 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 4885 // cond: 4886 // result: (LEAQ4 [c] {s} x y) 4887 for { 4888 c := v.AuxInt 4889 s := v.Aux 4890 x := v.Args[0] 4891 v_1 := v.Args[1] 4892 if v_1.Op != OpAMD64SHLQconst { 4893 break 4894 } 4895 if v_1.AuxInt != 2 { 4896 break 4897 } 4898 y := v_1.Args[0] 4899 v.reset(OpAMD64LEAQ4) 4900 v.AuxInt = c 4901 v.Aux = s 4902 v.AddArg(x) 4903 v.AddArg(y) 4904 return true 4905 } 4906 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 4907 // cond: 4908 // result: (LEAQ4 [c] {s} y x) 4909 for { 4910 c := v.AuxInt 4911 s := v.Aux 4912 v_0 := v.Args[0] 4913 if v_0.Op != OpAMD64SHLQconst { 4914 break 4915 } 4916 if v_0.AuxInt != 2 { 4917 break 4918 } 4919 x := v_0.Args[0] 4920 y := v.Args[1] 4921 v.reset(OpAMD64LEAQ4) 4922 v.AuxInt = c 4923 v.Aux = s 4924 v.AddArg(y) 4925 v.AddArg(x) 4926 return true 4927 } 4928 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 4929 // cond: 4930 // result: (LEAQ8 [c] {s} x y) 4931 for { 4932 c := v.AuxInt 4933 s := v.Aux 4934 x := v.Args[0] 4935 v_1 := v.Args[1] 4936 if v_1.Op != OpAMD64SHLQconst { 4937 break 4938 } 4939 if v_1.AuxInt != 3 { 4940 break 4941 } 4942 y := v_1.Args[0] 4943 v.reset(OpAMD64LEAQ8) 4944 v.AuxInt = c 4945 v.Aux = s 4946 v.AddArg(x) 4947 v.AddArg(y) 4948 return true 4949 } 4950 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) 4951 // cond: 4952 // result: (LEAQ8 [c] {s} y x) 4953 for { 4954 c := v.AuxInt 4955 s := v.Aux 4956 v_0 := v.Args[0] 4957 if v_0.Op != OpAMD64SHLQconst { 4958 break 4959 } 4960 if v_0.AuxInt != 3 { 4961 break 4962 } 4963 x := v_0.Args[0] 4964 y := v.Args[1] 4965 v.reset(OpAMD64LEAQ8) 4966 v.AuxInt = c 4967 v.Aux = s 4968 v.AddArg(y) 4969 v.AddArg(x) 4970 return true 4971 } 4972 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4973 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4974 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4975 for { 4976 off1 := v.AuxInt 4977 sym1 := v.Aux 4978 v_0 := v.Args[0] 4979 if v_0.Op != OpAMD64LEAQ { 4980 break 4981 } 4982 off2 := v_0.AuxInt 4983 sym2 := v_0.Aux 4984 x := v_0.Args[0] 4985 y := v.Args[1] 4986 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4987 break 4988 } 4989 v.reset(OpAMD64LEAQ1) 4990 v.AuxInt = off1 + off2 4991 v.Aux = mergeSym(sym1, sym2) 4992 v.AddArg(x) 4993 v.AddArg(y) 4994 return true 4995 } 4996 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 4997 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 4998 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4999 for { 5000 off1 := v.AuxInt 5001 sym1 := v.Aux 5002 x := v.Args[0] 5003 v_1 := v.Args[1] 5004 if v_1.Op != OpAMD64LEAQ { 5005 break 5006 } 5007 off2 := v_1.AuxInt 5008 sym2 := v_1.Aux 5009 y := v_1.Args[0] 5010 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 5011 break 5012 } 5013 v.reset(OpAMD64LEAQ1) 5014 v.AuxInt = off1 + off2 5015 v.Aux = mergeSym(sym1, sym2) 5016 v.AddArg(x) 5017 v.AddArg(y) 5018 return true 5019 } 5020 return false 5021 } 5022 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 5023 b := v.Block 5024 _ = b 5025 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 5026 // cond: is32Bit(c+d) && x.Op != OpSB 5027 // result: (LEAQ2 [c+d] {s} x y) 5028 for { 5029 c := v.AuxInt 5030 s := v.Aux 5031 v_0 := v.Args[0] 5032 if v_0.Op != OpAMD64ADDQconst { 5033 break 5034 } 5035 d := v_0.AuxInt 5036 x := v_0.Args[0] 5037 y := v.Args[1] 5038 if !(is32Bit(c+d) && x.Op != OpSB) { 5039 break 5040 } 5041 v.reset(OpAMD64LEAQ2) 5042 v.AuxInt = c + d 5043 v.Aux = s 5044 v.AddArg(x) 5045 v.AddArg(y) 5046 return true 5047 } 5048 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 5049 // cond: is32Bit(c+2*d) && y.Op != OpSB 5050 // result: (LEAQ2 [c+2*d] {s} x y) 5051 for { 5052 c := v.AuxInt 5053 s := v.Aux 5054 x := v.Args[0] 5055 v_1 := v.Args[1] 5056 if v_1.Op != OpAMD64ADDQconst { 5057 break 5058 } 5059 d := v_1.AuxInt 5060 y := v_1.Args[0] 5061 if !(is32Bit(c+2*d) && y.Op != OpSB) { 5062 break 5063 } 5064 v.reset(OpAMD64LEAQ2) 5065 v.AuxInt = c + 2*d 5066 v.Aux = s 5067 v.AddArg(x) 5068 v.AddArg(y) 5069 return true 5070 } 5071 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 5072 // cond: 5073 // result: (LEAQ4 [c] {s} x y) 5074 for { 5075 c := v.AuxInt 5076 s := v.Aux 5077 x := v.Args[0] 5078 v_1 := v.Args[1] 5079 if v_1.Op != OpAMD64SHLQconst { 5080 break 5081 } 5082 if v_1.AuxInt != 1 { 5083 break 5084 } 5085 y := v_1.Args[0] 5086 v.reset(OpAMD64LEAQ4) 5087 v.AuxInt = c 5088 v.Aux = s 5089 v.AddArg(x) 5090 v.AddArg(y) 5091 return true 5092 } 5093 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 5094 // cond: 5095 // result: (LEAQ8 [c] {s} x y) 5096 for { 5097 c := v.AuxInt 5098 s := v.Aux 5099 x := v.Args[0] 5100 v_1 := v.Args[1] 5101 if v_1.Op != OpAMD64SHLQconst { 5102 break 5103 } 5104 if v_1.AuxInt != 2 { 5105 break 5106 } 5107 y := v_1.Args[0] 5108 v.reset(OpAMD64LEAQ8) 5109 v.AuxInt = c 5110 v.Aux = s 5111 v.AddArg(x) 5112 v.AddArg(y) 5113 return true 5114 } 5115 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 5116 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 5117 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 5118 for { 5119 off1 := v.AuxInt 5120 sym1 := v.Aux 5121 v_0 := v.Args[0] 5122 if v_0.Op != OpAMD64LEAQ { 5123 break 5124 } 5125 off2 := v_0.AuxInt 5126 sym2 := v_0.Aux 5127 x := v_0.Args[0] 5128 y := v.Args[1] 5129 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 5130 break 5131 } 5132 v.reset(OpAMD64LEAQ2) 5133 v.AuxInt = off1 + off2 5134 v.Aux = mergeSym(sym1, sym2) 5135 v.AddArg(x) 5136 v.AddArg(y) 5137 return true 5138 } 5139 return false 5140 } 5141 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 5142 b := v.Block 5143 _ = b 5144 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 5145 // cond: is32Bit(c+d) && x.Op != OpSB 5146 // result: (LEAQ4 [c+d] {s} x y) 5147 for { 5148 c := v.AuxInt 5149 s := v.Aux 5150 v_0 := v.Args[0] 5151 if v_0.Op != OpAMD64ADDQconst { 5152 break 5153 } 5154 d := v_0.AuxInt 5155 x := v_0.Args[0] 5156 y := v.Args[1] 5157 if !(is32Bit(c+d) && x.Op != OpSB) { 5158 break 5159 } 5160 v.reset(OpAMD64LEAQ4) 5161 v.AuxInt = c + d 5162 v.Aux = s 5163 v.AddArg(x) 5164 v.AddArg(y) 5165 return true 5166 } 5167 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 5168 // cond: is32Bit(c+4*d) && y.Op != OpSB 5169 // result: (LEAQ4 [c+4*d] {s} x y) 5170 for { 5171 c := v.AuxInt 5172 s := v.Aux 5173 x := v.Args[0] 5174 v_1 := v.Args[1] 5175 if v_1.Op != OpAMD64ADDQconst { 5176 break 5177 } 5178 d := v_1.AuxInt 5179 y := v_1.Args[0] 5180 if !(is32Bit(c+4*d) && y.Op != OpSB) { 5181 break 5182 } 5183 v.reset(OpAMD64LEAQ4) 5184 v.AuxInt = c + 4*d 5185 v.Aux = s 5186 v.AddArg(x) 5187 v.AddArg(y) 5188 return true 5189 } 5190 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 5191 // cond: 5192 // result: (LEAQ8 [c] {s} x y) 5193 for { 5194 c := v.AuxInt 5195 s := v.Aux 5196 x := v.Args[0] 5197 v_1 := v.Args[1] 5198 if v_1.Op != OpAMD64SHLQconst { 5199 break 5200 } 5201 if v_1.AuxInt != 1 { 5202 break 5203 } 5204 y := v_1.Args[0] 5205 v.reset(OpAMD64LEAQ8) 5206 v.AuxInt = c 5207 v.Aux = s 5208 v.AddArg(x) 5209 v.AddArg(y) 5210 return true 5211 } 5212 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 5213 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 5214 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 5215 for { 5216 off1 := v.AuxInt 5217 sym1 := v.Aux 5218 v_0 := v.Args[0] 5219 if v_0.Op != OpAMD64LEAQ { 5220 break 5221 } 5222 off2 := v_0.AuxInt 5223 sym2 := v_0.Aux 5224 x := v_0.Args[0] 5225 y := v.Args[1] 5226 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 5227 break 5228 } 5229 v.reset(OpAMD64LEAQ4) 5230 v.AuxInt = off1 + off2 5231 v.Aux = mergeSym(sym1, sym2) 5232 v.AddArg(x) 5233 v.AddArg(y) 5234 return true 5235 } 5236 return false 5237 } 5238 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 5239 b := v.Block 5240 _ = b 5241 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 5242 // cond: is32Bit(c+d) && x.Op != OpSB 5243 // result: (LEAQ8 [c+d] {s} x y) 5244 for { 5245 c := v.AuxInt 5246 s := v.Aux 5247 v_0 := v.Args[0] 5248 if v_0.Op != OpAMD64ADDQconst { 5249 break 5250 } 5251 d := v_0.AuxInt 5252 x := v_0.Args[0] 5253 y := v.Args[1] 5254 if !(is32Bit(c+d) && x.Op != OpSB) { 5255 break 5256 } 5257 v.reset(OpAMD64LEAQ8) 5258 v.AuxInt = c + d 5259 v.Aux = s 5260 v.AddArg(x) 5261 v.AddArg(y) 5262 return true 5263 } 5264 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 5265 // cond: is32Bit(c+8*d) && y.Op != OpSB 5266 // result: (LEAQ8 [c+8*d] {s} x y) 5267 for { 5268 c := v.AuxInt 5269 s := v.Aux 5270 x := v.Args[0] 5271 v_1 := v.Args[1] 5272 if v_1.Op != OpAMD64ADDQconst { 5273 break 5274 } 5275 d := v_1.AuxInt 5276 y := v_1.Args[0] 5277 if !(is32Bit(c+8*d) && y.Op != OpSB) { 5278 break 5279 } 5280 v.reset(OpAMD64LEAQ8) 5281 v.AuxInt = c + 8*d 5282 v.Aux = s 5283 v.AddArg(x) 5284 v.AddArg(y) 5285 return true 5286 } 5287 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 5288 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 5289 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 5290 for { 5291 off1 := v.AuxInt 5292 sym1 := v.Aux 5293 v_0 := v.Args[0] 5294 if v_0.Op != OpAMD64LEAQ { 5295 break 5296 } 5297 off2 := v_0.AuxInt 5298 sym2 := v_0.Aux 5299 x := v_0.Args[0] 5300 y := v.Args[1] 5301 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 5302 break 5303 } 5304 v.reset(OpAMD64LEAQ8) 5305 v.AuxInt = off1 + off2 5306 v.Aux = mergeSym(sym1, sym2) 5307 v.AddArg(x) 5308 v.AddArg(y) 5309 return true 5310 } 5311 return false 5312 } 5313 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 5314 b := v.Block 5315 _ = b 5316 // match: (Leq16 x y) 5317 // cond: 5318 // result: (SETLE (CMPW x y)) 5319 for { 5320 x := v.Args[0] 5321 y := v.Args[1] 5322 v.reset(OpAMD64SETLE) 5323 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 5324 v0.AddArg(x) 5325 v0.AddArg(y) 5326 v.AddArg(v0) 5327 return true 5328 } 5329 return false 5330 } 5331 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 5332 b := v.Block 5333 _ = b 5334 // match: (Leq16U x y) 5335 // cond: 5336 // result: (SETBE (CMPW x y)) 5337 for { 5338 x := v.Args[0] 5339 y := v.Args[1] 5340 v.reset(OpAMD64SETBE) 5341 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 5342 v0.AddArg(x) 5343 v0.AddArg(y) 5344 v.AddArg(v0) 5345 return true 5346 } 5347 return false 5348 } 5349 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 5350 b := v.Block 5351 _ = b 5352 // match: (Leq32 x y) 5353 // cond: 5354 // result: (SETLE (CMPL x y)) 5355 for { 5356 x := v.Args[0] 5357 y := v.Args[1] 5358 v.reset(OpAMD64SETLE) 5359 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 5360 v0.AddArg(x) 5361 v0.AddArg(y) 5362 v.AddArg(v0) 5363 return true 5364 } 5365 return false 5366 } 5367 func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool { 5368 b := v.Block 5369 _ = b 5370 // match: (Leq32F x y) 5371 // cond: 5372 // result: (SETGEF (UCOMISS y x)) 5373 for { 5374 x := v.Args[0] 5375 y := v.Args[1] 5376 v.reset(OpAMD64SETGEF) 5377 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 5378 v0.AddArg(y) 5379 v0.AddArg(x) 5380 v.AddArg(v0) 5381 return true 5382 } 5383 return false 5384 } 5385 func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool { 5386 b := v.Block 5387 _ = b 5388 // match: (Leq32U x y) 5389 // cond: 5390 // result: (SETBE (CMPL x y)) 5391 for { 5392 x := v.Args[0] 5393 y := v.Args[1] 5394 v.reset(OpAMD64SETBE) 5395 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 5396 v0.AddArg(x) 5397 v0.AddArg(y) 5398 v.AddArg(v0) 5399 return true 5400 } 5401 return false 5402 } 5403 func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool { 5404 b := v.Block 5405 _ = b 5406 // match: (Leq64 x y) 5407 // cond: 5408 // result: (SETLE (CMPQ x y)) 5409 for { 5410 x := v.Args[0] 5411 y := v.Args[1] 5412 v.reset(OpAMD64SETLE) 5413 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 5414 v0.AddArg(x) 5415 v0.AddArg(y) 5416 v.AddArg(v0) 5417 return true 5418 } 5419 return false 5420 } 5421 func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool { 5422 b := v.Block 5423 _ = b 5424 // match: (Leq64F x y) 5425 // cond: 5426 // result: (SETGEF (UCOMISD y x)) 5427 for { 5428 x := v.Args[0] 5429 y := v.Args[1] 5430 v.reset(OpAMD64SETGEF) 5431 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 5432 v0.AddArg(y) 5433 v0.AddArg(x) 5434 v.AddArg(v0) 5435 return true 5436 } 5437 return false 5438 } 5439 func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool { 5440 b := v.Block 5441 _ = b 5442 // match: (Leq64U x y) 5443 // cond: 5444 // result: (SETBE (CMPQ x y)) 5445 for { 5446 x := v.Args[0] 5447 y := v.Args[1] 5448 v.reset(OpAMD64SETBE) 5449 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 5450 v0.AddArg(x) 5451 v0.AddArg(y) 5452 v.AddArg(v0) 5453 return true 5454 } 5455 return false 5456 } 5457 func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool { 5458 b := v.Block 5459 _ = b 5460 // match: (Leq8 x y) 5461 // cond: 5462 // result: (SETLE (CMPB x y)) 5463 for { 5464 x := v.Args[0] 5465 y := v.Args[1] 5466 v.reset(OpAMD64SETLE) 5467 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 5468 v0.AddArg(x) 5469 v0.AddArg(y) 5470 v.AddArg(v0) 5471 return true 5472 } 5473 return false 5474 } 5475 func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool { 5476 b := v.Block 5477 _ = b 5478 // match: (Leq8U x y) 5479 // cond: 5480 // result: (SETBE (CMPB x y)) 5481 for { 5482 x := v.Args[0] 5483 y := v.Args[1] 5484 v.reset(OpAMD64SETBE) 5485 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 5486 v0.AddArg(x) 5487 v0.AddArg(y) 5488 v.AddArg(v0) 5489 return true 5490 } 5491 return false 5492 } 5493 func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool { 5494 b := v.Block 5495 _ = b 5496 // match: (Less16 x y) 5497 // cond: 5498 // result: (SETL (CMPW x y)) 5499 for { 5500 x := v.Args[0] 5501 y := v.Args[1] 5502 v.reset(OpAMD64SETL) 5503 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 5504 v0.AddArg(x) 5505 v0.AddArg(y) 5506 v.AddArg(v0) 5507 return true 5508 } 5509 return false 5510 } 5511 func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool { 5512 b := v.Block 5513 _ = b 5514 // match: (Less16U x y) 5515 // cond: 5516 // result: (SETB (CMPW x y)) 5517 for { 5518 x := v.Args[0] 5519 y := v.Args[1] 5520 v.reset(OpAMD64SETB) 5521 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 5522 v0.AddArg(x) 5523 v0.AddArg(y) 5524 v.AddArg(v0) 5525 return true 5526 } 5527 return false 5528 } 5529 func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool { 5530 b := v.Block 5531 _ = b 5532 // match: (Less32 x y) 5533 // cond: 5534 // result: (SETL (CMPL x y)) 5535 for { 5536 x := v.Args[0] 5537 y := v.Args[1] 5538 v.reset(OpAMD64SETL) 5539 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 5540 v0.AddArg(x) 5541 v0.AddArg(y) 5542 v.AddArg(v0) 5543 return true 5544 } 5545 return false 5546 } 5547 func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool { 5548 b := v.Block 5549 _ = b 5550 // match: (Less32F x y) 5551 // cond: 5552 // result: (SETGF (UCOMISS y x)) 5553 for { 5554 x := v.Args[0] 5555 y := v.Args[1] 5556 v.reset(OpAMD64SETGF) 5557 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 5558 v0.AddArg(y) 5559 v0.AddArg(x) 5560 v.AddArg(v0) 5561 return true 5562 } 5563 return false 5564 } 5565 func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool { 5566 b := v.Block 5567 _ = b 5568 // match: (Less32U x y) 5569 // cond: 5570 // result: (SETB (CMPL x y)) 5571 for { 5572 x := v.Args[0] 5573 y := v.Args[1] 5574 v.reset(OpAMD64SETB) 5575 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 5576 v0.AddArg(x) 5577 v0.AddArg(y) 5578 v.AddArg(v0) 5579 return true 5580 } 5581 return false 5582 } 5583 func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool { 5584 b := v.Block 5585 _ = b 5586 // match: (Less64 x y) 5587 // cond: 5588 // result: (SETL (CMPQ x y)) 5589 for { 5590 x := v.Args[0] 5591 y := v.Args[1] 5592 v.reset(OpAMD64SETL) 5593 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 5594 v0.AddArg(x) 5595 v0.AddArg(y) 5596 v.AddArg(v0) 5597 return true 5598 } 5599 return false 5600 } 5601 func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool { 5602 b := v.Block 5603 _ = b 5604 // match: (Less64F x y) 5605 // cond: 5606 // result: (SETGF (UCOMISD y x)) 5607 for { 5608 x := v.Args[0] 5609 y := v.Args[1] 5610 v.reset(OpAMD64SETGF) 5611 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 5612 v0.AddArg(y) 5613 v0.AddArg(x) 5614 v.AddArg(v0) 5615 return true 5616 } 5617 return false 5618 } 5619 func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool { 5620 b := v.Block 5621 _ = b 5622 // match: (Less64U x y) 5623 // cond: 5624 // result: (SETB (CMPQ x y)) 5625 for { 5626 x := v.Args[0] 5627 y := v.Args[1] 5628 v.reset(OpAMD64SETB) 5629 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 5630 v0.AddArg(x) 5631 v0.AddArg(y) 5632 v.AddArg(v0) 5633 return true 5634 } 5635 return false 5636 } 5637 func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool { 5638 b := v.Block 5639 _ = b 5640 // match: (Less8 x y) 5641 // cond: 5642 // result: (SETL (CMPB x y)) 5643 for { 5644 x := v.Args[0] 5645 y := v.Args[1] 5646 v.reset(OpAMD64SETL) 5647 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 5648 v0.AddArg(x) 5649 v0.AddArg(y) 5650 v.AddArg(v0) 5651 return true 5652 } 5653 return false 5654 } 5655 func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool { 5656 b := v.Block 5657 _ = b 5658 // match: (Less8U x y) 5659 // cond: 5660 // result: (SETB (CMPB x y)) 5661 for { 5662 x := v.Args[0] 5663 y := v.Args[1] 5664 v.reset(OpAMD64SETB) 5665 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 5666 v0.AddArg(x) 5667 v0.AddArg(y) 5668 v.AddArg(v0) 5669 return true 5670 } 5671 return false 5672 } 5673 func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool { 5674 b := v.Block 5675 _ = b 5676 // match: (Load <t> ptr mem) 5677 // cond: (is64BitInt(t) || isPtr(t)) 5678 // result: (MOVQload ptr mem) 5679 for { 5680 t := v.Type 5681 ptr := v.Args[0] 5682 mem := v.Args[1] 5683 if !(is64BitInt(t) || isPtr(t)) { 5684 break 5685 } 5686 v.reset(OpAMD64MOVQload) 5687 v.AddArg(ptr) 5688 v.AddArg(mem) 5689 return true 5690 } 5691 // match: (Load <t> ptr mem) 5692 // cond: is32BitInt(t) 5693 // result: (MOVLload ptr mem) 5694 for { 5695 t := v.Type 5696 ptr := v.Args[0] 5697 mem := v.Args[1] 5698 if !(is32BitInt(t)) { 5699 break 5700 } 5701 v.reset(OpAMD64MOVLload) 5702 v.AddArg(ptr) 5703 v.AddArg(mem) 5704 return true 5705 } 5706 // match: (Load <t> ptr mem) 5707 // cond: is16BitInt(t) 5708 // result: (MOVWload ptr mem) 5709 for { 5710 t := v.Type 5711 ptr := v.Args[0] 5712 mem := v.Args[1] 5713 if !(is16BitInt(t)) { 5714 break 5715 } 5716 v.reset(OpAMD64MOVWload) 5717 v.AddArg(ptr) 5718 v.AddArg(mem) 5719 return true 5720 } 5721 // match: (Load <t> ptr mem) 5722 // cond: (t.IsBoolean() || is8BitInt(t)) 5723 // result: (MOVBload ptr mem) 5724 for { 5725 t := v.Type 5726 ptr := v.Args[0] 5727 mem := v.Args[1] 5728 if !(t.IsBoolean() || is8BitInt(t)) { 5729 break 5730 } 5731 v.reset(OpAMD64MOVBload) 5732 v.AddArg(ptr) 5733 v.AddArg(mem) 5734 return true 5735 } 5736 // match: (Load <t> ptr mem) 5737 // cond: is32BitFloat(t) 5738 // result: (MOVSSload ptr mem) 5739 for { 5740 t := v.Type 5741 ptr := v.Args[0] 5742 mem := v.Args[1] 5743 if !(is32BitFloat(t)) { 5744 break 5745 } 5746 v.reset(OpAMD64MOVSSload) 5747 v.AddArg(ptr) 5748 v.AddArg(mem) 5749 return true 5750 } 5751 // match: (Load <t> ptr mem) 5752 // cond: is64BitFloat(t) 5753 // result: (MOVSDload ptr mem) 5754 for { 5755 t := v.Type 5756 ptr := v.Args[0] 5757 mem := v.Args[1] 5758 if !(is64BitFloat(t)) { 5759 break 5760 } 5761 v.reset(OpAMD64MOVSDload) 5762 v.AddArg(ptr) 5763 v.AddArg(mem) 5764 return true 5765 } 5766 return false 5767 } 5768 func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool { 5769 b := v.Block 5770 _ = b 5771 // match: (Lrot16 <t> x [c]) 5772 // cond: 5773 // result: (ROLWconst <t> [c&15] x) 5774 for { 5775 t := v.Type 5776 x := v.Args[0] 5777 c := v.AuxInt 5778 v.reset(OpAMD64ROLWconst) 5779 v.Type = t 5780 v.AuxInt = c & 15 5781 v.AddArg(x) 5782 return true 5783 } 5784 return false 5785 } 5786 func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool { 5787 b := v.Block 5788 _ = b 5789 // match: (Lrot32 <t> x [c]) 5790 // cond: 5791 // result: (ROLLconst <t> [c&31] x) 5792 for { 5793 t := v.Type 5794 x := v.Args[0] 5795 c := v.AuxInt 5796 v.reset(OpAMD64ROLLconst) 5797 v.Type = t 5798 v.AuxInt = c & 31 5799 v.AddArg(x) 5800 return true 5801 } 5802 return false 5803 } 5804 func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool { 5805 b := v.Block 5806 _ = b 5807 // match: (Lrot64 <t> x [c]) 5808 // cond: 5809 // result: (ROLQconst <t> [c&63] x) 5810 for { 5811 t := v.Type 5812 x := v.Args[0] 5813 c := v.AuxInt 5814 v.reset(OpAMD64ROLQconst) 5815 v.Type = t 5816 v.AuxInt = c & 63 5817 v.AddArg(x) 5818 return true 5819 } 5820 return false 5821 } 5822 func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool { 5823 b := v.Block 5824 _ = b 5825 // match: (Lrot8 <t> x [c]) 5826 // cond: 5827 // result: (ROLBconst <t> [c&7] x) 5828 for { 5829 t := v.Type 5830 x := v.Args[0] 5831 c := v.AuxInt 5832 v.reset(OpAMD64ROLBconst) 5833 v.Type = t 5834 v.AuxInt = c & 7 5835 v.AddArg(x) 5836 return true 5837 } 5838 return false 5839 } 5840 func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { 5841 b := v.Block 5842 _ = b 5843 // match: (Lsh16x16 <t> x y) 5844 // cond: 5845 // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 5846 for { 5847 t := v.Type 5848 x := v.Args[0] 5849 y := v.Args[1] 5850 v.reset(OpAMD64ANDW) 5851 v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) 5852 v0.AddArg(x) 5853 v0.AddArg(y) 5854 v.AddArg(v0) 5855 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 5856 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 5857 v2.AddArg(y) 5858 v2.AuxInt = 16 5859 v1.AddArg(v2) 5860 v.AddArg(v1) 5861 return true 5862 } 5863 return false 5864 } 5865 func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { 5866 b := v.Block 5867 _ = b 5868 // match: (Lsh16x32 <t> x y) 5869 // cond: 5870 // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 5871 for { 5872 t := v.Type 5873 x := v.Args[0] 5874 y := v.Args[1] 5875 v.reset(OpAMD64ANDW) 5876 v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) 5877 v0.AddArg(x) 5878 v0.AddArg(y) 5879 v.AddArg(v0) 5880 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 5881 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 5882 v2.AddArg(y) 5883 v2.AuxInt = 16 5884 v1.AddArg(v2) 5885 v.AddArg(v1) 5886 return true 5887 } 5888 return false 5889 } 5890 func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { 5891 b := v.Block 5892 _ = b 5893 // match: (Lsh16x64 <t> x y) 5894 // cond: 5895 // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 5896 for { 5897 t := v.Type 5898 x := v.Args[0] 5899 y := v.Args[1] 5900 v.reset(OpAMD64ANDW) 5901 v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) 5902 v0.AddArg(x) 5903 v0.AddArg(y) 5904 v.AddArg(v0) 5905 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 5906 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 5907 v2.AddArg(y) 5908 v2.AuxInt = 16 5909 v1.AddArg(v2) 5910 v.AddArg(v1) 5911 return true 5912 } 5913 return false 5914 } 5915 func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { 5916 b := v.Block 5917 _ = b 5918 // match: (Lsh16x8 <t> x y) 5919 // cond: 5920 // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 5921 for { 5922 t := v.Type 5923 x := v.Args[0] 5924 y := v.Args[1] 5925 v.reset(OpAMD64ANDW) 5926 v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) 5927 v0.AddArg(x) 5928 v0.AddArg(y) 5929 v.AddArg(v0) 5930 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 5931 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 5932 v2.AddArg(y) 5933 v2.AuxInt = 16 5934 v1.AddArg(v2) 5935 v.AddArg(v1) 5936 return true 5937 } 5938 return false 5939 } 5940 func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool { 5941 b := v.Block 5942 _ = b 5943 // match: (Lsh32x16 <t> x y) 5944 // cond: 5945 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 5946 for { 5947 t := v.Type 5948 x := v.Args[0] 5949 y := v.Args[1] 5950 v.reset(OpAMD64ANDL) 5951 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 5952 v0.AddArg(x) 5953 v0.AddArg(y) 5954 v.AddArg(v0) 5955 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 5956 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 5957 v2.AddArg(y) 5958 v2.AuxInt = 32 5959 v1.AddArg(v2) 5960 v.AddArg(v1) 5961 return true 5962 } 5963 return false 5964 } 5965 func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool { 5966 b := v.Block 5967 _ = b 5968 // match: (Lsh32x32 <t> x y) 5969 // cond: 5970 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 5971 for { 5972 t := v.Type 5973 x := v.Args[0] 5974 y := v.Args[1] 5975 v.reset(OpAMD64ANDL) 5976 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 5977 v0.AddArg(x) 5978 v0.AddArg(y) 5979 v.AddArg(v0) 5980 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 5981 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 5982 v2.AddArg(y) 5983 v2.AuxInt = 32 5984 v1.AddArg(v2) 5985 v.AddArg(v1) 5986 return true 5987 } 5988 return false 5989 } 5990 func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool { 5991 b := v.Block 5992 _ = b 5993 // match: (Lsh32x64 <t> x y) 5994 // cond: 5995 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 5996 for { 5997 t := v.Type 5998 x := v.Args[0] 5999 y := v.Args[1] 6000 v.reset(OpAMD64ANDL) 6001 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 6002 v0.AddArg(x) 6003 v0.AddArg(y) 6004 v.AddArg(v0) 6005 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 6006 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 6007 v2.AddArg(y) 6008 v2.AuxInt = 32 6009 v1.AddArg(v2) 6010 v.AddArg(v1) 6011 return true 6012 } 6013 return false 6014 } 6015 func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool { 6016 b := v.Block 6017 _ = b 6018 // match: (Lsh32x8 <t> x y) 6019 // cond: 6020 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 6021 for { 6022 t := v.Type 6023 x := v.Args[0] 6024 y := v.Args[1] 6025 v.reset(OpAMD64ANDL) 6026 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 6027 v0.AddArg(x) 6028 v0.AddArg(y) 6029 v.AddArg(v0) 6030 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 6031 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 6032 v2.AddArg(y) 6033 v2.AuxInt = 32 6034 v1.AddArg(v2) 6035 v.AddArg(v1) 6036 return true 6037 } 6038 return false 6039 } 6040 func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool { 6041 b := v.Block 6042 _ = b 6043 // match: (Lsh64x16 <t> x y) 6044 // cond: 6045 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 6046 for { 6047 t := v.Type 6048 x := v.Args[0] 6049 y := v.Args[1] 6050 v.reset(OpAMD64ANDQ) 6051 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 6052 v0.AddArg(x) 6053 v0.AddArg(y) 6054 v.AddArg(v0) 6055 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 6056 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 6057 v2.AddArg(y) 6058 v2.AuxInt = 64 6059 v1.AddArg(v2) 6060 v.AddArg(v1) 6061 return true 6062 } 6063 return false 6064 } 6065 func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool { 6066 b := v.Block 6067 _ = b 6068 // match: (Lsh64x32 <t> x y) 6069 // cond: 6070 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 6071 for { 6072 t := v.Type 6073 x := v.Args[0] 6074 y := v.Args[1] 6075 v.reset(OpAMD64ANDQ) 6076 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 6077 v0.AddArg(x) 6078 v0.AddArg(y) 6079 v.AddArg(v0) 6080 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 6081 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 6082 v2.AddArg(y) 6083 v2.AuxInt = 64 6084 v1.AddArg(v2) 6085 v.AddArg(v1) 6086 return true 6087 } 6088 return false 6089 } 6090 func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool { 6091 b := v.Block 6092 _ = b 6093 // match: (Lsh64x64 <t> x y) 6094 // cond: 6095 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 6096 for { 6097 t := v.Type 6098 x := v.Args[0] 6099 y := v.Args[1] 6100 v.reset(OpAMD64ANDQ) 6101 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 6102 v0.AddArg(x) 6103 v0.AddArg(y) 6104 v.AddArg(v0) 6105 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 6106 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 6107 v2.AddArg(y) 6108 v2.AuxInt = 64 6109 v1.AddArg(v2) 6110 v.AddArg(v1) 6111 return true 6112 } 6113 return false 6114 } 6115 func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool { 6116 b := v.Block 6117 _ = b 6118 // match: (Lsh64x8 <t> x y) 6119 // cond: 6120 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 6121 for { 6122 t := v.Type 6123 x := v.Args[0] 6124 y := v.Args[1] 6125 v.reset(OpAMD64ANDQ) 6126 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 6127 v0.AddArg(x) 6128 v0.AddArg(y) 6129 v.AddArg(v0) 6130 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 6131 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 6132 v2.AddArg(y) 6133 v2.AuxInt = 64 6134 v1.AddArg(v2) 6135 v.AddArg(v1) 6136 return true 6137 } 6138 return false 6139 } 6140 func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { 6141 b := v.Block 6142 _ = b 6143 // match: (Lsh8x16 <t> x y) 6144 // cond: 6145 // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 6146 for { 6147 t := v.Type 6148 x := v.Args[0] 6149 y := v.Args[1] 6150 v.reset(OpAMD64ANDB) 6151 v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) 6152 v0.AddArg(x) 6153 v0.AddArg(y) 6154 v.AddArg(v0) 6155 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 6156 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 6157 v2.AddArg(y) 6158 v2.AuxInt = 8 6159 v1.AddArg(v2) 6160 v.AddArg(v1) 6161 return true 6162 } 6163 return false 6164 } 6165 func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { 6166 b := v.Block 6167 _ = b 6168 // match: (Lsh8x32 <t> x y) 6169 // cond: 6170 // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 6171 for { 6172 t := v.Type 6173 x := v.Args[0] 6174 y := v.Args[1] 6175 v.reset(OpAMD64ANDB) 6176 v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) 6177 v0.AddArg(x) 6178 v0.AddArg(y) 6179 v.AddArg(v0) 6180 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 6181 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 6182 v2.AddArg(y) 6183 v2.AuxInt = 8 6184 v1.AddArg(v2) 6185 v.AddArg(v1) 6186 return true 6187 } 6188 return false 6189 } 6190 func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { 6191 b := v.Block 6192 _ = b 6193 // match: (Lsh8x64 <t> x y) 6194 // cond: 6195 // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 6196 for { 6197 t := v.Type 6198 x := v.Args[0] 6199 y := v.Args[1] 6200 v.reset(OpAMD64ANDB) 6201 v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) 6202 v0.AddArg(x) 6203 v0.AddArg(y) 6204 v.AddArg(v0) 6205 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 6206 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 6207 v2.AddArg(y) 6208 v2.AuxInt = 8 6209 v1.AddArg(v2) 6210 v.AddArg(v1) 6211 return true 6212 } 6213 return false 6214 } 6215 func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { 6216 b := v.Block 6217 _ = b 6218 // match: (Lsh8x8 <t> x y) 6219 // cond: 6220 // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 6221 for { 6222 t := v.Type 6223 x := v.Args[0] 6224 y := v.Args[1] 6225 v.reset(OpAMD64ANDB) 6226 v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) 6227 v0.AddArg(x) 6228 v0.AddArg(y) 6229 v.AddArg(v0) 6230 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 6231 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 6232 v2.AddArg(y) 6233 v2.AuxInt = 8 6234 v1.AddArg(v2) 6235 v.AddArg(v1) 6236 return true 6237 } 6238 return false 6239 } 6240 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 6241 b := v.Block 6242 _ = b 6243 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 6244 // cond: x.Uses == 1 6245 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 6246 for { 6247 x := v.Args[0] 6248 if x.Op != OpAMD64MOVBload { 6249 break 6250 } 6251 off := x.AuxInt 6252 sym := x.Aux 6253 ptr := x.Args[0] 6254 mem := x.Args[1] 6255 if !(x.Uses == 1) { 6256 break 6257 } 6258 b = x.Block 6259 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 6260 v.reset(OpCopy) 6261 v.AddArg(v0) 6262 v0.AuxInt = off 6263 v0.Aux = sym 6264 v0.AddArg(ptr) 6265 v0.AddArg(mem) 6266 return true 6267 } 6268 // match: (MOVBQSX (ANDBconst [c] x)) 6269 // cond: c & 0x80 == 0 6270 // result: (ANDQconst [c & 0x7f] x) 6271 for { 6272 v_0 := v.Args[0] 6273 if v_0.Op != OpAMD64ANDBconst { 6274 break 6275 } 6276 c := v_0.AuxInt 6277 x := v_0.Args[0] 6278 if !(c&0x80 == 0) { 6279 break 6280 } 6281 v.reset(OpAMD64ANDQconst) 6282 v.AuxInt = c & 0x7f 6283 v.AddArg(x) 6284 return true 6285 } 6286 return false 6287 } 6288 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 6289 b := v.Block 6290 _ = b 6291 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6292 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6293 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6294 for { 6295 off1 := v.AuxInt 6296 sym1 := v.Aux 6297 v_0 := v.Args[0] 6298 if v_0.Op != OpAMD64LEAQ { 6299 break 6300 } 6301 off2 := v_0.AuxInt 6302 sym2 := v_0.Aux 6303 base := v_0.Args[0] 6304 mem := v.Args[1] 6305 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6306 break 6307 } 6308 v.reset(OpAMD64MOVBQSXload) 6309 v.AuxInt = off1 + off2 6310 v.Aux = mergeSym(sym1, sym2) 6311 v.AddArg(base) 6312 v.AddArg(mem) 6313 return true 6314 } 6315 return false 6316 } 6317 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 6318 b := v.Block 6319 _ = b 6320 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 6321 // cond: x.Uses == 1 6322 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 6323 for { 6324 x := v.Args[0] 6325 if x.Op != OpAMD64MOVBload { 6326 break 6327 } 6328 off := x.AuxInt 6329 sym := x.Aux 6330 ptr := x.Args[0] 6331 mem := x.Args[1] 6332 if !(x.Uses == 1) { 6333 break 6334 } 6335 b = x.Block 6336 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 6337 v.reset(OpCopy) 6338 v.AddArg(v0) 6339 v0.AuxInt = off 6340 v0.Aux = sym 6341 v0.AddArg(ptr) 6342 v0.AddArg(mem) 6343 return true 6344 } 6345 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 6346 // cond: x.Uses == 1 6347 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 6348 for { 6349 x := v.Args[0] 6350 if x.Op != OpAMD64MOVBloadidx1 { 6351 break 6352 } 6353 off := x.AuxInt 6354 sym := x.Aux 6355 ptr := x.Args[0] 6356 idx := x.Args[1] 6357 mem := x.Args[2] 6358 if !(x.Uses == 1) { 6359 break 6360 } 6361 b = x.Block 6362 v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type) 6363 v.reset(OpCopy) 6364 v.AddArg(v0) 6365 v0.AuxInt = off 6366 v0.Aux = sym 6367 v0.AddArg(ptr) 6368 v0.AddArg(idx) 6369 v0.AddArg(mem) 6370 return true 6371 } 6372 // match: (MOVBQZX (ANDBconst [c] x)) 6373 // cond: 6374 // result: (ANDQconst [c & 0xff] x) 6375 for { 6376 v_0 := v.Args[0] 6377 if v_0.Op != OpAMD64ANDBconst { 6378 break 6379 } 6380 c := v_0.AuxInt 6381 x := v_0.Args[0] 6382 v.reset(OpAMD64ANDQconst) 6383 v.AuxInt = c & 0xff 6384 v.AddArg(x) 6385 return true 6386 } 6387 return false 6388 } 6389 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { 6390 b := v.Block 6391 _ = b 6392 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 6393 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6394 // result: x 6395 for { 6396 off := v.AuxInt 6397 sym := v.Aux 6398 ptr := v.Args[0] 6399 v_1 := v.Args[1] 6400 if v_1.Op != OpAMD64MOVBstore { 6401 break 6402 } 6403 off2 := v_1.AuxInt 6404 sym2 := v_1.Aux 6405 ptr2 := v_1.Args[0] 6406 x := v_1.Args[1] 6407 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6408 break 6409 } 6410 v.reset(OpCopy) 6411 v.Type = x.Type 6412 v.AddArg(x) 6413 return true 6414 } 6415 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 6416 // cond: is32Bit(off1+off2) 6417 // result: (MOVBload [off1+off2] {sym} ptr mem) 6418 for { 6419 off1 := v.AuxInt 6420 sym := v.Aux 6421 v_0 := v.Args[0] 6422 if v_0.Op != OpAMD64ADDQconst { 6423 break 6424 } 6425 off2 := v_0.AuxInt 6426 ptr := v_0.Args[0] 6427 mem := v.Args[1] 6428 if !(is32Bit(off1 + off2)) { 6429 break 6430 } 6431 v.reset(OpAMD64MOVBload) 6432 v.AuxInt = off1 + off2 6433 v.Aux = sym 6434 v.AddArg(ptr) 6435 v.AddArg(mem) 6436 return true 6437 } 6438 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6439 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6440 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6441 for { 6442 off1 := v.AuxInt 6443 sym1 := v.Aux 6444 v_0 := v.Args[0] 6445 if v_0.Op != OpAMD64LEAQ { 6446 break 6447 } 6448 off2 := v_0.AuxInt 6449 sym2 := v_0.Aux 6450 base := v_0.Args[0] 6451 mem := v.Args[1] 6452 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6453 break 6454 } 6455 v.reset(OpAMD64MOVBload) 6456 v.AuxInt = off1 + off2 6457 v.Aux = mergeSym(sym1, sym2) 6458 v.AddArg(base) 6459 v.AddArg(mem) 6460 return true 6461 } 6462 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6463 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6464 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6465 for { 6466 off1 := v.AuxInt 6467 sym1 := v.Aux 6468 v_0 := v.Args[0] 6469 if v_0.Op != OpAMD64LEAQ1 { 6470 break 6471 } 6472 off2 := v_0.AuxInt 6473 sym2 := v_0.Aux 6474 ptr := v_0.Args[0] 6475 idx := v_0.Args[1] 6476 mem := v.Args[1] 6477 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6478 break 6479 } 6480 v.reset(OpAMD64MOVBloadidx1) 6481 v.AuxInt = off1 + off2 6482 v.Aux = mergeSym(sym1, sym2) 6483 v.AddArg(ptr) 6484 v.AddArg(idx) 6485 v.AddArg(mem) 6486 return true 6487 } 6488 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 6489 // cond: ptr.Op != OpSB 6490 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 6491 for { 6492 off := v.AuxInt 6493 sym := v.Aux 6494 v_0 := v.Args[0] 6495 if v_0.Op != OpAMD64ADDQ { 6496 break 6497 } 6498 ptr := v_0.Args[0] 6499 idx := v_0.Args[1] 6500 mem := v.Args[1] 6501 if !(ptr.Op != OpSB) { 6502 break 6503 } 6504 v.reset(OpAMD64MOVBloadidx1) 6505 v.AuxInt = off 6506 v.Aux = sym 6507 v.AddArg(ptr) 6508 v.AddArg(idx) 6509 v.AddArg(mem) 6510 return true 6511 } 6512 return false 6513 } 6514 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 6515 b := v.Block 6516 _ = b 6517 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 6518 // cond: 6519 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 6520 for { 6521 c := v.AuxInt 6522 sym := v.Aux 6523 v_0 := v.Args[0] 6524 if v_0.Op != OpAMD64ADDQconst { 6525 break 6526 } 6527 d := v_0.AuxInt 6528 ptr := v_0.Args[0] 6529 idx := v.Args[1] 6530 mem := v.Args[2] 6531 v.reset(OpAMD64MOVBloadidx1) 6532 v.AuxInt = c + d 6533 v.Aux = sym 6534 v.AddArg(ptr) 6535 v.AddArg(idx) 6536 v.AddArg(mem) 6537 return true 6538 } 6539 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 6540 // cond: 6541 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 6542 for { 6543 c := v.AuxInt 6544 sym := v.Aux 6545 ptr := v.Args[0] 6546 v_1 := v.Args[1] 6547 if v_1.Op != OpAMD64ADDQconst { 6548 break 6549 } 6550 d := v_1.AuxInt 6551 idx := v_1.Args[0] 6552 mem := v.Args[2] 6553 v.reset(OpAMD64MOVBloadidx1) 6554 v.AuxInt = c + d 6555 v.Aux = sym 6556 v.AddArg(ptr) 6557 v.AddArg(idx) 6558 v.AddArg(mem) 6559 return true 6560 } 6561 return false 6562 } 6563 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 6564 b := v.Block 6565 _ = b 6566 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 6567 // cond: 6568 // result: (MOVBstore [off] {sym} ptr x mem) 6569 for { 6570 off := v.AuxInt 6571 sym := v.Aux 6572 ptr := v.Args[0] 6573 v_1 := v.Args[1] 6574 if v_1.Op != OpAMD64MOVBQSX { 6575 break 6576 } 6577 x := v_1.Args[0] 6578 mem := v.Args[2] 6579 v.reset(OpAMD64MOVBstore) 6580 v.AuxInt = off 6581 v.Aux = sym 6582 v.AddArg(ptr) 6583 v.AddArg(x) 6584 v.AddArg(mem) 6585 return true 6586 } 6587 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 6588 // cond: 6589 // result: (MOVBstore [off] {sym} ptr x mem) 6590 for { 6591 off := v.AuxInt 6592 sym := v.Aux 6593 ptr := v.Args[0] 6594 v_1 := v.Args[1] 6595 if v_1.Op != OpAMD64MOVBQZX { 6596 break 6597 } 6598 x := v_1.Args[0] 6599 mem := v.Args[2] 6600 v.reset(OpAMD64MOVBstore) 6601 v.AuxInt = off 6602 v.Aux = sym 6603 v.AddArg(ptr) 6604 v.AddArg(x) 6605 v.AddArg(mem) 6606 return true 6607 } 6608 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6609 // cond: is32Bit(off1+off2) 6610 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 6611 for { 6612 off1 := v.AuxInt 6613 sym := v.Aux 6614 v_0 := v.Args[0] 6615 if v_0.Op != OpAMD64ADDQconst { 6616 break 6617 } 6618 off2 := v_0.AuxInt 6619 ptr := v_0.Args[0] 6620 val := v.Args[1] 6621 mem := v.Args[2] 6622 if !(is32Bit(off1 + off2)) { 6623 break 6624 } 6625 v.reset(OpAMD64MOVBstore) 6626 v.AuxInt = off1 + off2 6627 v.Aux = sym 6628 v.AddArg(ptr) 6629 v.AddArg(val) 6630 v.AddArg(mem) 6631 return true 6632 } 6633 // match: (MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) 6634 // cond: validOff(off) 6635 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 6636 for { 6637 off := v.AuxInt 6638 sym := v.Aux 6639 ptr := v.Args[0] 6640 v_1 := v.Args[1] 6641 if v_1.Op != OpAMD64MOVBconst { 6642 break 6643 } 6644 c := v_1.AuxInt 6645 mem := v.Args[2] 6646 if !(validOff(off)) { 6647 break 6648 } 6649 v.reset(OpAMD64MOVBstoreconst) 6650 v.AuxInt = makeValAndOff(int64(int8(c)), off) 6651 v.Aux = sym 6652 v.AddArg(ptr) 6653 v.AddArg(mem) 6654 return true 6655 } 6656 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6657 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6658 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6659 for { 6660 off1 := v.AuxInt 6661 sym1 := v.Aux 6662 v_0 := v.Args[0] 6663 if v_0.Op != OpAMD64LEAQ { 6664 break 6665 } 6666 off2 := v_0.AuxInt 6667 sym2 := v_0.Aux 6668 base := v_0.Args[0] 6669 val := v.Args[1] 6670 mem := v.Args[2] 6671 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6672 break 6673 } 6674 v.reset(OpAMD64MOVBstore) 6675 v.AuxInt = off1 + off2 6676 v.Aux = mergeSym(sym1, sym2) 6677 v.AddArg(base) 6678 v.AddArg(val) 6679 v.AddArg(mem) 6680 return true 6681 } 6682 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 6683 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6684 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6685 for { 6686 off1 := v.AuxInt 6687 sym1 := v.Aux 6688 v_0 := v.Args[0] 6689 if v_0.Op != OpAMD64LEAQ1 { 6690 break 6691 } 6692 off2 := v_0.AuxInt 6693 sym2 := v_0.Aux 6694 ptr := v_0.Args[0] 6695 idx := v_0.Args[1] 6696 val := v.Args[1] 6697 mem := v.Args[2] 6698 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6699 break 6700 } 6701 v.reset(OpAMD64MOVBstoreidx1) 6702 v.AuxInt = off1 + off2 6703 v.Aux = mergeSym(sym1, sym2) 6704 v.AddArg(ptr) 6705 v.AddArg(idx) 6706 v.AddArg(val) 6707 v.AddArg(mem) 6708 return true 6709 } 6710 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 6711 // cond: ptr.Op != OpSB 6712 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 6713 for { 6714 off := v.AuxInt 6715 sym := v.Aux 6716 v_0 := v.Args[0] 6717 if v_0.Op != OpAMD64ADDQ { 6718 break 6719 } 6720 ptr := v_0.Args[0] 6721 idx := v_0.Args[1] 6722 val := v.Args[1] 6723 mem := v.Args[2] 6724 if !(ptr.Op != OpSB) { 6725 break 6726 } 6727 v.reset(OpAMD64MOVBstoreidx1) 6728 v.AuxInt = off 6729 v.Aux = sym 6730 v.AddArg(ptr) 6731 v.AddArg(idx) 6732 v.AddArg(val) 6733 v.AddArg(mem) 6734 return true 6735 } 6736 return false 6737 } 6738 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 6739 b := v.Block 6740 _ = b 6741 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 6742 // cond: ValAndOff(sc).canAdd(off) 6743 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6744 for { 6745 sc := v.AuxInt 6746 s := v.Aux 6747 v_0 := v.Args[0] 6748 if v_0.Op != OpAMD64ADDQconst { 6749 break 6750 } 6751 off := v_0.AuxInt 6752 ptr := v_0.Args[0] 6753 mem := v.Args[1] 6754 if !(ValAndOff(sc).canAdd(off)) { 6755 break 6756 } 6757 v.reset(OpAMD64MOVBstoreconst) 6758 v.AuxInt = ValAndOff(sc).add(off) 6759 v.Aux = s 6760 v.AddArg(ptr) 6761 v.AddArg(mem) 6762 return true 6763 } 6764 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 6765 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6766 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6767 for { 6768 sc := v.AuxInt 6769 sym1 := v.Aux 6770 v_0 := v.Args[0] 6771 if v_0.Op != OpAMD64LEAQ { 6772 break 6773 } 6774 off := v_0.AuxInt 6775 sym2 := v_0.Aux 6776 ptr := v_0.Args[0] 6777 mem := v.Args[1] 6778 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6779 break 6780 } 6781 v.reset(OpAMD64MOVBstoreconst) 6782 v.AuxInt = ValAndOff(sc).add(off) 6783 v.Aux = mergeSym(sym1, sym2) 6784 v.AddArg(ptr) 6785 v.AddArg(mem) 6786 return true 6787 } 6788 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 6789 // cond: canMergeSym(sym1, sym2) 6790 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6791 for { 6792 x := v.AuxInt 6793 sym1 := v.Aux 6794 v_0 := v.Args[0] 6795 if v_0.Op != OpAMD64LEAQ1 { 6796 break 6797 } 6798 off := v_0.AuxInt 6799 sym2 := v_0.Aux 6800 ptr := v_0.Args[0] 6801 idx := v_0.Args[1] 6802 mem := v.Args[1] 6803 if !(canMergeSym(sym1, sym2)) { 6804 break 6805 } 6806 v.reset(OpAMD64MOVBstoreconstidx1) 6807 v.AuxInt = ValAndOff(x).add(off) 6808 v.Aux = mergeSym(sym1, sym2) 6809 v.AddArg(ptr) 6810 v.AddArg(idx) 6811 v.AddArg(mem) 6812 return true 6813 } 6814 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 6815 // cond: 6816 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 6817 for { 6818 x := v.AuxInt 6819 sym := v.Aux 6820 v_0 := v.Args[0] 6821 if v_0.Op != OpAMD64ADDQ { 6822 break 6823 } 6824 ptr := v_0.Args[0] 6825 idx := v_0.Args[1] 6826 mem := v.Args[1] 6827 v.reset(OpAMD64MOVBstoreconstidx1) 6828 v.AuxInt = x 6829 v.Aux = sym 6830 v.AddArg(ptr) 6831 v.AddArg(idx) 6832 v.AddArg(mem) 6833 return true 6834 } 6835 return false 6836 } 6837 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 6838 b := v.Block 6839 _ = b 6840 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 6841 // cond: 6842 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6843 for { 6844 x := v.AuxInt 6845 sym := v.Aux 6846 v_0 := v.Args[0] 6847 if v_0.Op != OpAMD64ADDQconst { 6848 break 6849 } 6850 c := v_0.AuxInt 6851 ptr := v_0.Args[0] 6852 idx := v.Args[1] 6853 mem := v.Args[2] 6854 v.reset(OpAMD64MOVBstoreconstidx1) 6855 v.AuxInt = ValAndOff(x).add(c) 6856 v.Aux = sym 6857 v.AddArg(ptr) 6858 v.AddArg(idx) 6859 v.AddArg(mem) 6860 return true 6861 } 6862 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 6863 // cond: 6864 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6865 for { 6866 x := v.AuxInt 6867 sym := v.Aux 6868 ptr := v.Args[0] 6869 v_1 := v.Args[1] 6870 if v_1.Op != OpAMD64ADDQconst { 6871 break 6872 } 6873 c := v_1.AuxInt 6874 idx := v_1.Args[0] 6875 mem := v.Args[2] 6876 v.reset(OpAMD64MOVBstoreconstidx1) 6877 v.AuxInt = ValAndOff(x).add(c) 6878 v.Aux = sym 6879 v.AddArg(ptr) 6880 v.AddArg(idx) 6881 v.AddArg(mem) 6882 return true 6883 } 6884 return false 6885 } 6886 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 6887 b := v.Block 6888 _ = b 6889 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6890 // cond: 6891 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 6892 for { 6893 c := v.AuxInt 6894 sym := v.Aux 6895 v_0 := v.Args[0] 6896 if v_0.Op != OpAMD64ADDQconst { 6897 break 6898 } 6899 d := v_0.AuxInt 6900 ptr := v_0.Args[0] 6901 idx := v.Args[1] 6902 val := v.Args[2] 6903 mem := v.Args[3] 6904 v.reset(OpAMD64MOVBstoreidx1) 6905 v.AuxInt = c + d 6906 v.Aux = sym 6907 v.AddArg(ptr) 6908 v.AddArg(idx) 6909 v.AddArg(val) 6910 v.AddArg(mem) 6911 return true 6912 } 6913 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6914 // cond: 6915 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 6916 for { 6917 c := v.AuxInt 6918 sym := v.Aux 6919 ptr := v.Args[0] 6920 v_1 := v.Args[1] 6921 if v_1.Op != OpAMD64ADDQconst { 6922 break 6923 } 6924 d := v_1.AuxInt 6925 idx := v_1.Args[0] 6926 val := v.Args[2] 6927 mem := v.Args[3] 6928 v.reset(OpAMD64MOVBstoreidx1) 6929 v.AuxInt = c + d 6930 v.Aux = sym 6931 v.AddArg(ptr) 6932 v.AddArg(idx) 6933 v.AddArg(val) 6934 v.AddArg(mem) 6935 return true 6936 } 6937 return false 6938 } 6939 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 6940 b := v.Block 6941 _ = b 6942 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 6943 // cond: x.Uses == 1 6944 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 6945 for { 6946 x := v.Args[0] 6947 if x.Op != OpAMD64MOVLload { 6948 break 6949 } 6950 off := x.AuxInt 6951 sym := x.Aux 6952 ptr := x.Args[0] 6953 mem := x.Args[1] 6954 if !(x.Uses == 1) { 6955 break 6956 } 6957 b = x.Block 6958 v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 6959 v.reset(OpCopy) 6960 v.AddArg(v0) 6961 v0.AuxInt = off 6962 v0.Aux = sym 6963 v0.AddArg(ptr) 6964 v0.AddArg(mem) 6965 return true 6966 } 6967 // match: (MOVLQSX (ANDLconst [c] x)) 6968 // cond: c & 0x80000000 == 0 6969 // result: (ANDQconst [c & 0x7fffffff] x) 6970 for { 6971 v_0 := v.Args[0] 6972 if v_0.Op != OpAMD64ANDLconst { 6973 break 6974 } 6975 c := v_0.AuxInt 6976 x := v_0.Args[0] 6977 if !(c&0x80000000 == 0) { 6978 break 6979 } 6980 v.reset(OpAMD64ANDQconst) 6981 v.AuxInt = c & 0x7fffffff 6982 v.AddArg(x) 6983 return true 6984 } 6985 return false 6986 } 6987 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 6988 b := v.Block 6989 _ = b 6990 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6991 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6992 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6993 for { 6994 off1 := v.AuxInt 6995 sym1 := v.Aux 6996 v_0 := v.Args[0] 6997 if v_0.Op != OpAMD64LEAQ { 6998 break 6999 } 7000 off2 := v_0.AuxInt 7001 sym2 := v_0.Aux 7002 base := v_0.Args[0] 7003 mem := v.Args[1] 7004 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7005 break 7006 } 7007 v.reset(OpAMD64MOVLQSXload) 7008 v.AuxInt = off1 + off2 7009 v.Aux = mergeSym(sym1, sym2) 7010 v.AddArg(base) 7011 v.AddArg(mem) 7012 return true 7013 } 7014 return false 7015 } 7016 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 7017 b := v.Block 7018 _ = b 7019 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 7020 // cond: x.Uses == 1 7021 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 7022 for { 7023 x := v.Args[0] 7024 if x.Op != OpAMD64MOVLload { 7025 break 7026 } 7027 off := x.AuxInt 7028 sym := x.Aux 7029 ptr := x.Args[0] 7030 mem := x.Args[1] 7031 if !(x.Uses == 1) { 7032 break 7033 } 7034 b = x.Block 7035 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 7036 v.reset(OpCopy) 7037 v.AddArg(v0) 7038 v0.AuxInt = off 7039 v0.Aux = sym 7040 v0.AddArg(ptr) 7041 v0.AddArg(mem) 7042 return true 7043 } 7044 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 7045 // cond: x.Uses == 1 7046 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 7047 for { 7048 x := v.Args[0] 7049 if x.Op != OpAMD64MOVLloadidx1 { 7050 break 7051 } 7052 off := x.AuxInt 7053 sym := x.Aux 7054 ptr := x.Args[0] 7055 idx := x.Args[1] 7056 mem := x.Args[2] 7057 if !(x.Uses == 1) { 7058 break 7059 } 7060 b = x.Block 7061 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 7062 v.reset(OpCopy) 7063 v.AddArg(v0) 7064 v0.AuxInt = off 7065 v0.Aux = sym 7066 v0.AddArg(ptr) 7067 v0.AddArg(idx) 7068 v0.AddArg(mem) 7069 return true 7070 } 7071 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 7072 // cond: x.Uses == 1 7073 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 7074 for { 7075 x := v.Args[0] 7076 if x.Op != OpAMD64MOVLloadidx4 { 7077 break 7078 } 7079 off := x.AuxInt 7080 sym := x.Aux 7081 ptr := x.Args[0] 7082 idx := x.Args[1] 7083 mem := x.Args[2] 7084 if !(x.Uses == 1) { 7085 break 7086 } 7087 b = x.Block 7088 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type) 7089 v.reset(OpCopy) 7090 v.AddArg(v0) 7091 v0.AuxInt = off 7092 v0.Aux = sym 7093 v0.AddArg(ptr) 7094 v0.AddArg(idx) 7095 v0.AddArg(mem) 7096 return true 7097 } 7098 // match: (MOVLQZX (ANDLconst [c] x)) 7099 // cond: c & 0x80000000 == 0 7100 // result: (ANDQconst [c & 0x7fffffff] x) 7101 for { 7102 v_0 := v.Args[0] 7103 if v_0.Op != OpAMD64ANDLconst { 7104 break 7105 } 7106 c := v_0.AuxInt 7107 x := v_0.Args[0] 7108 if !(c&0x80000000 == 0) { 7109 break 7110 } 7111 v.reset(OpAMD64ANDQconst) 7112 v.AuxInt = c & 0x7fffffff 7113 v.AddArg(x) 7114 return true 7115 } 7116 return false 7117 } 7118 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 7119 b := v.Block 7120 _ = b 7121 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 7122 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 7123 // result: x 7124 for { 7125 off := v.AuxInt 7126 sym := v.Aux 7127 ptr := v.Args[0] 7128 v_1 := v.Args[1] 7129 if v_1.Op != OpAMD64MOVLstore { 7130 break 7131 } 7132 off2 := v_1.AuxInt 7133 sym2 := v_1.Aux 7134 ptr2 := v_1.Args[0] 7135 x := v_1.Args[1] 7136 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 7137 break 7138 } 7139 v.reset(OpCopy) 7140 v.Type = x.Type 7141 v.AddArg(x) 7142 return true 7143 } 7144 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 7145 // cond: is32Bit(off1+off2) 7146 // result: (MOVLload [off1+off2] {sym} ptr mem) 7147 for { 7148 off1 := v.AuxInt 7149 sym := v.Aux 7150 v_0 := v.Args[0] 7151 if v_0.Op != OpAMD64ADDQconst { 7152 break 7153 } 7154 off2 := v_0.AuxInt 7155 ptr := v_0.Args[0] 7156 mem := v.Args[1] 7157 if !(is32Bit(off1 + off2)) { 7158 break 7159 } 7160 v.reset(OpAMD64MOVLload) 7161 v.AuxInt = off1 + off2 7162 v.Aux = sym 7163 v.AddArg(ptr) 7164 v.AddArg(mem) 7165 return true 7166 } 7167 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7168 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7169 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7170 for { 7171 off1 := v.AuxInt 7172 sym1 := v.Aux 7173 v_0 := v.Args[0] 7174 if v_0.Op != OpAMD64LEAQ { 7175 break 7176 } 7177 off2 := v_0.AuxInt 7178 sym2 := v_0.Aux 7179 base := v_0.Args[0] 7180 mem := v.Args[1] 7181 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7182 break 7183 } 7184 v.reset(OpAMD64MOVLload) 7185 v.AuxInt = off1 + off2 7186 v.Aux = mergeSym(sym1, sym2) 7187 v.AddArg(base) 7188 v.AddArg(mem) 7189 return true 7190 } 7191 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7192 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7193 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7194 for { 7195 off1 := v.AuxInt 7196 sym1 := v.Aux 7197 v_0 := v.Args[0] 7198 if v_0.Op != OpAMD64LEAQ1 { 7199 break 7200 } 7201 off2 := v_0.AuxInt 7202 sym2 := v_0.Aux 7203 ptr := v_0.Args[0] 7204 idx := v_0.Args[1] 7205 mem := v.Args[1] 7206 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7207 break 7208 } 7209 v.reset(OpAMD64MOVLloadidx1) 7210 v.AuxInt = off1 + off2 7211 v.Aux = mergeSym(sym1, sym2) 7212 v.AddArg(ptr) 7213 v.AddArg(idx) 7214 v.AddArg(mem) 7215 return true 7216 } 7217 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7218 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7219 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7220 for { 7221 off1 := v.AuxInt 7222 sym1 := v.Aux 7223 v_0 := v.Args[0] 7224 if v_0.Op != OpAMD64LEAQ4 { 7225 break 7226 } 7227 off2 := v_0.AuxInt 7228 sym2 := v_0.Aux 7229 ptr := v_0.Args[0] 7230 idx := v_0.Args[1] 7231 mem := v.Args[1] 7232 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7233 break 7234 } 7235 v.reset(OpAMD64MOVLloadidx4) 7236 v.AuxInt = off1 + off2 7237 v.Aux = mergeSym(sym1, sym2) 7238 v.AddArg(ptr) 7239 v.AddArg(idx) 7240 v.AddArg(mem) 7241 return true 7242 } 7243 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 7244 // cond: ptr.Op != OpSB 7245 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 7246 for { 7247 off := v.AuxInt 7248 sym := v.Aux 7249 v_0 := v.Args[0] 7250 if v_0.Op != OpAMD64ADDQ { 7251 break 7252 } 7253 ptr := v_0.Args[0] 7254 idx := v_0.Args[1] 7255 mem := v.Args[1] 7256 if !(ptr.Op != OpSB) { 7257 break 7258 } 7259 v.reset(OpAMD64MOVLloadidx1) 7260 v.AuxInt = off 7261 v.Aux = sym 7262 v.AddArg(ptr) 7263 v.AddArg(idx) 7264 v.AddArg(mem) 7265 return true 7266 } 7267 return false 7268 } 7269 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 7270 b := v.Block 7271 _ = b 7272 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7273 // cond: 7274 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7275 for { 7276 c := v.AuxInt 7277 sym := v.Aux 7278 ptr := v.Args[0] 7279 v_1 := v.Args[1] 7280 if v_1.Op != OpAMD64SHLQconst { 7281 break 7282 } 7283 if v_1.AuxInt != 2 { 7284 break 7285 } 7286 idx := v_1.Args[0] 7287 mem := v.Args[2] 7288 v.reset(OpAMD64MOVLloadidx4) 7289 v.AuxInt = c 7290 v.Aux = sym 7291 v.AddArg(ptr) 7292 v.AddArg(idx) 7293 v.AddArg(mem) 7294 return true 7295 } 7296 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7297 // cond: 7298 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7299 for { 7300 c := v.AuxInt 7301 sym := v.Aux 7302 v_0 := v.Args[0] 7303 if v_0.Op != OpAMD64ADDQconst { 7304 break 7305 } 7306 d := v_0.AuxInt 7307 ptr := v_0.Args[0] 7308 idx := v.Args[1] 7309 mem := v.Args[2] 7310 v.reset(OpAMD64MOVLloadidx1) 7311 v.AuxInt = c + d 7312 v.Aux = sym 7313 v.AddArg(ptr) 7314 v.AddArg(idx) 7315 v.AddArg(mem) 7316 return true 7317 } 7318 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7319 // cond: 7320 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7321 for { 7322 c := v.AuxInt 7323 sym := v.Aux 7324 ptr := v.Args[0] 7325 v_1 := v.Args[1] 7326 if v_1.Op != OpAMD64ADDQconst { 7327 break 7328 } 7329 d := v_1.AuxInt 7330 idx := v_1.Args[0] 7331 mem := v.Args[2] 7332 v.reset(OpAMD64MOVLloadidx1) 7333 v.AuxInt = c + d 7334 v.Aux = sym 7335 v.AddArg(ptr) 7336 v.AddArg(idx) 7337 v.AddArg(mem) 7338 return true 7339 } 7340 return false 7341 } 7342 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { 7343 b := v.Block 7344 _ = b 7345 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 7346 // cond: 7347 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 7348 for { 7349 c := v.AuxInt 7350 sym := v.Aux 7351 v_0 := v.Args[0] 7352 if v_0.Op != OpAMD64ADDQconst { 7353 break 7354 } 7355 d := v_0.AuxInt 7356 ptr := v_0.Args[0] 7357 idx := v.Args[1] 7358 mem := v.Args[2] 7359 v.reset(OpAMD64MOVLloadidx4) 7360 v.AuxInt = c + d 7361 v.Aux = sym 7362 v.AddArg(ptr) 7363 v.AddArg(idx) 7364 v.AddArg(mem) 7365 return true 7366 } 7367 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 7368 // cond: 7369 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 7370 for { 7371 c := v.AuxInt 7372 sym := v.Aux 7373 ptr := v.Args[0] 7374 v_1 := v.Args[1] 7375 if v_1.Op != OpAMD64ADDQconst { 7376 break 7377 } 7378 d := v_1.AuxInt 7379 idx := v_1.Args[0] 7380 mem := v.Args[2] 7381 v.reset(OpAMD64MOVLloadidx4) 7382 v.AuxInt = c + 4*d 7383 v.Aux = sym 7384 v.AddArg(ptr) 7385 v.AddArg(idx) 7386 v.AddArg(mem) 7387 return true 7388 } 7389 return false 7390 } 7391 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { 7392 b := v.Block 7393 _ = b 7394 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 7395 // cond: 7396 // result: (MOVLstore [off] {sym} ptr x mem) 7397 for { 7398 off := v.AuxInt 7399 sym := v.Aux 7400 ptr := v.Args[0] 7401 v_1 := v.Args[1] 7402 if v_1.Op != OpAMD64MOVLQSX { 7403 break 7404 } 7405 x := v_1.Args[0] 7406 mem := v.Args[2] 7407 v.reset(OpAMD64MOVLstore) 7408 v.AuxInt = off 7409 v.Aux = sym 7410 v.AddArg(ptr) 7411 v.AddArg(x) 7412 v.AddArg(mem) 7413 return true 7414 } 7415 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 7416 // cond: 7417 // result: (MOVLstore [off] {sym} ptr x mem) 7418 for { 7419 off := v.AuxInt 7420 sym := v.Aux 7421 ptr := v.Args[0] 7422 v_1 := v.Args[1] 7423 if v_1.Op != OpAMD64MOVLQZX { 7424 break 7425 } 7426 x := v_1.Args[0] 7427 mem := v.Args[2] 7428 v.reset(OpAMD64MOVLstore) 7429 v.AuxInt = off 7430 v.Aux = sym 7431 v.AddArg(ptr) 7432 v.AddArg(x) 7433 v.AddArg(mem) 7434 return true 7435 } 7436 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7437 // cond: is32Bit(off1+off2) 7438 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 7439 for { 7440 off1 := v.AuxInt 7441 sym := v.Aux 7442 v_0 := v.Args[0] 7443 if v_0.Op != OpAMD64ADDQconst { 7444 break 7445 } 7446 off2 := v_0.AuxInt 7447 ptr := v_0.Args[0] 7448 val := v.Args[1] 7449 mem := v.Args[2] 7450 if !(is32Bit(off1 + off2)) { 7451 break 7452 } 7453 v.reset(OpAMD64MOVLstore) 7454 v.AuxInt = off1 + off2 7455 v.Aux = sym 7456 v.AddArg(ptr) 7457 v.AddArg(val) 7458 v.AddArg(mem) 7459 return true 7460 } 7461 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 7462 // cond: validOff(off) 7463 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 7464 for { 7465 off := v.AuxInt 7466 sym := v.Aux 7467 ptr := v.Args[0] 7468 v_1 := v.Args[1] 7469 if v_1.Op != OpAMD64MOVLconst { 7470 break 7471 } 7472 c := v_1.AuxInt 7473 mem := v.Args[2] 7474 if !(validOff(off)) { 7475 break 7476 } 7477 v.reset(OpAMD64MOVLstoreconst) 7478 v.AuxInt = makeValAndOff(int64(int32(c)), off) 7479 v.Aux = sym 7480 v.AddArg(ptr) 7481 v.AddArg(mem) 7482 return true 7483 } 7484 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7485 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7486 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7487 for { 7488 off1 := v.AuxInt 7489 sym1 := v.Aux 7490 v_0 := v.Args[0] 7491 if v_0.Op != OpAMD64LEAQ { 7492 break 7493 } 7494 off2 := v_0.AuxInt 7495 sym2 := v_0.Aux 7496 base := v_0.Args[0] 7497 val := v.Args[1] 7498 mem := v.Args[2] 7499 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7500 break 7501 } 7502 v.reset(OpAMD64MOVLstore) 7503 v.AuxInt = off1 + off2 7504 v.Aux = mergeSym(sym1, sym2) 7505 v.AddArg(base) 7506 v.AddArg(val) 7507 v.AddArg(mem) 7508 return true 7509 } 7510 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7511 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7512 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7513 for { 7514 off1 := v.AuxInt 7515 sym1 := v.Aux 7516 v_0 := v.Args[0] 7517 if v_0.Op != OpAMD64LEAQ1 { 7518 break 7519 } 7520 off2 := v_0.AuxInt 7521 sym2 := v_0.Aux 7522 ptr := v_0.Args[0] 7523 idx := v_0.Args[1] 7524 val := v.Args[1] 7525 mem := v.Args[2] 7526 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7527 break 7528 } 7529 v.reset(OpAMD64MOVLstoreidx1) 7530 v.AuxInt = off1 + off2 7531 v.Aux = mergeSym(sym1, sym2) 7532 v.AddArg(ptr) 7533 v.AddArg(idx) 7534 v.AddArg(val) 7535 v.AddArg(mem) 7536 return true 7537 } 7538 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 7539 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7540 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7541 for { 7542 off1 := v.AuxInt 7543 sym1 := v.Aux 7544 v_0 := v.Args[0] 7545 if v_0.Op != OpAMD64LEAQ4 { 7546 break 7547 } 7548 off2 := v_0.AuxInt 7549 sym2 := v_0.Aux 7550 ptr := v_0.Args[0] 7551 idx := v_0.Args[1] 7552 val := v.Args[1] 7553 mem := v.Args[2] 7554 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7555 break 7556 } 7557 v.reset(OpAMD64MOVLstoreidx4) 7558 v.AuxInt = off1 + off2 7559 v.Aux = mergeSym(sym1, sym2) 7560 v.AddArg(ptr) 7561 v.AddArg(idx) 7562 v.AddArg(val) 7563 v.AddArg(mem) 7564 return true 7565 } 7566 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 7567 // cond: ptr.Op != OpSB 7568 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 7569 for { 7570 off := v.AuxInt 7571 sym := v.Aux 7572 v_0 := v.Args[0] 7573 if v_0.Op != OpAMD64ADDQ { 7574 break 7575 } 7576 ptr := v_0.Args[0] 7577 idx := v_0.Args[1] 7578 val := v.Args[1] 7579 mem := v.Args[2] 7580 if !(ptr.Op != OpSB) { 7581 break 7582 } 7583 v.reset(OpAMD64MOVLstoreidx1) 7584 v.AuxInt = off 7585 v.Aux = sym 7586 v.AddArg(ptr) 7587 v.AddArg(idx) 7588 v.AddArg(val) 7589 v.AddArg(mem) 7590 return true 7591 } 7592 return false 7593 } 7594 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { 7595 b := v.Block 7596 _ = b 7597 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 7598 // cond: ValAndOff(sc).canAdd(off) 7599 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7600 for { 7601 sc := v.AuxInt 7602 s := v.Aux 7603 v_0 := v.Args[0] 7604 if v_0.Op != OpAMD64ADDQconst { 7605 break 7606 } 7607 off := v_0.AuxInt 7608 ptr := v_0.Args[0] 7609 mem := v.Args[1] 7610 if !(ValAndOff(sc).canAdd(off)) { 7611 break 7612 } 7613 v.reset(OpAMD64MOVLstoreconst) 7614 v.AuxInt = ValAndOff(sc).add(off) 7615 v.Aux = s 7616 v.AddArg(ptr) 7617 v.AddArg(mem) 7618 return true 7619 } 7620 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 7621 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7622 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7623 for { 7624 sc := v.AuxInt 7625 sym1 := v.Aux 7626 v_0 := v.Args[0] 7627 if v_0.Op != OpAMD64LEAQ { 7628 break 7629 } 7630 off := v_0.AuxInt 7631 sym2 := v_0.Aux 7632 ptr := v_0.Args[0] 7633 mem := v.Args[1] 7634 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7635 break 7636 } 7637 v.reset(OpAMD64MOVLstoreconst) 7638 v.AuxInt = ValAndOff(sc).add(off) 7639 v.Aux = mergeSym(sym1, sym2) 7640 v.AddArg(ptr) 7641 v.AddArg(mem) 7642 return true 7643 } 7644 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 7645 // cond: canMergeSym(sym1, sym2) 7646 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7647 for { 7648 x := v.AuxInt 7649 sym1 := v.Aux 7650 v_0 := v.Args[0] 7651 if v_0.Op != OpAMD64LEAQ1 { 7652 break 7653 } 7654 off := v_0.AuxInt 7655 sym2 := v_0.Aux 7656 ptr := v_0.Args[0] 7657 idx := v_0.Args[1] 7658 mem := v.Args[1] 7659 if !(canMergeSym(sym1, sym2)) { 7660 break 7661 } 7662 v.reset(OpAMD64MOVLstoreconstidx1) 7663 v.AuxInt = ValAndOff(x).add(off) 7664 v.Aux = mergeSym(sym1, sym2) 7665 v.AddArg(ptr) 7666 v.AddArg(idx) 7667 v.AddArg(mem) 7668 return true 7669 } 7670 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 7671 // cond: canMergeSym(sym1, sym2) 7672 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7673 for { 7674 x := v.AuxInt 7675 sym1 := v.Aux 7676 v_0 := v.Args[0] 7677 if v_0.Op != OpAMD64LEAQ4 { 7678 break 7679 } 7680 off := v_0.AuxInt 7681 sym2 := v_0.Aux 7682 ptr := v_0.Args[0] 7683 idx := v_0.Args[1] 7684 mem := v.Args[1] 7685 if !(canMergeSym(sym1, sym2)) { 7686 break 7687 } 7688 v.reset(OpAMD64MOVLstoreconstidx4) 7689 v.AuxInt = ValAndOff(x).add(off) 7690 v.Aux = mergeSym(sym1, sym2) 7691 v.AddArg(ptr) 7692 v.AddArg(idx) 7693 v.AddArg(mem) 7694 return true 7695 } 7696 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 7697 // cond: 7698 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 7699 for { 7700 x := v.AuxInt 7701 sym := v.Aux 7702 v_0 := v.Args[0] 7703 if v_0.Op != OpAMD64ADDQ { 7704 break 7705 } 7706 ptr := v_0.Args[0] 7707 idx := v_0.Args[1] 7708 mem := v.Args[1] 7709 v.reset(OpAMD64MOVLstoreconstidx1) 7710 v.AuxInt = x 7711 v.Aux = sym 7712 v.AddArg(ptr) 7713 v.AddArg(idx) 7714 v.AddArg(mem) 7715 return true 7716 } 7717 return false 7718 } 7719 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { 7720 b := v.Block 7721 _ = b 7722 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7723 // cond: 7724 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 7725 for { 7726 c := v.AuxInt 7727 sym := v.Aux 7728 ptr := v.Args[0] 7729 v_1 := v.Args[1] 7730 if v_1.Op != OpAMD64SHLQconst { 7731 break 7732 } 7733 if v_1.AuxInt != 2 { 7734 break 7735 } 7736 idx := v_1.Args[0] 7737 mem := v.Args[2] 7738 v.reset(OpAMD64MOVLstoreconstidx4) 7739 v.AuxInt = c 7740 v.Aux = sym 7741 v.AddArg(ptr) 7742 v.AddArg(idx) 7743 v.AddArg(mem) 7744 return true 7745 } 7746 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7747 // cond: 7748 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7749 for { 7750 x := v.AuxInt 7751 sym := v.Aux 7752 v_0 := v.Args[0] 7753 if v_0.Op != OpAMD64ADDQconst { 7754 break 7755 } 7756 c := v_0.AuxInt 7757 ptr := v_0.Args[0] 7758 idx := v.Args[1] 7759 mem := v.Args[2] 7760 v.reset(OpAMD64MOVLstoreconstidx1) 7761 v.AuxInt = ValAndOff(x).add(c) 7762 v.Aux = sym 7763 v.AddArg(ptr) 7764 v.AddArg(idx) 7765 v.AddArg(mem) 7766 return true 7767 } 7768 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7769 // cond: 7770 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7771 for { 7772 x := v.AuxInt 7773 sym := v.Aux 7774 ptr := v.Args[0] 7775 v_1 := v.Args[1] 7776 if v_1.Op != OpAMD64ADDQconst { 7777 break 7778 } 7779 c := v_1.AuxInt 7780 idx := v_1.Args[0] 7781 mem := v.Args[2] 7782 v.reset(OpAMD64MOVLstoreconstidx1) 7783 v.AuxInt = ValAndOff(x).add(c) 7784 v.Aux = sym 7785 v.AddArg(ptr) 7786 v.AddArg(idx) 7787 v.AddArg(mem) 7788 return true 7789 } 7790 return false 7791 } 7792 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { 7793 b := v.Block 7794 _ = b 7795 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 7796 // cond: 7797 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7798 for { 7799 x := v.AuxInt 7800 sym := v.Aux 7801 v_0 := v.Args[0] 7802 if v_0.Op != OpAMD64ADDQconst { 7803 break 7804 } 7805 c := v_0.AuxInt 7806 ptr := v_0.Args[0] 7807 idx := v.Args[1] 7808 mem := v.Args[2] 7809 v.reset(OpAMD64MOVLstoreconstidx4) 7810 v.AuxInt = ValAndOff(x).add(c) 7811 v.Aux = sym 7812 v.AddArg(ptr) 7813 v.AddArg(idx) 7814 v.AddArg(mem) 7815 return true 7816 } 7817 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 7818 // cond: 7819 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 7820 for { 7821 x := v.AuxInt 7822 sym := v.Aux 7823 ptr := v.Args[0] 7824 v_1 := v.Args[1] 7825 if v_1.Op != OpAMD64ADDQconst { 7826 break 7827 } 7828 c := v_1.AuxInt 7829 idx := v_1.Args[0] 7830 mem := v.Args[2] 7831 v.reset(OpAMD64MOVLstoreconstidx4) 7832 v.AuxInt = ValAndOff(x).add(4 * c) 7833 v.Aux = sym 7834 v.AddArg(ptr) 7835 v.AddArg(idx) 7836 v.AddArg(mem) 7837 return true 7838 } 7839 return false 7840 } 7841 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { 7842 b := v.Block 7843 _ = b 7844 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 7845 // cond: 7846 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 7847 for { 7848 c := v.AuxInt 7849 sym := v.Aux 7850 ptr := v.Args[0] 7851 v_1 := v.Args[1] 7852 if v_1.Op != OpAMD64SHLQconst { 7853 break 7854 } 7855 if v_1.AuxInt != 2 { 7856 break 7857 } 7858 idx := v_1.Args[0] 7859 val := v.Args[2] 7860 mem := v.Args[3] 7861 v.reset(OpAMD64MOVLstoreidx4) 7862 v.AuxInt = c 7863 v.Aux = sym 7864 v.AddArg(ptr) 7865 v.AddArg(idx) 7866 v.AddArg(val) 7867 v.AddArg(mem) 7868 return true 7869 } 7870 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7871 // cond: 7872 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7873 for { 7874 c := v.AuxInt 7875 sym := v.Aux 7876 v_0 := v.Args[0] 7877 if v_0.Op != OpAMD64ADDQconst { 7878 break 7879 } 7880 d := v_0.AuxInt 7881 ptr := v_0.Args[0] 7882 idx := v.Args[1] 7883 val := v.Args[2] 7884 mem := v.Args[3] 7885 v.reset(OpAMD64MOVLstoreidx1) 7886 v.AuxInt = c + d 7887 v.Aux = sym 7888 v.AddArg(ptr) 7889 v.AddArg(idx) 7890 v.AddArg(val) 7891 v.AddArg(mem) 7892 return true 7893 } 7894 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7895 // cond: 7896 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7897 for { 7898 c := v.AuxInt 7899 sym := v.Aux 7900 ptr := v.Args[0] 7901 v_1 := v.Args[1] 7902 if v_1.Op != OpAMD64ADDQconst { 7903 break 7904 } 7905 d := v_1.AuxInt 7906 idx := v_1.Args[0] 7907 val := v.Args[2] 7908 mem := v.Args[3] 7909 v.reset(OpAMD64MOVLstoreidx1) 7910 v.AuxInt = c + d 7911 v.Aux = sym 7912 v.AddArg(ptr) 7913 v.AddArg(idx) 7914 v.AddArg(val) 7915 v.AddArg(mem) 7916 return true 7917 } 7918 return false 7919 } 7920 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { 7921 b := v.Block 7922 _ = b 7923 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7924 // cond: 7925 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 7926 for { 7927 c := v.AuxInt 7928 sym := v.Aux 7929 v_0 := v.Args[0] 7930 if v_0.Op != OpAMD64ADDQconst { 7931 break 7932 } 7933 d := v_0.AuxInt 7934 ptr := v_0.Args[0] 7935 idx := v.Args[1] 7936 val := v.Args[2] 7937 mem := v.Args[3] 7938 v.reset(OpAMD64MOVLstoreidx4) 7939 v.AuxInt = c + d 7940 v.Aux = sym 7941 v.AddArg(ptr) 7942 v.AddArg(idx) 7943 v.AddArg(val) 7944 v.AddArg(mem) 7945 return true 7946 } 7947 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7948 // cond: 7949 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 7950 for { 7951 c := v.AuxInt 7952 sym := v.Aux 7953 ptr := v.Args[0] 7954 v_1 := v.Args[1] 7955 if v_1.Op != OpAMD64ADDQconst { 7956 break 7957 } 7958 d := v_1.AuxInt 7959 idx := v_1.Args[0] 7960 val := v.Args[2] 7961 mem := v.Args[3] 7962 v.reset(OpAMD64MOVLstoreidx4) 7963 v.AuxInt = c + 4*d 7964 v.Aux = sym 7965 v.AddArg(ptr) 7966 v.AddArg(idx) 7967 v.AddArg(val) 7968 v.AddArg(mem) 7969 return true 7970 } 7971 return false 7972 } 7973 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { 7974 b := v.Block 7975 _ = b 7976 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 7977 // cond: is32Bit(off1+off2) 7978 // result: (MOVOload [off1+off2] {sym} ptr mem) 7979 for { 7980 off1 := v.AuxInt 7981 sym := v.Aux 7982 v_0 := v.Args[0] 7983 if v_0.Op != OpAMD64ADDQconst { 7984 break 7985 } 7986 off2 := v_0.AuxInt 7987 ptr := v_0.Args[0] 7988 mem := v.Args[1] 7989 if !(is32Bit(off1 + off2)) { 7990 break 7991 } 7992 v.reset(OpAMD64MOVOload) 7993 v.AuxInt = off1 + off2 7994 v.Aux = sym 7995 v.AddArg(ptr) 7996 v.AddArg(mem) 7997 return true 7998 } 7999 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8000 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8001 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8002 for { 8003 off1 := v.AuxInt 8004 sym1 := v.Aux 8005 v_0 := v.Args[0] 8006 if v_0.Op != OpAMD64LEAQ { 8007 break 8008 } 8009 off2 := v_0.AuxInt 8010 sym2 := v_0.Aux 8011 base := v_0.Args[0] 8012 mem := v.Args[1] 8013 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8014 break 8015 } 8016 v.reset(OpAMD64MOVOload) 8017 v.AuxInt = off1 + off2 8018 v.Aux = mergeSym(sym1, sym2) 8019 v.AddArg(base) 8020 v.AddArg(mem) 8021 return true 8022 } 8023 return false 8024 } 8025 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool { 8026 b := v.Block 8027 _ = b 8028 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8029 // cond: is32Bit(off1+off2) 8030 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 8031 for { 8032 off1 := v.AuxInt 8033 sym := v.Aux 8034 v_0 := v.Args[0] 8035 if v_0.Op != OpAMD64ADDQconst { 8036 break 8037 } 8038 off2 := v_0.AuxInt 8039 ptr := v_0.Args[0] 8040 val := v.Args[1] 8041 mem := v.Args[2] 8042 if !(is32Bit(off1 + off2)) { 8043 break 8044 } 8045 v.reset(OpAMD64MOVOstore) 8046 v.AuxInt = off1 + off2 8047 v.Aux = sym 8048 v.AddArg(ptr) 8049 v.AddArg(val) 8050 v.AddArg(mem) 8051 return true 8052 } 8053 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8054 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8055 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8056 for { 8057 off1 := v.AuxInt 8058 sym1 := v.Aux 8059 v_0 := v.Args[0] 8060 if v_0.Op != OpAMD64LEAQ { 8061 break 8062 } 8063 off2 := v_0.AuxInt 8064 sym2 := v_0.Aux 8065 base := v_0.Args[0] 8066 val := v.Args[1] 8067 mem := v.Args[2] 8068 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8069 break 8070 } 8071 v.reset(OpAMD64MOVOstore) 8072 v.AuxInt = off1 + off2 8073 v.Aux = mergeSym(sym1, sym2) 8074 v.AddArg(base) 8075 v.AddArg(val) 8076 v.AddArg(mem) 8077 return true 8078 } 8079 return false 8080 } 8081 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { 8082 b := v.Block 8083 _ = b 8084 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 8085 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8086 // result: x 8087 for { 8088 off := v.AuxInt 8089 sym := v.Aux 8090 ptr := v.Args[0] 8091 v_1 := v.Args[1] 8092 if v_1.Op != OpAMD64MOVQstore { 8093 break 8094 } 8095 off2 := v_1.AuxInt 8096 sym2 := v_1.Aux 8097 ptr2 := v_1.Args[0] 8098 x := v_1.Args[1] 8099 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8100 break 8101 } 8102 v.reset(OpCopy) 8103 v.Type = x.Type 8104 v.AddArg(x) 8105 return true 8106 } 8107 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 8108 // cond: is32Bit(off1+off2) 8109 // result: (MOVQload [off1+off2] {sym} ptr mem) 8110 for { 8111 off1 := v.AuxInt 8112 sym := v.Aux 8113 v_0 := v.Args[0] 8114 if v_0.Op != OpAMD64ADDQconst { 8115 break 8116 } 8117 off2 := v_0.AuxInt 8118 ptr := v_0.Args[0] 8119 mem := v.Args[1] 8120 if !(is32Bit(off1 + off2)) { 8121 break 8122 } 8123 v.reset(OpAMD64MOVQload) 8124 v.AuxInt = off1 + off2 8125 v.Aux = sym 8126 v.AddArg(ptr) 8127 v.AddArg(mem) 8128 return true 8129 } 8130 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8131 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8132 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8133 for { 8134 off1 := v.AuxInt 8135 sym1 := v.Aux 8136 v_0 := v.Args[0] 8137 if v_0.Op != OpAMD64LEAQ { 8138 break 8139 } 8140 off2 := v_0.AuxInt 8141 sym2 := v_0.Aux 8142 base := v_0.Args[0] 8143 mem := v.Args[1] 8144 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8145 break 8146 } 8147 v.reset(OpAMD64MOVQload) 8148 v.AuxInt = off1 + off2 8149 v.Aux = mergeSym(sym1, sym2) 8150 v.AddArg(base) 8151 v.AddArg(mem) 8152 return true 8153 } 8154 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8155 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8156 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8157 for { 8158 off1 := v.AuxInt 8159 sym1 := v.Aux 8160 v_0 := v.Args[0] 8161 if v_0.Op != OpAMD64LEAQ1 { 8162 break 8163 } 8164 off2 := v_0.AuxInt 8165 sym2 := v_0.Aux 8166 ptr := v_0.Args[0] 8167 idx := v_0.Args[1] 8168 mem := v.Args[1] 8169 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8170 break 8171 } 8172 v.reset(OpAMD64MOVQloadidx1) 8173 v.AuxInt = off1 + off2 8174 v.Aux = mergeSym(sym1, sym2) 8175 v.AddArg(ptr) 8176 v.AddArg(idx) 8177 v.AddArg(mem) 8178 return true 8179 } 8180 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 8181 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8182 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8183 for { 8184 off1 := v.AuxInt 8185 sym1 := v.Aux 8186 v_0 := v.Args[0] 8187 if v_0.Op != OpAMD64LEAQ8 { 8188 break 8189 } 8190 off2 := v_0.AuxInt 8191 sym2 := v_0.Aux 8192 ptr := v_0.Args[0] 8193 idx := v_0.Args[1] 8194 mem := v.Args[1] 8195 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8196 break 8197 } 8198 v.reset(OpAMD64MOVQloadidx8) 8199 v.AuxInt = off1 + off2 8200 v.Aux = mergeSym(sym1, sym2) 8201 v.AddArg(ptr) 8202 v.AddArg(idx) 8203 v.AddArg(mem) 8204 return true 8205 } 8206 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 8207 // cond: ptr.Op != OpSB 8208 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 8209 for { 8210 off := v.AuxInt 8211 sym := v.Aux 8212 v_0 := v.Args[0] 8213 if v_0.Op != OpAMD64ADDQ { 8214 break 8215 } 8216 ptr := v_0.Args[0] 8217 idx := v_0.Args[1] 8218 mem := v.Args[1] 8219 if !(ptr.Op != OpSB) { 8220 break 8221 } 8222 v.reset(OpAMD64MOVQloadidx1) 8223 v.AuxInt = off 8224 v.Aux = sym 8225 v.AddArg(ptr) 8226 v.AddArg(idx) 8227 v.AddArg(mem) 8228 return true 8229 } 8230 return false 8231 } 8232 func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool { 8233 b := v.Block 8234 _ = b 8235 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 8236 // cond: 8237 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 8238 for { 8239 c := v.AuxInt 8240 sym := v.Aux 8241 ptr := v.Args[0] 8242 v_1 := v.Args[1] 8243 if v_1.Op != OpAMD64SHLQconst { 8244 break 8245 } 8246 if v_1.AuxInt != 3 { 8247 break 8248 } 8249 idx := v_1.Args[0] 8250 mem := v.Args[2] 8251 v.reset(OpAMD64MOVQloadidx8) 8252 v.AuxInt = c 8253 v.Aux = sym 8254 v.AddArg(ptr) 8255 v.AddArg(idx) 8256 v.AddArg(mem) 8257 return true 8258 } 8259 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8260 // cond: 8261 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8262 for { 8263 c := v.AuxInt 8264 sym := v.Aux 8265 v_0 := v.Args[0] 8266 if v_0.Op != OpAMD64ADDQconst { 8267 break 8268 } 8269 d := v_0.AuxInt 8270 ptr := v_0.Args[0] 8271 idx := v.Args[1] 8272 mem := v.Args[2] 8273 v.reset(OpAMD64MOVQloadidx1) 8274 v.AuxInt = c + d 8275 v.Aux = sym 8276 v.AddArg(ptr) 8277 v.AddArg(idx) 8278 v.AddArg(mem) 8279 return true 8280 } 8281 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8282 // cond: 8283 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8284 for { 8285 c := v.AuxInt 8286 sym := v.Aux 8287 ptr := v.Args[0] 8288 v_1 := v.Args[1] 8289 if v_1.Op != OpAMD64ADDQconst { 8290 break 8291 } 8292 d := v_1.AuxInt 8293 idx := v_1.Args[0] 8294 mem := v.Args[2] 8295 v.reset(OpAMD64MOVQloadidx1) 8296 v.AuxInt = c + d 8297 v.Aux = sym 8298 v.AddArg(ptr) 8299 v.AddArg(idx) 8300 v.AddArg(mem) 8301 return true 8302 } 8303 return false 8304 } 8305 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { 8306 b := v.Block 8307 _ = b 8308 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 8309 // cond: 8310 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 8311 for { 8312 c := v.AuxInt 8313 sym := v.Aux 8314 v_0 := v.Args[0] 8315 if v_0.Op != OpAMD64ADDQconst { 8316 break 8317 } 8318 d := v_0.AuxInt 8319 ptr := v_0.Args[0] 8320 idx := v.Args[1] 8321 mem := v.Args[2] 8322 v.reset(OpAMD64MOVQloadidx8) 8323 v.AuxInt = c + d 8324 v.Aux = sym 8325 v.AddArg(ptr) 8326 v.AddArg(idx) 8327 v.AddArg(mem) 8328 return true 8329 } 8330 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8331 // cond: 8332 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 8333 for { 8334 c := v.AuxInt 8335 sym := v.Aux 8336 ptr := v.Args[0] 8337 v_1 := v.Args[1] 8338 if v_1.Op != OpAMD64ADDQconst { 8339 break 8340 } 8341 d := v_1.AuxInt 8342 idx := v_1.Args[0] 8343 mem := v.Args[2] 8344 v.reset(OpAMD64MOVQloadidx8) 8345 v.AuxInt = c + 8*d 8346 v.Aux = sym 8347 v.AddArg(ptr) 8348 v.AddArg(idx) 8349 v.AddArg(mem) 8350 return true 8351 } 8352 return false 8353 } 8354 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { 8355 b := v.Block 8356 _ = b 8357 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8358 // cond: is32Bit(off1+off2) 8359 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 8360 for { 8361 off1 := v.AuxInt 8362 sym := v.Aux 8363 v_0 := v.Args[0] 8364 if v_0.Op != OpAMD64ADDQconst { 8365 break 8366 } 8367 off2 := v_0.AuxInt 8368 ptr := v_0.Args[0] 8369 val := v.Args[1] 8370 mem := v.Args[2] 8371 if !(is32Bit(off1 + off2)) { 8372 break 8373 } 8374 v.reset(OpAMD64MOVQstore) 8375 v.AuxInt = off1 + off2 8376 v.Aux = sym 8377 v.AddArg(ptr) 8378 v.AddArg(val) 8379 v.AddArg(mem) 8380 return true 8381 } 8382 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 8383 // cond: validValAndOff(c,off) 8384 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 8385 for { 8386 off := v.AuxInt 8387 sym := v.Aux 8388 ptr := v.Args[0] 8389 v_1 := v.Args[1] 8390 if v_1.Op != OpAMD64MOVQconst { 8391 break 8392 } 8393 c := v_1.AuxInt 8394 mem := v.Args[2] 8395 if !(validValAndOff(c, off)) { 8396 break 8397 } 8398 v.reset(OpAMD64MOVQstoreconst) 8399 v.AuxInt = makeValAndOff(c, off) 8400 v.Aux = sym 8401 v.AddArg(ptr) 8402 v.AddArg(mem) 8403 return true 8404 } 8405 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8406 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8407 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8408 for { 8409 off1 := v.AuxInt 8410 sym1 := v.Aux 8411 v_0 := v.Args[0] 8412 if v_0.Op != OpAMD64LEAQ { 8413 break 8414 } 8415 off2 := v_0.AuxInt 8416 sym2 := v_0.Aux 8417 base := v_0.Args[0] 8418 val := v.Args[1] 8419 mem := v.Args[2] 8420 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8421 break 8422 } 8423 v.reset(OpAMD64MOVQstore) 8424 v.AuxInt = off1 + off2 8425 v.Aux = mergeSym(sym1, sym2) 8426 v.AddArg(base) 8427 v.AddArg(val) 8428 v.AddArg(mem) 8429 return true 8430 } 8431 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8432 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8433 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8434 for { 8435 off1 := v.AuxInt 8436 sym1 := v.Aux 8437 v_0 := v.Args[0] 8438 if v_0.Op != OpAMD64LEAQ1 { 8439 break 8440 } 8441 off2 := v_0.AuxInt 8442 sym2 := v_0.Aux 8443 ptr := v_0.Args[0] 8444 idx := v_0.Args[1] 8445 val := v.Args[1] 8446 mem := v.Args[2] 8447 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8448 break 8449 } 8450 v.reset(OpAMD64MOVQstoreidx1) 8451 v.AuxInt = off1 + off2 8452 v.Aux = mergeSym(sym1, sym2) 8453 v.AddArg(ptr) 8454 v.AddArg(idx) 8455 v.AddArg(val) 8456 v.AddArg(mem) 8457 return true 8458 } 8459 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8460 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8461 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8462 for { 8463 off1 := v.AuxInt 8464 sym1 := v.Aux 8465 v_0 := v.Args[0] 8466 if v_0.Op != OpAMD64LEAQ8 { 8467 break 8468 } 8469 off2 := v_0.AuxInt 8470 sym2 := v_0.Aux 8471 ptr := v_0.Args[0] 8472 idx := v_0.Args[1] 8473 val := v.Args[1] 8474 mem := v.Args[2] 8475 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8476 break 8477 } 8478 v.reset(OpAMD64MOVQstoreidx8) 8479 v.AuxInt = off1 + off2 8480 v.Aux = mergeSym(sym1, sym2) 8481 v.AddArg(ptr) 8482 v.AddArg(idx) 8483 v.AddArg(val) 8484 v.AddArg(mem) 8485 return true 8486 } 8487 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 8488 // cond: ptr.Op != OpSB 8489 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 8490 for { 8491 off := v.AuxInt 8492 sym := v.Aux 8493 v_0 := v.Args[0] 8494 if v_0.Op != OpAMD64ADDQ { 8495 break 8496 } 8497 ptr := v_0.Args[0] 8498 idx := v_0.Args[1] 8499 val := v.Args[1] 8500 mem := v.Args[2] 8501 if !(ptr.Op != OpSB) { 8502 break 8503 } 8504 v.reset(OpAMD64MOVQstoreidx1) 8505 v.AuxInt = off 8506 v.Aux = sym 8507 v.AddArg(ptr) 8508 v.AddArg(idx) 8509 v.AddArg(val) 8510 v.AddArg(mem) 8511 return true 8512 } 8513 return false 8514 } 8515 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { 8516 b := v.Block 8517 _ = b 8518 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 8519 // cond: ValAndOff(sc).canAdd(off) 8520 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8521 for { 8522 sc := v.AuxInt 8523 s := v.Aux 8524 v_0 := v.Args[0] 8525 if v_0.Op != OpAMD64ADDQconst { 8526 break 8527 } 8528 off := v_0.AuxInt 8529 ptr := v_0.Args[0] 8530 mem := v.Args[1] 8531 if !(ValAndOff(sc).canAdd(off)) { 8532 break 8533 } 8534 v.reset(OpAMD64MOVQstoreconst) 8535 v.AuxInt = ValAndOff(sc).add(off) 8536 v.Aux = s 8537 v.AddArg(ptr) 8538 v.AddArg(mem) 8539 return true 8540 } 8541 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 8542 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8543 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8544 for { 8545 sc := v.AuxInt 8546 sym1 := v.Aux 8547 v_0 := v.Args[0] 8548 if v_0.Op != OpAMD64LEAQ { 8549 break 8550 } 8551 off := v_0.AuxInt 8552 sym2 := v_0.Aux 8553 ptr := v_0.Args[0] 8554 mem := v.Args[1] 8555 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8556 break 8557 } 8558 v.reset(OpAMD64MOVQstoreconst) 8559 v.AuxInt = ValAndOff(sc).add(off) 8560 v.Aux = mergeSym(sym1, sym2) 8561 v.AddArg(ptr) 8562 v.AddArg(mem) 8563 return true 8564 } 8565 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 8566 // cond: canMergeSym(sym1, sym2) 8567 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8568 for { 8569 x := v.AuxInt 8570 sym1 := v.Aux 8571 v_0 := v.Args[0] 8572 if v_0.Op != OpAMD64LEAQ1 { 8573 break 8574 } 8575 off := v_0.AuxInt 8576 sym2 := v_0.Aux 8577 ptr := v_0.Args[0] 8578 idx := v_0.Args[1] 8579 mem := v.Args[1] 8580 if !(canMergeSym(sym1, sym2)) { 8581 break 8582 } 8583 v.reset(OpAMD64MOVQstoreconstidx1) 8584 v.AuxInt = ValAndOff(x).add(off) 8585 v.Aux = mergeSym(sym1, sym2) 8586 v.AddArg(ptr) 8587 v.AddArg(idx) 8588 v.AddArg(mem) 8589 return true 8590 } 8591 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 8592 // cond: canMergeSym(sym1, sym2) 8593 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8594 for { 8595 x := v.AuxInt 8596 sym1 := v.Aux 8597 v_0 := v.Args[0] 8598 if v_0.Op != OpAMD64LEAQ8 { 8599 break 8600 } 8601 off := v_0.AuxInt 8602 sym2 := v_0.Aux 8603 ptr := v_0.Args[0] 8604 idx := v_0.Args[1] 8605 mem := v.Args[1] 8606 if !(canMergeSym(sym1, sym2)) { 8607 break 8608 } 8609 v.reset(OpAMD64MOVQstoreconstidx8) 8610 v.AuxInt = ValAndOff(x).add(off) 8611 v.Aux = mergeSym(sym1, sym2) 8612 v.AddArg(ptr) 8613 v.AddArg(idx) 8614 v.AddArg(mem) 8615 return true 8616 } 8617 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 8618 // cond: 8619 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 8620 for { 8621 x := v.AuxInt 8622 sym := v.Aux 8623 v_0 := v.Args[0] 8624 if v_0.Op != OpAMD64ADDQ { 8625 break 8626 } 8627 ptr := v_0.Args[0] 8628 idx := v_0.Args[1] 8629 mem := v.Args[1] 8630 v.reset(OpAMD64MOVQstoreconstidx1) 8631 v.AuxInt = x 8632 v.Aux = sym 8633 v.AddArg(ptr) 8634 v.AddArg(idx) 8635 v.AddArg(mem) 8636 return true 8637 } 8638 return false 8639 } 8640 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool { 8641 b := v.Block 8642 _ = b 8643 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 8644 // cond: 8645 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 8646 for { 8647 c := v.AuxInt 8648 sym := v.Aux 8649 ptr := v.Args[0] 8650 v_1 := v.Args[1] 8651 if v_1.Op != OpAMD64SHLQconst { 8652 break 8653 } 8654 if v_1.AuxInt != 3 { 8655 break 8656 } 8657 idx := v_1.Args[0] 8658 mem := v.Args[2] 8659 v.reset(OpAMD64MOVQstoreconstidx8) 8660 v.AuxInt = c 8661 v.Aux = sym 8662 v.AddArg(ptr) 8663 v.AddArg(idx) 8664 v.AddArg(mem) 8665 return true 8666 } 8667 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 8668 // cond: 8669 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8670 for { 8671 x := v.AuxInt 8672 sym := v.Aux 8673 v_0 := v.Args[0] 8674 if v_0.Op != OpAMD64ADDQconst { 8675 break 8676 } 8677 c := v_0.AuxInt 8678 ptr := v_0.Args[0] 8679 idx := v.Args[1] 8680 mem := v.Args[2] 8681 v.reset(OpAMD64MOVQstoreconstidx1) 8682 v.AuxInt = ValAndOff(x).add(c) 8683 v.Aux = sym 8684 v.AddArg(ptr) 8685 v.AddArg(idx) 8686 v.AddArg(mem) 8687 return true 8688 } 8689 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 8690 // cond: 8691 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8692 for { 8693 x := v.AuxInt 8694 sym := v.Aux 8695 ptr := v.Args[0] 8696 v_1 := v.Args[1] 8697 if v_1.Op != OpAMD64ADDQconst { 8698 break 8699 } 8700 c := v_1.AuxInt 8701 idx := v_1.Args[0] 8702 mem := v.Args[2] 8703 v.reset(OpAMD64MOVQstoreconstidx1) 8704 v.AuxInt = ValAndOff(x).add(c) 8705 v.Aux = sym 8706 v.AddArg(ptr) 8707 v.AddArg(idx) 8708 v.AddArg(mem) 8709 return true 8710 } 8711 return false 8712 } 8713 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool { 8714 b := v.Block 8715 _ = b 8716 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 8717 // cond: 8718 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8719 for { 8720 x := v.AuxInt 8721 sym := v.Aux 8722 v_0 := v.Args[0] 8723 if v_0.Op != OpAMD64ADDQconst { 8724 break 8725 } 8726 c := v_0.AuxInt 8727 ptr := v_0.Args[0] 8728 idx := v.Args[1] 8729 mem := v.Args[2] 8730 v.reset(OpAMD64MOVQstoreconstidx8) 8731 v.AuxInt = ValAndOff(x).add(c) 8732 v.Aux = sym 8733 v.AddArg(ptr) 8734 v.AddArg(idx) 8735 v.AddArg(mem) 8736 return true 8737 } 8738 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 8739 // cond: 8740 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 8741 for { 8742 x := v.AuxInt 8743 sym := v.Aux 8744 ptr := v.Args[0] 8745 v_1 := v.Args[1] 8746 if v_1.Op != OpAMD64ADDQconst { 8747 break 8748 } 8749 c := v_1.AuxInt 8750 idx := v_1.Args[0] 8751 mem := v.Args[2] 8752 v.reset(OpAMD64MOVQstoreconstidx8) 8753 v.AuxInt = ValAndOff(x).add(8 * c) 8754 v.Aux = sym 8755 v.AddArg(ptr) 8756 v.AddArg(idx) 8757 v.AddArg(mem) 8758 return true 8759 } 8760 return false 8761 } 8762 func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool { 8763 b := v.Block 8764 _ = b 8765 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 8766 // cond: 8767 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 8768 for { 8769 c := v.AuxInt 8770 sym := v.Aux 8771 ptr := v.Args[0] 8772 v_1 := v.Args[1] 8773 if v_1.Op != OpAMD64SHLQconst { 8774 break 8775 } 8776 if v_1.AuxInt != 3 { 8777 break 8778 } 8779 idx := v_1.Args[0] 8780 val := v.Args[2] 8781 mem := v.Args[3] 8782 v.reset(OpAMD64MOVQstoreidx8) 8783 v.AuxInt = c 8784 v.Aux = sym 8785 v.AddArg(ptr) 8786 v.AddArg(idx) 8787 v.AddArg(val) 8788 v.AddArg(mem) 8789 return true 8790 } 8791 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8792 // cond: 8793 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 8794 for { 8795 c := v.AuxInt 8796 sym := v.Aux 8797 v_0 := v.Args[0] 8798 if v_0.Op != OpAMD64ADDQconst { 8799 break 8800 } 8801 d := v_0.AuxInt 8802 ptr := v_0.Args[0] 8803 idx := v.Args[1] 8804 val := v.Args[2] 8805 mem := v.Args[3] 8806 v.reset(OpAMD64MOVQstoreidx1) 8807 v.AuxInt = c + d 8808 v.Aux = sym 8809 v.AddArg(ptr) 8810 v.AddArg(idx) 8811 v.AddArg(val) 8812 v.AddArg(mem) 8813 return true 8814 } 8815 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8816 // cond: 8817 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 8818 for { 8819 c := v.AuxInt 8820 sym := v.Aux 8821 ptr := v.Args[0] 8822 v_1 := v.Args[1] 8823 if v_1.Op != OpAMD64ADDQconst { 8824 break 8825 } 8826 d := v_1.AuxInt 8827 idx := v_1.Args[0] 8828 val := v.Args[2] 8829 mem := v.Args[3] 8830 v.reset(OpAMD64MOVQstoreidx1) 8831 v.AuxInt = c + d 8832 v.Aux = sym 8833 v.AddArg(ptr) 8834 v.AddArg(idx) 8835 v.AddArg(val) 8836 v.AddArg(mem) 8837 return true 8838 } 8839 return false 8840 } 8841 func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool { 8842 b := v.Block 8843 _ = b 8844 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8845 // cond: 8846 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 8847 for { 8848 c := v.AuxInt 8849 sym := v.Aux 8850 v_0 := v.Args[0] 8851 if v_0.Op != OpAMD64ADDQconst { 8852 break 8853 } 8854 d := v_0.AuxInt 8855 ptr := v_0.Args[0] 8856 idx := v.Args[1] 8857 val := v.Args[2] 8858 mem := v.Args[3] 8859 v.reset(OpAMD64MOVQstoreidx8) 8860 v.AuxInt = c + d 8861 v.Aux = sym 8862 v.AddArg(ptr) 8863 v.AddArg(idx) 8864 v.AddArg(val) 8865 v.AddArg(mem) 8866 return true 8867 } 8868 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8869 // cond: 8870 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 8871 for { 8872 c := v.AuxInt 8873 sym := v.Aux 8874 ptr := v.Args[0] 8875 v_1 := v.Args[1] 8876 if v_1.Op != OpAMD64ADDQconst { 8877 break 8878 } 8879 d := v_1.AuxInt 8880 idx := v_1.Args[0] 8881 val := v.Args[2] 8882 mem := v.Args[3] 8883 v.reset(OpAMD64MOVQstoreidx8) 8884 v.AuxInt = c + 8*d 8885 v.Aux = sym 8886 v.AddArg(ptr) 8887 v.AddArg(idx) 8888 v.AddArg(val) 8889 v.AddArg(mem) 8890 return true 8891 } 8892 return false 8893 } 8894 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool { 8895 b := v.Block 8896 _ = b 8897 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 8898 // cond: is32Bit(off1+off2) 8899 // result: (MOVSDload [off1+off2] {sym} ptr mem) 8900 for { 8901 off1 := v.AuxInt 8902 sym := v.Aux 8903 v_0 := v.Args[0] 8904 if v_0.Op != OpAMD64ADDQconst { 8905 break 8906 } 8907 off2 := v_0.AuxInt 8908 ptr := v_0.Args[0] 8909 mem := v.Args[1] 8910 if !(is32Bit(off1 + off2)) { 8911 break 8912 } 8913 v.reset(OpAMD64MOVSDload) 8914 v.AuxInt = off1 + off2 8915 v.Aux = sym 8916 v.AddArg(ptr) 8917 v.AddArg(mem) 8918 return true 8919 } 8920 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8921 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8922 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8923 for { 8924 off1 := v.AuxInt 8925 sym1 := v.Aux 8926 v_0 := v.Args[0] 8927 if v_0.Op != OpAMD64LEAQ { 8928 break 8929 } 8930 off2 := v_0.AuxInt 8931 sym2 := v_0.Aux 8932 base := v_0.Args[0] 8933 mem := v.Args[1] 8934 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8935 break 8936 } 8937 v.reset(OpAMD64MOVSDload) 8938 v.AuxInt = off1 + off2 8939 v.Aux = mergeSym(sym1, sym2) 8940 v.AddArg(base) 8941 v.AddArg(mem) 8942 return true 8943 } 8944 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8945 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8946 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8947 for { 8948 off1 := v.AuxInt 8949 sym1 := v.Aux 8950 v_0 := v.Args[0] 8951 if v_0.Op != OpAMD64LEAQ1 { 8952 break 8953 } 8954 off2 := v_0.AuxInt 8955 sym2 := v_0.Aux 8956 ptr := v_0.Args[0] 8957 idx := v_0.Args[1] 8958 mem := v.Args[1] 8959 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8960 break 8961 } 8962 v.reset(OpAMD64MOVSDloadidx1) 8963 v.AuxInt = off1 + off2 8964 v.Aux = mergeSym(sym1, sym2) 8965 v.AddArg(ptr) 8966 v.AddArg(idx) 8967 v.AddArg(mem) 8968 return true 8969 } 8970 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 8971 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8972 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8973 for { 8974 off1 := v.AuxInt 8975 sym1 := v.Aux 8976 v_0 := v.Args[0] 8977 if v_0.Op != OpAMD64LEAQ8 { 8978 break 8979 } 8980 off2 := v_0.AuxInt 8981 sym2 := v_0.Aux 8982 ptr := v_0.Args[0] 8983 idx := v_0.Args[1] 8984 mem := v.Args[1] 8985 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8986 break 8987 } 8988 v.reset(OpAMD64MOVSDloadidx8) 8989 v.AuxInt = off1 + off2 8990 v.Aux = mergeSym(sym1, sym2) 8991 v.AddArg(ptr) 8992 v.AddArg(idx) 8993 v.AddArg(mem) 8994 return true 8995 } 8996 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 8997 // cond: ptr.Op != OpSB 8998 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 8999 for { 9000 off := v.AuxInt 9001 sym := v.Aux 9002 v_0 := v.Args[0] 9003 if v_0.Op != OpAMD64ADDQ { 9004 break 9005 } 9006 ptr := v_0.Args[0] 9007 idx := v_0.Args[1] 9008 mem := v.Args[1] 9009 if !(ptr.Op != OpSB) { 9010 break 9011 } 9012 v.reset(OpAMD64MOVSDloadidx1) 9013 v.AuxInt = off 9014 v.Aux = sym 9015 v.AddArg(ptr) 9016 v.AddArg(idx) 9017 v.AddArg(mem) 9018 return true 9019 } 9020 return false 9021 } 9022 func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool { 9023 b := v.Block 9024 _ = b 9025 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9026 // cond: 9027 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9028 for { 9029 c := v.AuxInt 9030 sym := v.Aux 9031 v_0 := v.Args[0] 9032 if v_0.Op != OpAMD64ADDQconst { 9033 break 9034 } 9035 d := v_0.AuxInt 9036 ptr := v_0.Args[0] 9037 idx := v.Args[1] 9038 mem := v.Args[2] 9039 v.reset(OpAMD64MOVSDloadidx1) 9040 v.AuxInt = c + d 9041 v.Aux = sym 9042 v.AddArg(ptr) 9043 v.AddArg(idx) 9044 v.AddArg(mem) 9045 return true 9046 } 9047 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9048 // cond: 9049 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9050 for { 9051 c := v.AuxInt 9052 sym := v.Aux 9053 ptr := v.Args[0] 9054 v_1 := v.Args[1] 9055 if v_1.Op != OpAMD64ADDQconst { 9056 break 9057 } 9058 d := v_1.AuxInt 9059 idx := v_1.Args[0] 9060 mem := v.Args[2] 9061 v.reset(OpAMD64MOVSDloadidx1) 9062 v.AuxInt = c + d 9063 v.Aux = sym 9064 v.AddArg(ptr) 9065 v.AddArg(idx) 9066 v.AddArg(mem) 9067 return true 9068 } 9069 return false 9070 } 9071 func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool { 9072 b := v.Block 9073 _ = b 9074 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 9075 // cond: 9076 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 9077 for { 9078 c := v.AuxInt 9079 sym := v.Aux 9080 v_0 := v.Args[0] 9081 if v_0.Op != OpAMD64ADDQconst { 9082 break 9083 } 9084 d := v_0.AuxInt 9085 ptr := v_0.Args[0] 9086 idx := v.Args[1] 9087 mem := v.Args[2] 9088 v.reset(OpAMD64MOVSDloadidx8) 9089 v.AuxInt = c + d 9090 v.Aux = sym 9091 v.AddArg(ptr) 9092 v.AddArg(idx) 9093 v.AddArg(mem) 9094 return true 9095 } 9096 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 9097 // cond: 9098 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 9099 for { 9100 c := v.AuxInt 9101 sym := v.Aux 9102 ptr := v.Args[0] 9103 v_1 := v.Args[1] 9104 if v_1.Op != OpAMD64ADDQconst { 9105 break 9106 } 9107 d := v_1.AuxInt 9108 idx := v_1.Args[0] 9109 mem := v.Args[2] 9110 v.reset(OpAMD64MOVSDloadidx8) 9111 v.AuxInt = c + 8*d 9112 v.Aux = sym 9113 v.AddArg(ptr) 9114 v.AddArg(idx) 9115 v.AddArg(mem) 9116 return true 9117 } 9118 return false 9119 } 9120 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool { 9121 b := v.Block 9122 _ = b 9123 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9124 // cond: is32Bit(off1+off2) 9125 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 9126 for { 9127 off1 := v.AuxInt 9128 sym := v.Aux 9129 v_0 := v.Args[0] 9130 if v_0.Op != OpAMD64ADDQconst { 9131 break 9132 } 9133 off2 := v_0.AuxInt 9134 ptr := v_0.Args[0] 9135 val := v.Args[1] 9136 mem := v.Args[2] 9137 if !(is32Bit(off1 + off2)) { 9138 break 9139 } 9140 v.reset(OpAMD64MOVSDstore) 9141 v.AuxInt = off1 + off2 9142 v.Aux = sym 9143 v.AddArg(ptr) 9144 v.AddArg(val) 9145 v.AddArg(mem) 9146 return true 9147 } 9148 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9149 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9150 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9151 for { 9152 off1 := v.AuxInt 9153 sym1 := v.Aux 9154 v_0 := v.Args[0] 9155 if v_0.Op != OpAMD64LEAQ { 9156 break 9157 } 9158 off2 := v_0.AuxInt 9159 sym2 := v_0.Aux 9160 base := v_0.Args[0] 9161 val := v.Args[1] 9162 mem := v.Args[2] 9163 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9164 break 9165 } 9166 v.reset(OpAMD64MOVSDstore) 9167 v.AuxInt = off1 + off2 9168 v.Aux = mergeSym(sym1, sym2) 9169 v.AddArg(base) 9170 v.AddArg(val) 9171 v.AddArg(mem) 9172 return true 9173 } 9174 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9175 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9176 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9177 for { 9178 off1 := v.AuxInt 9179 sym1 := v.Aux 9180 v_0 := v.Args[0] 9181 if v_0.Op != OpAMD64LEAQ1 { 9182 break 9183 } 9184 off2 := v_0.AuxInt 9185 sym2 := v_0.Aux 9186 ptr := v_0.Args[0] 9187 idx := v_0.Args[1] 9188 val := v.Args[1] 9189 mem := v.Args[2] 9190 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9191 break 9192 } 9193 v.reset(OpAMD64MOVSDstoreidx1) 9194 v.AuxInt = off1 + off2 9195 v.Aux = mergeSym(sym1, sym2) 9196 v.AddArg(ptr) 9197 v.AddArg(idx) 9198 v.AddArg(val) 9199 v.AddArg(mem) 9200 return true 9201 } 9202 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 9203 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9204 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9205 for { 9206 off1 := v.AuxInt 9207 sym1 := v.Aux 9208 v_0 := v.Args[0] 9209 if v_0.Op != OpAMD64LEAQ8 { 9210 break 9211 } 9212 off2 := v_0.AuxInt 9213 sym2 := v_0.Aux 9214 ptr := v_0.Args[0] 9215 idx := v_0.Args[1] 9216 val := v.Args[1] 9217 mem := v.Args[2] 9218 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9219 break 9220 } 9221 v.reset(OpAMD64MOVSDstoreidx8) 9222 v.AuxInt = off1 + off2 9223 v.Aux = mergeSym(sym1, sym2) 9224 v.AddArg(ptr) 9225 v.AddArg(idx) 9226 v.AddArg(val) 9227 v.AddArg(mem) 9228 return true 9229 } 9230 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 9231 // cond: ptr.Op != OpSB 9232 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 9233 for { 9234 off := v.AuxInt 9235 sym := v.Aux 9236 v_0 := v.Args[0] 9237 if v_0.Op != OpAMD64ADDQ { 9238 break 9239 } 9240 ptr := v_0.Args[0] 9241 idx := v_0.Args[1] 9242 val := v.Args[1] 9243 mem := v.Args[2] 9244 if !(ptr.Op != OpSB) { 9245 break 9246 } 9247 v.reset(OpAMD64MOVSDstoreidx1) 9248 v.AuxInt = off 9249 v.Aux = sym 9250 v.AddArg(ptr) 9251 v.AddArg(idx) 9252 v.AddArg(val) 9253 v.AddArg(mem) 9254 return true 9255 } 9256 return false 9257 } 9258 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool { 9259 b := v.Block 9260 _ = b 9261 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9262 // cond: 9263 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9264 for { 9265 c := v.AuxInt 9266 sym := v.Aux 9267 v_0 := v.Args[0] 9268 if v_0.Op != OpAMD64ADDQconst { 9269 break 9270 } 9271 d := v_0.AuxInt 9272 ptr := v_0.Args[0] 9273 idx := v.Args[1] 9274 val := v.Args[2] 9275 mem := v.Args[3] 9276 v.reset(OpAMD64MOVSDstoreidx1) 9277 v.AuxInt = c + d 9278 v.Aux = sym 9279 v.AddArg(ptr) 9280 v.AddArg(idx) 9281 v.AddArg(val) 9282 v.AddArg(mem) 9283 return true 9284 } 9285 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9286 // cond: 9287 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9288 for { 9289 c := v.AuxInt 9290 sym := v.Aux 9291 ptr := v.Args[0] 9292 v_1 := v.Args[1] 9293 if v_1.Op != OpAMD64ADDQconst { 9294 break 9295 } 9296 d := v_1.AuxInt 9297 idx := v_1.Args[0] 9298 val := v.Args[2] 9299 mem := v.Args[3] 9300 v.reset(OpAMD64MOVSDstoreidx1) 9301 v.AuxInt = c + d 9302 v.Aux = sym 9303 v.AddArg(ptr) 9304 v.AddArg(idx) 9305 v.AddArg(val) 9306 v.AddArg(mem) 9307 return true 9308 } 9309 return false 9310 } 9311 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool { 9312 b := v.Block 9313 _ = b 9314 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9315 // cond: 9316 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 9317 for { 9318 c := v.AuxInt 9319 sym := v.Aux 9320 v_0 := v.Args[0] 9321 if v_0.Op != OpAMD64ADDQconst { 9322 break 9323 } 9324 d := v_0.AuxInt 9325 ptr := v_0.Args[0] 9326 idx := v.Args[1] 9327 val := v.Args[2] 9328 mem := v.Args[3] 9329 v.reset(OpAMD64MOVSDstoreidx8) 9330 v.AuxInt = c + d 9331 v.Aux = sym 9332 v.AddArg(ptr) 9333 v.AddArg(idx) 9334 v.AddArg(val) 9335 v.AddArg(mem) 9336 return true 9337 } 9338 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9339 // cond: 9340 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 9341 for { 9342 c := v.AuxInt 9343 sym := v.Aux 9344 ptr := v.Args[0] 9345 v_1 := v.Args[1] 9346 if v_1.Op != OpAMD64ADDQconst { 9347 break 9348 } 9349 d := v_1.AuxInt 9350 idx := v_1.Args[0] 9351 val := v.Args[2] 9352 mem := v.Args[3] 9353 v.reset(OpAMD64MOVSDstoreidx8) 9354 v.AuxInt = c + 8*d 9355 v.Aux = sym 9356 v.AddArg(ptr) 9357 v.AddArg(idx) 9358 v.AddArg(val) 9359 v.AddArg(mem) 9360 return true 9361 } 9362 return false 9363 } 9364 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { 9365 b := v.Block 9366 _ = b 9367 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 9368 // cond: is32Bit(off1+off2) 9369 // result: (MOVSSload [off1+off2] {sym} ptr mem) 9370 for { 9371 off1 := v.AuxInt 9372 sym := v.Aux 9373 v_0 := v.Args[0] 9374 if v_0.Op != OpAMD64ADDQconst { 9375 break 9376 } 9377 off2 := v_0.AuxInt 9378 ptr := v_0.Args[0] 9379 mem := v.Args[1] 9380 if !(is32Bit(off1 + off2)) { 9381 break 9382 } 9383 v.reset(OpAMD64MOVSSload) 9384 v.AuxInt = off1 + off2 9385 v.Aux = sym 9386 v.AddArg(ptr) 9387 v.AddArg(mem) 9388 return true 9389 } 9390 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9391 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9392 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9393 for { 9394 off1 := v.AuxInt 9395 sym1 := v.Aux 9396 v_0 := v.Args[0] 9397 if v_0.Op != OpAMD64LEAQ { 9398 break 9399 } 9400 off2 := v_0.AuxInt 9401 sym2 := v_0.Aux 9402 base := v_0.Args[0] 9403 mem := v.Args[1] 9404 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9405 break 9406 } 9407 v.reset(OpAMD64MOVSSload) 9408 v.AuxInt = off1 + off2 9409 v.Aux = mergeSym(sym1, sym2) 9410 v.AddArg(base) 9411 v.AddArg(mem) 9412 return true 9413 } 9414 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9415 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9416 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9417 for { 9418 off1 := v.AuxInt 9419 sym1 := v.Aux 9420 v_0 := v.Args[0] 9421 if v_0.Op != OpAMD64LEAQ1 { 9422 break 9423 } 9424 off2 := v_0.AuxInt 9425 sym2 := v_0.Aux 9426 ptr := v_0.Args[0] 9427 idx := v_0.Args[1] 9428 mem := v.Args[1] 9429 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9430 break 9431 } 9432 v.reset(OpAMD64MOVSSloadidx1) 9433 v.AuxInt = off1 + off2 9434 v.Aux = mergeSym(sym1, sym2) 9435 v.AddArg(ptr) 9436 v.AddArg(idx) 9437 v.AddArg(mem) 9438 return true 9439 } 9440 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 9441 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9442 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9443 for { 9444 off1 := v.AuxInt 9445 sym1 := v.Aux 9446 v_0 := v.Args[0] 9447 if v_0.Op != OpAMD64LEAQ4 { 9448 break 9449 } 9450 off2 := v_0.AuxInt 9451 sym2 := v_0.Aux 9452 ptr := v_0.Args[0] 9453 idx := v_0.Args[1] 9454 mem := v.Args[1] 9455 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9456 break 9457 } 9458 v.reset(OpAMD64MOVSSloadidx4) 9459 v.AuxInt = off1 + off2 9460 v.Aux = mergeSym(sym1, sym2) 9461 v.AddArg(ptr) 9462 v.AddArg(idx) 9463 v.AddArg(mem) 9464 return true 9465 } 9466 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 9467 // cond: ptr.Op != OpSB 9468 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 9469 for { 9470 off := v.AuxInt 9471 sym := v.Aux 9472 v_0 := v.Args[0] 9473 if v_0.Op != OpAMD64ADDQ { 9474 break 9475 } 9476 ptr := v_0.Args[0] 9477 idx := v_0.Args[1] 9478 mem := v.Args[1] 9479 if !(ptr.Op != OpSB) { 9480 break 9481 } 9482 v.reset(OpAMD64MOVSSloadidx1) 9483 v.AuxInt = off 9484 v.Aux = sym 9485 v.AddArg(ptr) 9486 v.AddArg(idx) 9487 v.AddArg(mem) 9488 return true 9489 } 9490 return false 9491 } 9492 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool { 9493 b := v.Block 9494 _ = b 9495 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9496 // cond: 9497 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 9498 for { 9499 c := v.AuxInt 9500 sym := v.Aux 9501 v_0 := v.Args[0] 9502 if v_0.Op != OpAMD64ADDQconst { 9503 break 9504 } 9505 d := v_0.AuxInt 9506 ptr := v_0.Args[0] 9507 idx := v.Args[1] 9508 mem := v.Args[2] 9509 v.reset(OpAMD64MOVSSloadidx1) 9510 v.AuxInt = c + d 9511 v.Aux = sym 9512 v.AddArg(ptr) 9513 v.AddArg(idx) 9514 v.AddArg(mem) 9515 return true 9516 } 9517 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9518 // cond: 9519 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 9520 for { 9521 c := v.AuxInt 9522 sym := v.Aux 9523 ptr := v.Args[0] 9524 v_1 := v.Args[1] 9525 if v_1.Op != OpAMD64ADDQconst { 9526 break 9527 } 9528 d := v_1.AuxInt 9529 idx := v_1.Args[0] 9530 mem := v.Args[2] 9531 v.reset(OpAMD64MOVSSloadidx1) 9532 v.AuxInt = c + d 9533 v.Aux = sym 9534 v.AddArg(ptr) 9535 v.AddArg(idx) 9536 v.AddArg(mem) 9537 return true 9538 } 9539 return false 9540 } 9541 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { 9542 b := v.Block 9543 _ = b 9544 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 9545 // cond: 9546 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 9547 for { 9548 c := v.AuxInt 9549 sym := v.Aux 9550 v_0 := v.Args[0] 9551 if v_0.Op != OpAMD64ADDQconst { 9552 break 9553 } 9554 d := v_0.AuxInt 9555 ptr := v_0.Args[0] 9556 idx := v.Args[1] 9557 mem := v.Args[2] 9558 v.reset(OpAMD64MOVSSloadidx4) 9559 v.AuxInt = c + d 9560 v.Aux = sym 9561 v.AddArg(ptr) 9562 v.AddArg(idx) 9563 v.AddArg(mem) 9564 return true 9565 } 9566 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 9567 // cond: 9568 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 9569 for { 9570 c := v.AuxInt 9571 sym := v.Aux 9572 ptr := v.Args[0] 9573 v_1 := v.Args[1] 9574 if v_1.Op != OpAMD64ADDQconst { 9575 break 9576 } 9577 d := v_1.AuxInt 9578 idx := v_1.Args[0] 9579 mem := v.Args[2] 9580 v.reset(OpAMD64MOVSSloadidx4) 9581 v.AuxInt = c + 4*d 9582 v.Aux = sym 9583 v.AddArg(ptr) 9584 v.AddArg(idx) 9585 v.AddArg(mem) 9586 return true 9587 } 9588 return false 9589 } 9590 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { 9591 b := v.Block 9592 _ = b 9593 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9594 // cond: is32Bit(off1+off2) 9595 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 9596 for { 9597 off1 := v.AuxInt 9598 sym := v.Aux 9599 v_0 := v.Args[0] 9600 if v_0.Op != OpAMD64ADDQconst { 9601 break 9602 } 9603 off2 := v_0.AuxInt 9604 ptr := v_0.Args[0] 9605 val := v.Args[1] 9606 mem := v.Args[2] 9607 if !(is32Bit(off1 + off2)) { 9608 break 9609 } 9610 v.reset(OpAMD64MOVSSstore) 9611 v.AuxInt = off1 + off2 9612 v.Aux = sym 9613 v.AddArg(ptr) 9614 v.AddArg(val) 9615 v.AddArg(mem) 9616 return true 9617 } 9618 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9619 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9620 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9621 for { 9622 off1 := v.AuxInt 9623 sym1 := v.Aux 9624 v_0 := v.Args[0] 9625 if v_0.Op != OpAMD64LEAQ { 9626 break 9627 } 9628 off2 := v_0.AuxInt 9629 sym2 := v_0.Aux 9630 base := v_0.Args[0] 9631 val := v.Args[1] 9632 mem := v.Args[2] 9633 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9634 break 9635 } 9636 v.reset(OpAMD64MOVSSstore) 9637 v.AuxInt = off1 + off2 9638 v.Aux = mergeSym(sym1, sym2) 9639 v.AddArg(base) 9640 v.AddArg(val) 9641 v.AddArg(mem) 9642 return true 9643 } 9644 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9645 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9646 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9647 for { 9648 off1 := v.AuxInt 9649 sym1 := v.Aux 9650 v_0 := v.Args[0] 9651 if v_0.Op != OpAMD64LEAQ1 { 9652 break 9653 } 9654 off2 := v_0.AuxInt 9655 sym2 := v_0.Aux 9656 ptr := v_0.Args[0] 9657 idx := v_0.Args[1] 9658 val := v.Args[1] 9659 mem := v.Args[2] 9660 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9661 break 9662 } 9663 v.reset(OpAMD64MOVSSstoreidx1) 9664 v.AuxInt = off1 + off2 9665 v.Aux = mergeSym(sym1, sym2) 9666 v.AddArg(ptr) 9667 v.AddArg(idx) 9668 v.AddArg(val) 9669 v.AddArg(mem) 9670 return true 9671 } 9672 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 9673 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9674 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9675 for { 9676 off1 := v.AuxInt 9677 sym1 := v.Aux 9678 v_0 := v.Args[0] 9679 if v_0.Op != OpAMD64LEAQ4 { 9680 break 9681 } 9682 off2 := v_0.AuxInt 9683 sym2 := v_0.Aux 9684 ptr := v_0.Args[0] 9685 idx := v_0.Args[1] 9686 val := v.Args[1] 9687 mem := v.Args[2] 9688 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9689 break 9690 } 9691 v.reset(OpAMD64MOVSSstoreidx4) 9692 v.AuxInt = off1 + off2 9693 v.Aux = mergeSym(sym1, sym2) 9694 v.AddArg(ptr) 9695 v.AddArg(idx) 9696 v.AddArg(val) 9697 v.AddArg(mem) 9698 return true 9699 } 9700 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 9701 // cond: ptr.Op != OpSB 9702 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 9703 for { 9704 off := v.AuxInt 9705 sym := v.Aux 9706 v_0 := v.Args[0] 9707 if v_0.Op != OpAMD64ADDQ { 9708 break 9709 } 9710 ptr := v_0.Args[0] 9711 idx := v_0.Args[1] 9712 val := v.Args[1] 9713 mem := v.Args[2] 9714 if !(ptr.Op != OpSB) { 9715 break 9716 } 9717 v.reset(OpAMD64MOVSSstoreidx1) 9718 v.AuxInt = off 9719 v.Aux = sym 9720 v.AddArg(ptr) 9721 v.AddArg(idx) 9722 v.AddArg(val) 9723 v.AddArg(mem) 9724 return true 9725 } 9726 return false 9727 } 9728 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool { 9729 b := v.Block 9730 _ = b 9731 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9732 // cond: 9733 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 9734 for { 9735 c := v.AuxInt 9736 sym := v.Aux 9737 v_0 := v.Args[0] 9738 if v_0.Op != OpAMD64ADDQconst { 9739 break 9740 } 9741 d := v_0.AuxInt 9742 ptr := v_0.Args[0] 9743 idx := v.Args[1] 9744 val := v.Args[2] 9745 mem := v.Args[3] 9746 v.reset(OpAMD64MOVSSstoreidx1) 9747 v.AuxInt = c + d 9748 v.Aux = sym 9749 v.AddArg(ptr) 9750 v.AddArg(idx) 9751 v.AddArg(val) 9752 v.AddArg(mem) 9753 return true 9754 } 9755 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9756 // cond: 9757 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 9758 for { 9759 c := v.AuxInt 9760 sym := v.Aux 9761 ptr := v.Args[0] 9762 v_1 := v.Args[1] 9763 if v_1.Op != OpAMD64ADDQconst { 9764 break 9765 } 9766 d := v_1.AuxInt 9767 idx := v_1.Args[0] 9768 val := v.Args[2] 9769 mem := v.Args[3] 9770 v.reset(OpAMD64MOVSSstoreidx1) 9771 v.AuxInt = c + d 9772 v.Aux = sym 9773 v.AddArg(ptr) 9774 v.AddArg(idx) 9775 v.AddArg(val) 9776 v.AddArg(mem) 9777 return true 9778 } 9779 return false 9780 } 9781 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { 9782 b := v.Block 9783 _ = b 9784 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9785 // cond: 9786 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 9787 for { 9788 c := v.AuxInt 9789 sym := v.Aux 9790 v_0 := v.Args[0] 9791 if v_0.Op != OpAMD64ADDQconst { 9792 break 9793 } 9794 d := v_0.AuxInt 9795 ptr := v_0.Args[0] 9796 idx := v.Args[1] 9797 val := v.Args[2] 9798 mem := v.Args[3] 9799 v.reset(OpAMD64MOVSSstoreidx4) 9800 v.AuxInt = c + d 9801 v.Aux = sym 9802 v.AddArg(ptr) 9803 v.AddArg(idx) 9804 v.AddArg(val) 9805 v.AddArg(mem) 9806 return true 9807 } 9808 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9809 // cond: 9810 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 9811 for { 9812 c := v.AuxInt 9813 sym := v.Aux 9814 ptr := v.Args[0] 9815 v_1 := v.Args[1] 9816 if v_1.Op != OpAMD64ADDQconst { 9817 break 9818 } 9819 d := v_1.AuxInt 9820 idx := v_1.Args[0] 9821 val := v.Args[2] 9822 mem := v.Args[3] 9823 v.reset(OpAMD64MOVSSstoreidx4) 9824 v.AuxInt = c + 4*d 9825 v.Aux = sym 9826 v.AddArg(ptr) 9827 v.AddArg(idx) 9828 v.AddArg(val) 9829 v.AddArg(mem) 9830 return true 9831 } 9832 return false 9833 } 9834 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { 9835 b := v.Block 9836 _ = b 9837 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 9838 // cond: x.Uses == 1 9839 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 9840 for { 9841 x := v.Args[0] 9842 if x.Op != OpAMD64MOVWload { 9843 break 9844 } 9845 off := x.AuxInt 9846 sym := x.Aux 9847 ptr := x.Args[0] 9848 mem := x.Args[1] 9849 if !(x.Uses == 1) { 9850 break 9851 } 9852 b = x.Block 9853 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 9854 v.reset(OpCopy) 9855 v.AddArg(v0) 9856 v0.AuxInt = off 9857 v0.Aux = sym 9858 v0.AddArg(ptr) 9859 v0.AddArg(mem) 9860 return true 9861 } 9862 // match: (MOVWQSX (ANDWconst [c] x)) 9863 // cond: c & 0x8000 == 0 9864 // result: (ANDQconst [c & 0x7fff] x) 9865 for { 9866 v_0 := v.Args[0] 9867 if v_0.Op != OpAMD64ANDWconst { 9868 break 9869 } 9870 c := v_0.AuxInt 9871 x := v_0.Args[0] 9872 if !(c&0x8000 == 0) { 9873 break 9874 } 9875 v.reset(OpAMD64ANDQconst) 9876 v.AuxInt = c & 0x7fff 9877 v.AddArg(x) 9878 return true 9879 } 9880 return false 9881 } 9882 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool { 9883 b := v.Block 9884 _ = b 9885 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9886 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9887 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9888 for { 9889 off1 := v.AuxInt 9890 sym1 := v.Aux 9891 v_0 := v.Args[0] 9892 if v_0.Op != OpAMD64LEAQ { 9893 break 9894 } 9895 off2 := v_0.AuxInt 9896 sym2 := v_0.Aux 9897 base := v_0.Args[0] 9898 mem := v.Args[1] 9899 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9900 break 9901 } 9902 v.reset(OpAMD64MOVWQSXload) 9903 v.AuxInt = off1 + off2 9904 v.Aux = mergeSym(sym1, sym2) 9905 v.AddArg(base) 9906 v.AddArg(mem) 9907 return true 9908 } 9909 return false 9910 } 9911 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { 9912 b := v.Block 9913 _ = b 9914 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 9915 // cond: x.Uses == 1 9916 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 9917 for { 9918 x := v.Args[0] 9919 if x.Op != OpAMD64MOVWload { 9920 break 9921 } 9922 off := x.AuxInt 9923 sym := x.Aux 9924 ptr := x.Args[0] 9925 mem := x.Args[1] 9926 if !(x.Uses == 1) { 9927 break 9928 } 9929 b = x.Block 9930 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type) 9931 v.reset(OpCopy) 9932 v.AddArg(v0) 9933 v0.AuxInt = off 9934 v0.Aux = sym 9935 v0.AddArg(ptr) 9936 v0.AddArg(mem) 9937 return true 9938 } 9939 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 9940 // cond: x.Uses == 1 9941 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 9942 for { 9943 x := v.Args[0] 9944 if x.Op != OpAMD64MOVWloadidx1 { 9945 break 9946 } 9947 off := x.AuxInt 9948 sym := x.Aux 9949 ptr := x.Args[0] 9950 idx := x.Args[1] 9951 mem := x.Args[2] 9952 if !(x.Uses == 1) { 9953 break 9954 } 9955 b = x.Block 9956 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 9957 v.reset(OpCopy) 9958 v.AddArg(v0) 9959 v0.AuxInt = off 9960 v0.Aux = sym 9961 v0.AddArg(ptr) 9962 v0.AddArg(idx) 9963 v0.AddArg(mem) 9964 return true 9965 } 9966 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 9967 // cond: x.Uses == 1 9968 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 9969 for { 9970 x := v.Args[0] 9971 if x.Op != OpAMD64MOVWloadidx2 { 9972 break 9973 } 9974 off := x.AuxInt 9975 sym := x.Aux 9976 ptr := x.Args[0] 9977 idx := x.Args[1] 9978 mem := x.Args[2] 9979 if !(x.Uses == 1) { 9980 break 9981 } 9982 b = x.Block 9983 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type) 9984 v.reset(OpCopy) 9985 v.AddArg(v0) 9986 v0.AuxInt = off 9987 v0.Aux = sym 9988 v0.AddArg(ptr) 9989 v0.AddArg(idx) 9990 v0.AddArg(mem) 9991 return true 9992 } 9993 // match: (MOVWQZX (ANDWconst [c] x)) 9994 // cond: 9995 // result: (ANDQconst [c & 0xffff] x) 9996 for { 9997 v_0 := v.Args[0] 9998 if v_0.Op != OpAMD64ANDWconst { 9999 break 10000 } 10001 c := v_0.AuxInt 10002 x := v_0.Args[0] 10003 v.reset(OpAMD64ANDQconst) 10004 v.AuxInt = c & 0xffff 10005 v.AddArg(x) 10006 return true 10007 } 10008 return false 10009 } 10010 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { 10011 b := v.Block 10012 _ = b 10013 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 10014 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10015 // result: x 10016 for { 10017 off := v.AuxInt 10018 sym := v.Aux 10019 ptr := v.Args[0] 10020 v_1 := v.Args[1] 10021 if v_1.Op != OpAMD64MOVWstore { 10022 break 10023 } 10024 off2 := v_1.AuxInt 10025 sym2 := v_1.Aux 10026 ptr2 := v_1.Args[0] 10027 x := v_1.Args[1] 10028 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10029 break 10030 } 10031 v.reset(OpCopy) 10032 v.Type = x.Type 10033 v.AddArg(x) 10034 return true 10035 } 10036 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 10037 // cond: is32Bit(off1+off2) 10038 // result: (MOVWload [off1+off2] {sym} ptr mem) 10039 for { 10040 off1 := v.AuxInt 10041 sym := v.Aux 10042 v_0 := v.Args[0] 10043 if v_0.Op != OpAMD64ADDQconst { 10044 break 10045 } 10046 off2 := v_0.AuxInt 10047 ptr := v_0.Args[0] 10048 mem := v.Args[1] 10049 if !(is32Bit(off1 + off2)) { 10050 break 10051 } 10052 v.reset(OpAMD64MOVWload) 10053 v.AuxInt = off1 + off2 10054 v.Aux = sym 10055 v.AddArg(ptr) 10056 v.AddArg(mem) 10057 return true 10058 } 10059 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10060 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10061 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10062 for { 10063 off1 := v.AuxInt 10064 sym1 := v.Aux 10065 v_0 := v.Args[0] 10066 if v_0.Op != OpAMD64LEAQ { 10067 break 10068 } 10069 off2 := v_0.AuxInt 10070 sym2 := v_0.Aux 10071 base := v_0.Args[0] 10072 mem := v.Args[1] 10073 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10074 break 10075 } 10076 v.reset(OpAMD64MOVWload) 10077 v.AuxInt = off1 + off2 10078 v.Aux = mergeSym(sym1, sym2) 10079 v.AddArg(base) 10080 v.AddArg(mem) 10081 return true 10082 } 10083 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10084 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10085 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10086 for { 10087 off1 := v.AuxInt 10088 sym1 := v.Aux 10089 v_0 := v.Args[0] 10090 if v_0.Op != OpAMD64LEAQ1 { 10091 break 10092 } 10093 off2 := v_0.AuxInt 10094 sym2 := v_0.Aux 10095 ptr := v_0.Args[0] 10096 idx := v_0.Args[1] 10097 mem := v.Args[1] 10098 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10099 break 10100 } 10101 v.reset(OpAMD64MOVWloadidx1) 10102 v.AuxInt = off1 + off2 10103 v.Aux = mergeSym(sym1, sym2) 10104 v.AddArg(ptr) 10105 v.AddArg(idx) 10106 v.AddArg(mem) 10107 return true 10108 } 10109 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 10110 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10111 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10112 for { 10113 off1 := v.AuxInt 10114 sym1 := v.Aux 10115 v_0 := v.Args[0] 10116 if v_0.Op != OpAMD64LEAQ2 { 10117 break 10118 } 10119 off2 := v_0.AuxInt 10120 sym2 := v_0.Aux 10121 ptr := v_0.Args[0] 10122 idx := v_0.Args[1] 10123 mem := v.Args[1] 10124 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10125 break 10126 } 10127 v.reset(OpAMD64MOVWloadidx2) 10128 v.AuxInt = off1 + off2 10129 v.Aux = mergeSym(sym1, sym2) 10130 v.AddArg(ptr) 10131 v.AddArg(idx) 10132 v.AddArg(mem) 10133 return true 10134 } 10135 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 10136 // cond: ptr.Op != OpSB 10137 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 10138 for { 10139 off := v.AuxInt 10140 sym := v.Aux 10141 v_0 := v.Args[0] 10142 if v_0.Op != OpAMD64ADDQ { 10143 break 10144 } 10145 ptr := v_0.Args[0] 10146 idx := v_0.Args[1] 10147 mem := v.Args[1] 10148 if !(ptr.Op != OpSB) { 10149 break 10150 } 10151 v.reset(OpAMD64MOVWloadidx1) 10152 v.AuxInt = off 10153 v.Aux = sym 10154 v.AddArg(ptr) 10155 v.AddArg(idx) 10156 v.AddArg(mem) 10157 return true 10158 } 10159 return false 10160 } 10161 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool { 10162 b := v.Block 10163 _ = b 10164 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 10165 // cond: 10166 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 10167 for { 10168 c := v.AuxInt 10169 sym := v.Aux 10170 ptr := v.Args[0] 10171 v_1 := v.Args[1] 10172 if v_1.Op != OpAMD64SHLQconst { 10173 break 10174 } 10175 if v_1.AuxInt != 1 { 10176 break 10177 } 10178 idx := v_1.Args[0] 10179 mem := v.Args[2] 10180 v.reset(OpAMD64MOVWloadidx2) 10181 v.AuxInt = c 10182 v.Aux = sym 10183 v.AddArg(ptr) 10184 v.AddArg(idx) 10185 v.AddArg(mem) 10186 return true 10187 } 10188 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 10189 // cond: 10190 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 10191 for { 10192 c := v.AuxInt 10193 sym := v.Aux 10194 v_0 := v.Args[0] 10195 if v_0.Op != OpAMD64ADDQconst { 10196 break 10197 } 10198 d := v_0.AuxInt 10199 ptr := v_0.Args[0] 10200 idx := v.Args[1] 10201 mem := v.Args[2] 10202 v.reset(OpAMD64MOVWloadidx1) 10203 v.AuxInt = c + d 10204 v.Aux = sym 10205 v.AddArg(ptr) 10206 v.AddArg(idx) 10207 v.AddArg(mem) 10208 return true 10209 } 10210 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10211 // cond: 10212 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 10213 for { 10214 c := v.AuxInt 10215 sym := v.Aux 10216 ptr := v.Args[0] 10217 v_1 := v.Args[1] 10218 if v_1.Op != OpAMD64ADDQconst { 10219 break 10220 } 10221 d := v_1.AuxInt 10222 idx := v_1.Args[0] 10223 mem := v.Args[2] 10224 v.reset(OpAMD64MOVWloadidx1) 10225 v.AuxInt = c + d 10226 v.Aux = sym 10227 v.AddArg(ptr) 10228 v.AddArg(idx) 10229 v.AddArg(mem) 10230 return true 10231 } 10232 return false 10233 } 10234 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { 10235 b := v.Block 10236 _ = b 10237 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 10238 // cond: 10239 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 10240 for { 10241 c := v.AuxInt 10242 sym := v.Aux 10243 v_0 := v.Args[0] 10244 if v_0.Op != OpAMD64ADDQconst { 10245 break 10246 } 10247 d := v_0.AuxInt 10248 ptr := v_0.Args[0] 10249 idx := v.Args[1] 10250 mem := v.Args[2] 10251 v.reset(OpAMD64MOVWloadidx2) 10252 v.AuxInt = c + d 10253 v.Aux = sym 10254 v.AddArg(ptr) 10255 v.AddArg(idx) 10256 v.AddArg(mem) 10257 return true 10258 } 10259 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 10260 // cond: 10261 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 10262 for { 10263 c := v.AuxInt 10264 sym := v.Aux 10265 ptr := v.Args[0] 10266 v_1 := v.Args[1] 10267 if v_1.Op != OpAMD64ADDQconst { 10268 break 10269 } 10270 d := v_1.AuxInt 10271 idx := v_1.Args[0] 10272 mem := v.Args[2] 10273 v.reset(OpAMD64MOVWloadidx2) 10274 v.AuxInt = c + 2*d 10275 v.Aux = sym 10276 v.AddArg(ptr) 10277 v.AddArg(idx) 10278 v.AddArg(mem) 10279 return true 10280 } 10281 return false 10282 } 10283 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { 10284 b := v.Block 10285 _ = b 10286 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 10287 // cond: 10288 // result: (MOVWstore [off] {sym} ptr x mem) 10289 for { 10290 off := v.AuxInt 10291 sym := v.Aux 10292 ptr := v.Args[0] 10293 v_1 := v.Args[1] 10294 if v_1.Op != OpAMD64MOVWQSX { 10295 break 10296 } 10297 x := v_1.Args[0] 10298 mem := v.Args[2] 10299 v.reset(OpAMD64MOVWstore) 10300 v.AuxInt = off 10301 v.Aux = sym 10302 v.AddArg(ptr) 10303 v.AddArg(x) 10304 v.AddArg(mem) 10305 return true 10306 } 10307 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 10308 // cond: 10309 // result: (MOVWstore [off] {sym} ptr x mem) 10310 for { 10311 off := v.AuxInt 10312 sym := v.Aux 10313 ptr := v.Args[0] 10314 v_1 := v.Args[1] 10315 if v_1.Op != OpAMD64MOVWQZX { 10316 break 10317 } 10318 x := v_1.Args[0] 10319 mem := v.Args[2] 10320 v.reset(OpAMD64MOVWstore) 10321 v.AuxInt = off 10322 v.Aux = sym 10323 v.AddArg(ptr) 10324 v.AddArg(x) 10325 v.AddArg(mem) 10326 return true 10327 } 10328 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10329 // cond: is32Bit(off1+off2) 10330 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 10331 for { 10332 off1 := v.AuxInt 10333 sym := v.Aux 10334 v_0 := v.Args[0] 10335 if v_0.Op != OpAMD64ADDQconst { 10336 break 10337 } 10338 off2 := v_0.AuxInt 10339 ptr := v_0.Args[0] 10340 val := v.Args[1] 10341 mem := v.Args[2] 10342 if !(is32Bit(off1 + off2)) { 10343 break 10344 } 10345 v.reset(OpAMD64MOVWstore) 10346 v.AuxInt = off1 + off2 10347 v.Aux = sym 10348 v.AddArg(ptr) 10349 v.AddArg(val) 10350 v.AddArg(mem) 10351 return true 10352 } 10353 // match: (MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) 10354 // cond: validOff(off) 10355 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 10356 for { 10357 off := v.AuxInt 10358 sym := v.Aux 10359 ptr := v.Args[0] 10360 v_1 := v.Args[1] 10361 if v_1.Op != OpAMD64MOVWconst { 10362 break 10363 } 10364 c := v_1.AuxInt 10365 mem := v.Args[2] 10366 if !(validOff(off)) { 10367 break 10368 } 10369 v.reset(OpAMD64MOVWstoreconst) 10370 v.AuxInt = makeValAndOff(int64(int16(c)), off) 10371 v.Aux = sym 10372 v.AddArg(ptr) 10373 v.AddArg(mem) 10374 return true 10375 } 10376 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10377 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10378 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10379 for { 10380 off1 := v.AuxInt 10381 sym1 := v.Aux 10382 v_0 := v.Args[0] 10383 if v_0.Op != OpAMD64LEAQ { 10384 break 10385 } 10386 off2 := v_0.AuxInt 10387 sym2 := v_0.Aux 10388 base := v_0.Args[0] 10389 val := v.Args[1] 10390 mem := v.Args[2] 10391 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10392 break 10393 } 10394 v.reset(OpAMD64MOVWstore) 10395 v.AuxInt = off1 + off2 10396 v.Aux = mergeSym(sym1, sym2) 10397 v.AddArg(base) 10398 v.AddArg(val) 10399 v.AddArg(mem) 10400 return true 10401 } 10402 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 10403 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10404 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10405 for { 10406 off1 := v.AuxInt 10407 sym1 := v.Aux 10408 v_0 := v.Args[0] 10409 if v_0.Op != OpAMD64LEAQ1 { 10410 break 10411 } 10412 off2 := v_0.AuxInt 10413 sym2 := v_0.Aux 10414 ptr := v_0.Args[0] 10415 idx := v_0.Args[1] 10416 val := v.Args[1] 10417 mem := v.Args[2] 10418 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10419 break 10420 } 10421 v.reset(OpAMD64MOVWstoreidx1) 10422 v.AuxInt = off1 + off2 10423 v.Aux = mergeSym(sym1, sym2) 10424 v.AddArg(ptr) 10425 v.AddArg(idx) 10426 v.AddArg(val) 10427 v.AddArg(mem) 10428 return true 10429 } 10430 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 10431 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10432 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10433 for { 10434 off1 := v.AuxInt 10435 sym1 := v.Aux 10436 v_0 := v.Args[0] 10437 if v_0.Op != OpAMD64LEAQ2 { 10438 break 10439 } 10440 off2 := v_0.AuxInt 10441 sym2 := v_0.Aux 10442 ptr := v_0.Args[0] 10443 idx := v_0.Args[1] 10444 val := v.Args[1] 10445 mem := v.Args[2] 10446 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10447 break 10448 } 10449 v.reset(OpAMD64MOVWstoreidx2) 10450 v.AuxInt = off1 + off2 10451 v.Aux = mergeSym(sym1, sym2) 10452 v.AddArg(ptr) 10453 v.AddArg(idx) 10454 v.AddArg(val) 10455 v.AddArg(mem) 10456 return true 10457 } 10458 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 10459 // cond: ptr.Op != OpSB 10460 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 10461 for { 10462 off := v.AuxInt 10463 sym := v.Aux 10464 v_0 := v.Args[0] 10465 if v_0.Op != OpAMD64ADDQ { 10466 break 10467 } 10468 ptr := v_0.Args[0] 10469 idx := v_0.Args[1] 10470 val := v.Args[1] 10471 mem := v.Args[2] 10472 if !(ptr.Op != OpSB) { 10473 break 10474 } 10475 v.reset(OpAMD64MOVWstoreidx1) 10476 v.AuxInt = off 10477 v.Aux = sym 10478 v.AddArg(ptr) 10479 v.AddArg(idx) 10480 v.AddArg(val) 10481 v.AddArg(mem) 10482 return true 10483 } 10484 return false 10485 } 10486 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { 10487 b := v.Block 10488 _ = b 10489 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 10490 // cond: ValAndOff(sc).canAdd(off) 10491 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10492 for { 10493 sc := v.AuxInt 10494 s := v.Aux 10495 v_0 := v.Args[0] 10496 if v_0.Op != OpAMD64ADDQconst { 10497 break 10498 } 10499 off := v_0.AuxInt 10500 ptr := v_0.Args[0] 10501 mem := v.Args[1] 10502 if !(ValAndOff(sc).canAdd(off)) { 10503 break 10504 } 10505 v.reset(OpAMD64MOVWstoreconst) 10506 v.AuxInt = ValAndOff(sc).add(off) 10507 v.Aux = s 10508 v.AddArg(ptr) 10509 v.AddArg(mem) 10510 return true 10511 } 10512 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 10513 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10514 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10515 for { 10516 sc := v.AuxInt 10517 sym1 := v.Aux 10518 v_0 := v.Args[0] 10519 if v_0.Op != OpAMD64LEAQ { 10520 break 10521 } 10522 off := v_0.AuxInt 10523 sym2 := v_0.Aux 10524 ptr := v_0.Args[0] 10525 mem := v.Args[1] 10526 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10527 break 10528 } 10529 v.reset(OpAMD64MOVWstoreconst) 10530 v.AuxInt = ValAndOff(sc).add(off) 10531 v.Aux = mergeSym(sym1, sym2) 10532 v.AddArg(ptr) 10533 v.AddArg(mem) 10534 return true 10535 } 10536 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 10537 // cond: canMergeSym(sym1, sym2) 10538 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10539 for { 10540 x := v.AuxInt 10541 sym1 := v.Aux 10542 v_0 := v.Args[0] 10543 if v_0.Op != OpAMD64LEAQ1 { 10544 break 10545 } 10546 off := v_0.AuxInt 10547 sym2 := v_0.Aux 10548 ptr := v_0.Args[0] 10549 idx := v_0.Args[1] 10550 mem := v.Args[1] 10551 if !(canMergeSym(sym1, sym2)) { 10552 break 10553 } 10554 v.reset(OpAMD64MOVWstoreconstidx1) 10555 v.AuxInt = ValAndOff(x).add(off) 10556 v.Aux = mergeSym(sym1, sym2) 10557 v.AddArg(ptr) 10558 v.AddArg(idx) 10559 v.AddArg(mem) 10560 return true 10561 } 10562 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 10563 // cond: canMergeSym(sym1, sym2) 10564 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10565 for { 10566 x := v.AuxInt 10567 sym1 := v.Aux 10568 v_0 := v.Args[0] 10569 if v_0.Op != OpAMD64LEAQ2 { 10570 break 10571 } 10572 off := v_0.AuxInt 10573 sym2 := v_0.Aux 10574 ptr := v_0.Args[0] 10575 idx := v_0.Args[1] 10576 mem := v.Args[1] 10577 if !(canMergeSym(sym1, sym2)) { 10578 break 10579 } 10580 v.reset(OpAMD64MOVWstoreconstidx2) 10581 v.AuxInt = ValAndOff(x).add(off) 10582 v.Aux = mergeSym(sym1, sym2) 10583 v.AddArg(ptr) 10584 v.AddArg(idx) 10585 v.AddArg(mem) 10586 return true 10587 } 10588 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 10589 // cond: 10590 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 10591 for { 10592 x := v.AuxInt 10593 sym := v.Aux 10594 v_0 := v.Args[0] 10595 if v_0.Op != OpAMD64ADDQ { 10596 break 10597 } 10598 ptr := v_0.Args[0] 10599 idx := v_0.Args[1] 10600 mem := v.Args[1] 10601 v.reset(OpAMD64MOVWstoreconstidx1) 10602 v.AuxInt = x 10603 v.Aux = sym 10604 v.AddArg(ptr) 10605 v.AddArg(idx) 10606 v.AddArg(mem) 10607 return true 10608 } 10609 return false 10610 } 10611 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { 10612 b := v.Block 10613 _ = b 10614 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 10615 // cond: 10616 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 10617 for { 10618 c := v.AuxInt 10619 sym := v.Aux 10620 ptr := v.Args[0] 10621 v_1 := v.Args[1] 10622 if v_1.Op != OpAMD64SHLQconst { 10623 break 10624 } 10625 if v_1.AuxInt != 1 { 10626 break 10627 } 10628 idx := v_1.Args[0] 10629 mem := v.Args[2] 10630 v.reset(OpAMD64MOVWstoreconstidx2) 10631 v.AuxInt = c 10632 v.Aux = sym 10633 v.AddArg(ptr) 10634 v.AddArg(idx) 10635 v.AddArg(mem) 10636 return true 10637 } 10638 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 10639 // cond: 10640 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10641 for { 10642 x := v.AuxInt 10643 sym := v.Aux 10644 v_0 := v.Args[0] 10645 if v_0.Op != OpAMD64ADDQconst { 10646 break 10647 } 10648 c := v_0.AuxInt 10649 ptr := v_0.Args[0] 10650 idx := v.Args[1] 10651 mem := v.Args[2] 10652 v.reset(OpAMD64MOVWstoreconstidx1) 10653 v.AuxInt = ValAndOff(x).add(c) 10654 v.Aux = sym 10655 v.AddArg(ptr) 10656 v.AddArg(idx) 10657 v.AddArg(mem) 10658 return true 10659 } 10660 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 10661 // cond: 10662 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10663 for { 10664 x := v.AuxInt 10665 sym := v.Aux 10666 ptr := v.Args[0] 10667 v_1 := v.Args[1] 10668 if v_1.Op != OpAMD64ADDQconst { 10669 break 10670 } 10671 c := v_1.AuxInt 10672 idx := v_1.Args[0] 10673 mem := v.Args[2] 10674 v.reset(OpAMD64MOVWstoreconstidx1) 10675 v.AuxInt = ValAndOff(x).add(c) 10676 v.Aux = sym 10677 v.AddArg(ptr) 10678 v.AddArg(idx) 10679 v.AddArg(mem) 10680 return true 10681 } 10682 return false 10683 } 10684 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { 10685 b := v.Block 10686 _ = b 10687 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 10688 // cond: 10689 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10690 for { 10691 x := v.AuxInt 10692 sym := v.Aux 10693 v_0 := v.Args[0] 10694 if v_0.Op != OpAMD64ADDQconst { 10695 break 10696 } 10697 c := v_0.AuxInt 10698 ptr := v_0.Args[0] 10699 idx := v.Args[1] 10700 mem := v.Args[2] 10701 v.reset(OpAMD64MOVWstoreconstidx2) 10702 v.AuxInt = ValAndOff(x).add(c) 10703 v.Aux = sym 10704 v.AddArg(ptr) 10705 v.AddArg(idx) 10706 v.AddArg(mem) 10707 return true 10708 } 10709 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 10710 // cond: 10711 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 10712 for { 10713 x := v.AuxInt 10714 sym := v.Aux 10715 ptr := v.Args[0] 10716 v_1 := v.Args[1] 10717 if v_1.Op != OpAMD64ADDQconst { 10718 break 10719 } 10720 c := v_1.AuxInt 10721 idx := v_1.Args[0] 10722 mem := v.Args[2] 10723 v.reset(OpAMD64MOVWstoreconstidx2) 10724 v.AuxInt = ValAndOff(x).add(2 * c) 10725 v.Aux = sym 10726 v.AddArg(ptr) 10727 v.AddArg(idx) 10728 v.AddArg(mem) 10729 return true 10730 } 10731 return false 10732 } 10733 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { 10734 b := v.Block 10735 _ = b 10736 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 10737 // cond: 10738 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 10739 for { 10740 c := v.AuxInt 10741 sym := v.Aux 10742 ptr := v.Args[0] 10743 v_1 := v.Args[1] 10744 if v_1.Op != OpAMD64SHLQconst { 10745 break 10746 } 10747 if v_1.AuxInt != 1 { 10748 break 10749 } 10750 idx := v_1.Args[0] 10751 val := v.Args[2] 10752 mem := v.Args[3] 10753 v.reset(OpAMD64MOVWstoreidx2) 10754 v.AuxInt = c 10755 v.Aux = sym 10756 v.AddArg(ptr) 10757 v.AddArg(idx) 10758 v.AddArg(val) 10759 v.AddArg(mem) 10760 return true 10761 } 10762 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10763 // cond: 10764 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10765 for { 10766 c := v.AuxInt 10767 sym := v.Aux 10768 v_0 := v.Args[0] 10769 if v_0.Op != OpAMD64ADDQconst { 10770 break 10771 } 10772 d := v_0.AuxInt 10773 ptr := v_0.Args[0] 10774 idx := v.Args[1] 10775 val := v.Args[2] 10776 mem := v.Args[3] 10777 v.reset(OpAMD64MOVWstoreidx1) 10778 v.AuxInt = c + d 10779 v.Aux = sym 10780 v.AddArg(ptr) 10781 v.AddArg(idx) 10782 v.AddArg(val) 10783 v.AddArg(mem) 10784 return true 10785 } 10786 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10787 // cond: 10788 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10789 for { 10790 c := v.AuxInt 10791 sym := v.Aux 10792 ptr := v.Args[0] 10793 v_1 := v.Args[1] 10794 if v_1.Op != OpAMD64ADDQconst { 10795 break 10796 } 10797 d := v_1.AuxInt 10798 idx := v_1.Args[0] 10799 val := v.Args[2] 10800 mem := v.Args[3] 10801 v.reset(OpAMD64MOVWstoreidx1) 10802 v.AuxInt = c + d 10803 v.Aux = sym 10804 v.AddArg(ptr) 10805 v.AddArg(idx) 10806 v.AddArg(val) 10807 v.AddArg(mem) 10808 return true 10809 } 10810 return false 10811 } 10812 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { 10813 b := v.Block 10814 _ = b 10815 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10816 // cond: 10817 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 10818 for { 10819 c := v.AuxInt 10820 sym := v.Aux 10821 v_0 := v.Args[0] 10822 if v_0.Op != OpAMD64ADDQconst { 10823 break 10824 } 10825 d := v_0.AuxInt 10826 ptr := v_0.Args[0] 10827 idx := v.Args[1] 10828 val := v.Args[2] 10829 mem := v.Args[3] 10830 v.reset(OpAMD64MOVWstoreidx2) 10831 v.AuxInt = c + d 10832 v.Aux = sym 10833 v.AddArg(ptr) 10834 v.AddArg(idx) 10835 v.AddArg(val) 10836 v.AddArg(mem) 10837 return true 10838 } 10839 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10840 // cond: 10841 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 10842 for { 10843 c := v.AuxInt 10844 sym := v.Aux 10845 ptr := v.Args[0] 10846 v_1 := v.Args[1] 10847 if v_1.Op != OpAMD64ADDQconst { 10848 break 10849 } 10850 d := v_1.AuxInt 10851 idx := v_1.Args[0] 10852 val := v.Args[2] 10853 mem := v.Args[3] 10854 v.reset(OpAMD64MOVWstoreidx2) 10855 v.AuxInt = c + 2*d 10856 v.Aux = sym 10857 v.AddArg(ptr) 10858 v.AddArg(idx) 10859 v.AddArg(val) 10860 v.AddArg(mem) 10861 return true 10862 } 10863 return false 10864 } 10865 func rewriteValueAMD64_OpAMD64MULB(v *Value, config *Config) bool { 10866 b := v.Block 10867 _ = b 10868 // match: (MULB x (MOVBconst [c])) 10869 // cond: 10870 // result: (MULBconst [c] x) 10871 for { 10872 x := v.Args[0] 10873 v_1 := v.Args[1] 10874 if v_1.Op != OpAMD64MOVBconst { 10875 break 10876 } 10877 c := v_1.AuxInt 10878 v.reset(OpAMD64MULBconst) 10879 v.AuxInt = c 10880 v.AddArg(x) 10881 return true 10882 } 10883 // match: (MULB (MOVBconst [c]) x) 10884 // cond: 10885 // result: (MULBconst [c] x) 10886 for { 10887 v_0 := v.Args[0] 10888 if v_0.Op != OpAMD64MOVBconst { 10889 break 10890 } 10891 c := v_0.AuxInt 10892 x := v.Args[1] 10893 v.reset(OpAMD64MULBconst) 10894 v.AuxInt = c 10895 v.AddArg(x) 10896 return true 10897 } 10898 return false 10899 } 10900 func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool { 10901 b := v.Block 10902 _ = b 10903 // match: (MULBconst [c] (MOVBconst [d])) 10904 // cond: 10905 // result: (MOVBconst [int64(int8(c*d))]) 10906 for { 10907 c := v.AuxInt 10908 v_0 := v.Args[0] 10909 if v_0.Op != OpAMD64MOVBconst { 10910 break 10911 } 10912 d := v_0.AuxInt 10913 v.reset(OpAMD64MOVBconst) 10914 v.AuxInt = int64(int8(c * d)) 10915 return true 10916 } 10917 return false 10918 } 10919 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 10920 b := v.Block 10921 _ = b 10922 // match: (MULL x (MOVLconst [c])) 10923 // cond: 10924 // result: (MULLconst [c] x) 10925 for { 10926 x := v.Args[0] 10927 v_1 := v.Args[1] 10928 if v_1.Op != OpAMD64MOVLconst { 10929 break 10930 } 10931 c := v_1.AuxInt 10932 v.reset(OpAMD64MULLconst) 10933 v.AuxInt = c 10934 v.AddArg(x) 10935 return true 10936 } 10937 // match: (MULL (MOVLconst [c]) x) 10938 // cond: 10939 // result: (MULLconst [c] x) 10940 for { 10941 v_0 := v.Args[0] 10942 if v_0.Op != OpAMD64MOVLconst { 10943 break 10944 } 10945 c := v_0.AuxInt 10946 x := v.Args[1] 10947 v.reset(OpAMD64MULLconst) 10948 v.AuxInt = c 10949 v.AddArg(x) 10950 return true 10951 } 10952 return false 10953 } 10954 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 10955 b := v.Block 10956 _ = b 10957 // match: (MULLconst [c] (MOVLconst [d])) 10958 // cond: 10959 // result: (MOVLconst [int64(int32(c*d))]) 10960 for { 10961 c := v.AuxInt 10962 v_0 := v.Args[0] 10963 if v_0.Op != OpAMD64MOVLconst { 10964 break 10965 } 10966 d := v_0.AuxInt 10967 v.reset(OpAMD64MOVLconst) 10968 v.AuxInt = int64(int32(c * d)) 10969 return true 10970 } 10971 return false 10972 } 10973 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 10974 b := v.Block 10975 _ = b 10976 // match: (MULQ x (MOVQconst [c])) 10977 // cond: is32Bit(c) 10978 // result: (MULQconst [c] x) 10979 for { 10980 x := v.Args[0] 10981 v_1 := v.Args[1] 10982 if v_1.Op != OpAMD64MOVQconst { 10983 break 10984 } 10985 c := v_1.AuxInt 10986 if !(is32Bit(c)) { 10987 break 10988 } 10989 v.reset(OpAMD64MULQconst) 10990 v.AuxInt = c 10991 v.AddArg(x) 10992 return true 10993 } 10994 // match: (MULQ (MOVQconst [c]) x) 10995 // cond: is32Bit(c) 10996 // result: (MULQconst [c] x) 10997 for { 10998 v_0 := v.Args[0] 10999 if v_0.Op != OpAMD64MOVQconst { 11000 break 11001 } 11002 c := v_0.AuxInt 11003 x := v.Args[1] 11004 if !(is32Bit(c)) { 11005 break 11006 } 11007 v.reset(OpAMD64MULQconst) 11008 v.AuxInt = c 11009 v.AddArg(x) 11010 return true 11011 } 11012 return false 11013 } 11014 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 11015 b := v.Block 11016 _ = b 11017 // match: (MULQconst [-1] x) 11018 // cond: 11019 // result: (NEGQ x) 11020 for { 11021 if v.AuxInt != -1 { 11022 break 11023 } 11024 x := v.Args[0] 11025 v.reset(OpAMD64NEGQ) 11026 v.AddArg(x) 11027 return true 11028 } 11029 // match: (MULQconst [0] _) 11030 // cond: 11031 // result: (MOVQconst [0]) 11032 for { 11033 if v.AuxInt != 0 { 11034 break 11035 } 11036 v.reset(OpAMD64MOVQconst) 11037 v.AuxInt = 0 11038 return true 11039 } 11040 // match: (MULQconst [1] x) 11041 // cond: 11042 // result: x 11043 for { 11044 if v.AuxInt != 1 { 11045 break 11046 } 11047 x := v.Args[0] 11048 v.reset(OpCopy) 11049 v.Type = x.Type 11050 v.AddArg(x) 11051 return true 11052 } 11053 // match: (MULQconst [3] x) 11054 // cond: 11055 // result: (LEAQ2 x x) 11056 for { 11057 if v.AuxInt != 3 { 11058 break 11059 } 11060 x := v.Args[0] 11061 v.reset(OpAMD64LEAQ2) 11062 v.AddArg(x) 11063 v.AddArg(x) 11064 return true 11065 } 11066 // match: (MULQconst [5] x) 11067 // cond: 11068 // result: (LEAQ4 x x) 11069 for { 11070 if v.AuxInt != 5 { 11071 break 11072 } 11073 x := v.Args[0] 11074 v.reset(OpAMD64LEAQ4) 11075 v.AddArg(x) 11076 v.AddArg(x) 11077 return true 11078 } 11079 // match: (MULQconst [7] x) 11080 // cond: 11081 // result: (LEAQ8 (NEGQ <v.Type> x) x) 11082 for { 11083 if v.AuxInt != 7 { 11084 break 11085 } 11086 x := v.Args[0] 11087 v.reset(OpAMD64LEAQ8) 11088 v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type) 11089 v0.AddArg(x) 11090 v.AddArg(v0) 11091 v.AddArg(x) 11092 return true 11093 } 11094 // match: (MULQconst [9] x) 11095 // cond: 11096 // result: (LEAQ8 x x) 11097 for { 11098 if v.AuxInt != 9 { 11099 break 11100 } 11101 x := v.Args[0] 11102 v.reset(OpAMD64LEAQ8) 11103 v.AddArg(x) 11104 v.AddArg(x) 11105 return true 11106 } 11107 // match: (MULQconst [11] x) 11108 // cond: 11109 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 11110 for { 11111 if v.AuxInt != 11 { 11112 break 11113 } 11114 x := v.Args[0] 11115 v.reset(OpAMD64LEAQ2) 11116 v.AddArg(x) 11117 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 11118 v0.AddArg(x) 11119 v0.AddArg(x) 11120 v.AddArg(v0) 11121 return true 11122 } 11123 // match: (MULQconst [13] x) 11124 // cond: 11125 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 11126 for { 11127 if v.AuxInt != 13 { 11128 break 11129 } 11130 x := v.Args[0] 11131 v.reset(OpAMD64LEAQ4) 11132 v.AddArg(x) 11133 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 11134 v0.AddArg(x) 11135 v0.AddArg(x) 11136 v.AddArg(v0) 11137 return true 11138 } 11139 // match: (MULQconst [21] x) 11140 // cond: 11141 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 11142 for { 11143 if v.AuxInt != 21 { 11144 break 11145 } 11146 x := v.Args[0] 11147 v.reset(OpAMD64LEAQ4) 11148 v.AddArg(x) 11149 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 11150 v0.AddArg(x) 11151 v0.AddArg(x) 11152 v.AddArg(v0) 11153 return true 11154 } 11155 // match: (MULQconst [25] x) 11156 // cond: 11157 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 11158 for { 11159 if v.AuxInt != 25 { 11160 break 11161 } 11162 x := v.Args[0] 11163 v.reset(OpAMD64LEAQ8) 11164 v.AddArg(x) 11165 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 11166 v0.AddArg(x) 11167 v0.AddArg(x) 11168 v.AddArg(v0) 11169 return true 11170 } 11171 // match: (MULQconst [37] x) 11172 // cond: 11173 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 11174 for { 11175 if v.AuxInt != 37 { 11176 break 11177 } 11178 x := v.Args[0] 11179 v.reset(OpAMD64LEAQ4) 11180 v.AddArg(x) 11181 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 11182 v0.AddArg(x) 11183 v0.AddArg(x) 11184 v.AddArg(v0) 11185 return true 11186 } 11187 // match: (MULQconst [41] x) 11188 // cond: 11189 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 11190 for { 11191 if v.AuxInt != 41 { 11192 break 11193 } 11194 x := v.Args[0] 11195 v.reset(OpAMD64LEAQ8) 11196 v.AddArg(x) 11197 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 11198 v0.AddArg(x) 11199 v0.AddArg(x) 11200 v.AddArg(v0) 11201 return true 11202 } 11203 // match: (MULQconst [73] x) 11204 // cond: 11205 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 11206 for { 11207 if v.AuxInt != 73 { 11208 break 11209 } 11210 x := v.Args[0] 11211 v.reset(OpAMD64LEAQ8) 11212 v.AddArg(x) 11213 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 11214 v0.AddArg(x) 11215 v0.AddArg(x) 11216 v.AddArg(v0) 11217 return true 11218 } 11219 // match: (MULQconst [c] x) 11220 // cond: isPowerOfTwo(c) 11221 // result: (SHLQconst [log2(c)] x) 11222 for { 11223 c := v.AuxInt 11224 x := v.Args[0] 11225 if !(isPowerOfTwo(c)) { 11226 break 11227 } 11228 v.reset(OpAMD64SHLQconst) 11229 v.AuxInt = log2(c) 11230 v.AddArg(x) 11231 return true 11232 } 11233 // match: (MULQconst [c] x) 11234 // cond: isPowerOfTwo(c+1) && c >= 15 11235 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 11236 for { 11237 c := v.AuxInt 11238 x := v.Args[0] 11239 if !(isPowerOfTwo(c+1) && c >= 15) { 11240 break 11241 } 11242 v.reset(OpAMD64SUBQ) 11243 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 11244 v0.AuxInt = log2(c + 1) 11245 v0.AddArg(x) 11246 v.AddArg(v0) 11247 v.AddArg(x) 11248 return true 11249 } 11250 // match: (MULQconst [c] x) 11251 // cond: isPowerOfTwo(c-1) && c >= 17 11252 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 11253 for { 11254 c := v.AuxInt 11255 x := v.Args[0] 11256 if !(isPowerOfTwo(c-1) && c >= 17) { 11257 break 11258 } 11259 v.reset(OpAMD64LEAQ1) 11260 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 11261 v0.AuxInt = log2(c - 1) 11262 v0.AddArg(x) 11263 v.AddArg(v0) 11264 v.AddArg(x) 11265 return true 11266 } 11267 // match: (MULQconst [c] x) 11268 // cond: isPowerOfTwo(c-2) && c >= 34 11269 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 11270 for { 11271 c := v.AuxInt 11272 x := v.Args[0] 11273 if !(isPowerOfTwo(c-2) && c >= 34) { 11274 break 11275 } 11276 v.reset(OpAMD64LEAQ2) 11277 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 11278 v0.AuxInt = log2(c - 2) 11279 v0.AddArg(x) 11280 v.AddArg(v0) 11281 v.AddArg(x) 11282 return true 11283 } 11284 // match: (MULQconst [c] x) 11285 // cond: isPowerOfTwo(c-4) && c >= 68 11286 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 11287 for { 11288 c := v.AuxInt 11289 x := v.Args[0] 11290 if !(isPowerOfTwo(c-4) && c >= 68) { 11291 break 11292 } 11293 v.reset(OpAMD64LEAQ4) 11294 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 11295 v0.AuxInt = log2(c - 4) 11296 v0.AddArg(x) 11297 v.AddArg(v0) 11298 v.AddArg(x) 11299 return true 11300 } 11301 // match: (MULQconst [c] x) 11302 // cond: isPowerOfTwo(c-8) && c >= 136 11303 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 11304 for { 11305 c := v.AuxInt 11306 x := v.Args[0] 11307 if !(isPowerOfTwo(c-8) && c >= 136) { 11308 break 11309 } 11310 v.reset(OpAMD64LEAQ8) 11311 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 11312 v0.AuxInt = log2(c - 8) 11313 v0.AddArg(x) 11314 v.AddArg(v0) 11315 v.AddArg(x) 11316 return true 11317 } 11318 // match: (MULQconst [c] x) 11319 // cond: c%3 == 0 && isPowerOfTwo(c/3) 11320 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 11321 for { 11322 c := v.AuxInt 11323 x := v.Args[0] 11324 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 11325 break 11326 } 11327 v.reset(OpAMD64SHLQconst) 11328 v.AuxInt = log2(c / 3) 11329 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 11330 v0.AddArg(x) 11331 v0.AddArg(x) 11332 v.AddArg(v0) 11333 return true 11334 } 11335 // match: (MULQconst [c] x) 11336 // cond: c%5 == 0 && isPowerOfTwo(c/5) 11337 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 11338 for { 11339 c := v.AuxInt 11340 x := v.Args[0] 11341 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 11342 break 11343 } 11344 v.reset(OpAMD64SHLQconst) 11345 v.AuxInt = log2(c / 5) 11346 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 11347 v0.AddArg(x) 11348 v0.AddArg(x) 11349 v.AddArg(v0) 11350 return true 11351 } 11352 // match: (MULQconst [c] x) 11353 // cond: c%9 == 0 && isPowerOfTwo(c/9) 11354 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 11355 for { 11356 c := v.AuxInt 11357 x := v.Args[0] 11358 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 11359 break 11360 } 11361 v.reset(OpAMD64SHLQconst) 11362 v.AuxInt = log2(c / 9) 11363 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 11364 v0.AddArg(x) 11365 v0.AddArg(x) 11366 v.AddArg(v0) 11367 return true 11368 } 11369 // match: (MULQconst [c] (MOVQconst [d])) 11370 // cond: 11371 // result: (MOVQconst [c*d]) 11372 for { 11373 c := v.AuxInt 11374 v_0 := v.Args[0] 11375 if v_0.Op != OpAMD64MOVQconst { 11376 break 11377 } 11378 d := v_0.AuxInt 11379 v.reset(OpAMD64MOVQconst) 11380 v.AuxInt = c * d 11381 return true 11382 } 11383 return false 11384 } 11385 func rewriteValueAMD64_OpAMD64MULW(v *Value, config *Config) bool { 11386 b := v.Block 11387 _ = b 11388 // match: (MULW x (MOVWconst [c])) 11389 // cond: 11390 // result: (MULWconst [c] x) 11391 for { 11392 x := v.Args[0] 11393 v_1 := v.Args[1] 11394 if v_1.Op != OpAMD64MOVWconst { 11395 break 11396 } 11397 c := v_1.AuxInt 11398 v.reset(OpAMD64MULWconst) 11399 v.AuxInt = c 11400 v.AddArg(x) 11401 return true 11402 } 11403 // match: (MULW (MOVWconst [c]) x) 11404 // cond: 11405 // result: (MULWconst [c] x) 11406 for { 11407 v_0 := v.Args[0] 11408 if v_0.Op != OpAMD64MOVWconst { 11409 break 11410 } 11411 c := v_0.AuxInt 11412 x := v.Args[1] 11413 v.reset(OpAMD64MULWconst) 11414 v.AuxInt = c 11415 v.AddArg(x) 11416 return true 11417 } 11418 return false 11419 } 11420 func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool { 11421 b := v.Block 11422 _ = b 11423 // match: (MULWconst [c] (MOVWconst [d])) 11424 // cond: 11425 // result: (MOVWconst [int64(int16(c*d))]) 11426 for { 11427 c := v.AuxInt 11428 v_0 := v.Args[0] 11429 if v_0.Op != OpAMD64MOVWconst { 11430 break 11431 } 11432 d := v_0.AuxInt 11433 v.reset(OpAMD64MOVWconst) 11434 v.AuxInt = int64(int16(c * d)) 11435 return true 11436 } 11437 return false 11438 } 11439 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { 11440 b := v.Block 11441 _ = b 11442 // match: (Mod16 x y) 11443 // cond: 11444 // result: (MODW x y) 11445 for { 11446 x := v.Args[0] 11447 y := v.Args[1] 11448 v.reset(OpAMD64MODW) 11449 v.AddArg(x) 11450 v.AddArg(y) 11451 return true 11452 } 11453 return false 11454 } 11455 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { 11456 b := v.Block 11457 _ = b 11458 // match: (Mod16u x y) 11459 // cond: 11460 // result: (MODWU x y) 11461 for { 11462 x := v.Args[0] 11463 y := v.Args[1] 11464 v.reset(OpAMD64MODWU) 11465 v.AddArg(x) 11466 v.AddArg(y) 11467 return true 11468 } 11469 return false 11470 } 11471 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { 11472 b := v.Block 11473 _ = b 11474 // match: (Mod32 x y) 11475 // cond: 11476 // result: (MODL x y) 11477 for { 11478 x := v.Args[0] 11479 y := v.Args[1] 11480 v.reset(OpAMD64MODL) 11481 v.AddArg(x) 11482 v.AddArg(y) 11483 return true 11484 } 11485 return false 11486 } 11487 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool { 11488 b := v.Block 11489 _ = b 11490 // match: (Mod32u x y) 11491 // cond: 11492 // result: (MODLU x y) 11493 for { 11494 x := v.Args[0] 11495 y := v.Args[1] 11496 v.reset(OpAMD64MODLU) 11497 v.AddArg(x) 11498 v.AddArg(y) 11499 return true 11500 } 11501 return false 11502 } 11503 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { 11504 b := v.Block 11505 _ = b 11506 // match: (Mod64 x y) 11507 // cond: 11508 // result: (MODQ x y) 11509 for { 11510 x := v.Args[0] 11511 y := v.Args[1] 11512 v.reset(OpAMD64MODQ) 11513 v.AddArg(x) 11514 v.AddArg(y) 11515 return true 11516 } 11517 return false 11518 } 11519 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { 11520 b := v.Block 11521 _ = b 11522 // match: (Mod64u x y) 11523 // cond: 11524 // result: (MODQU x y) 11525 for { 11526 x := v.Args[0] 11527 y := v.Args[1] 11528 v.reset(OpAMD64MODQU) 11529 v.AddArg(x) 11530 v.AddArg(y) 11531 return true 11532 } 11533 return false 11534 } 11535 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { 11536 b := v.Block 11537 _ = b 11538 // match: (Mod8 x y) 11539 // cond: 11540 // result: (MODW (SignExt8to16 x) (SignExt8to16 y)) 11541 for { 11542 x := v.Args[0] 11543 y := v.Args[1] 11544 v.reset(OpAMD64MODW) 11545 v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 11546 v0.AddArg(x) 11547 v.AddArg(v0) 11548 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 11549 v1.AddArg(y) 11550 v.AddArg(v1) 11551 return true 11552 } 11553 return false 11554 } 11555 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { 11556 b := v.Block 11557 _ = b 11558 // match: (Mod8u x y) 11559 // cond: 11560 // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)) 11561 for { 11562 x := v.Args[0] 11563 y := v.Args[1] 11564 v.reset(OpAMD64MODWU) 11565 v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 11566 v0.AddArg(x) 11567 v.AddArg(v0) 11568 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 11569 v1.AddArg(y) 11570 v.AddArg(v1) 11571 return true 11572 } 11573 return false 11574 } 11575 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { 11576 b := v.Block 11577 _ = b 11578 // match: (Move [0] _ _ mem) 11579 // cond: 11580 // result: mem 11581 for { 11582 if v.AuxInt != 0 { 11583 break 11584 } 11585 mem := v.Args[2] 11586 v.reset(OpCopy) 11587 v.Type = mem.Type 11588 v.AddArg(mem) 11589 return true 11590 } 11591 // match: (Move [1] dst src mem) 11592 // cond: 11593 // result: (MOVBstore dst (MOVBload src mem) mem) 11594 for { 11595 if v.AuxInt != 1 { 11596 break 11597 } 11598 dst := v.Args[0] 11599 src := v.Args[1] 11600 mem := v.Args[2] 11601 v.reset(OpAMD64MOVBstore) 11602 v.AddArg(dst) 11603 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 11604 v0.AddArg(src) 11605 v0.AddArg(mem) 11606 v.AddArg(v0) 11607 v.AddArg(mem) 11608 return true 11609 } 11610 // match: (Move [2] dst src mem) 11611 // cond: 11612 // result: (MOVWstore dst (MOVWload src mem) mem) 11613 for { 11614 if v.AuxInt != 2 { 11615 break 11616 } 11617 dst := v.Args[0] 11618 src := v.Args[1] 11619 mem := v.Args[2] 11620 v.reset(OpAMD64MOVWstore) 11621 v.AddArg(dst) 11622 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 11623 v0.AddArg(src) 11624 v0.AddArg(mem) 11625 v.AddArg(v0) 11626 v.AddArg(mem) 11627 return true 11628 } 11629 // match: (Move [4] dst src mem) 11630 // cond: 11631 // result: (MOVLstore dst (MOVLload src mem) mem) 11632 for { 11633 if v.AuxInt != 4 { 11634 break 11635 } 11636 dst := v.Args[0] 11637 src := v.Args[1] 11638 mem := v.Args[2] 11639 v.reset(OpAMD64MOVLstore) 11640 v.AddArg(dst) 11641 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11642 v0.AddArg(src) 11643 v0.AddArg(mem) 11644 v.AddArg(v0) 11645 v.AddArg(mem) 11646 return true 11647 } 11648 // match: (Move [8] dst src mem) 11649 // cond: 11650 // result: (MOVQstore dst (MOVQload src mem) mem) 11651 for { 11652 if v.AuxInt != 8 { 11653 break 11654 } 11655 dst := v.Args[0] 11656 src := v.Args[1] 11657 mem := v.Args[2] 11658 v.reset(OpAMD64MOVQstore) 11659 v.AddArg(dst) 11660 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 11661 v0.AddArg(src) 11662 v0.AddArg(mem) 11663 v.AddArg(v0) 11664 v.AddArg(mem) 11665 return true 11666 } 11667 // match: (Move [16] dst src mem) 11668 // cond: 11669 // result: (MOVOstore dst (MOVOload src mem) mem) 11670 for { 11671 if v.AuxInt != 16 { 11672 break 11673 } 11674 dst := v.Args[0] 11675 src := v.Args[1] 11676 mem := v.Args[2] 11677 v.reset(OpAMD64MOVOstore) 11678 v.AddArg(dst) 11679 v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 11680 v0.AddArg(src) 11681 v0.AddArg(mem) 11682 v.AddArg(v0) 11683 v.AddArg(mem) 11684 return true 11685 } 11686 // match: (Move [3] dst src mem) 11687 // cond: 11688 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 11689 for { 11690 if v.AuxInt != 3 { 11691 break 11692 } 11693 dst := v.Args[0] 11694 src := v.Args[1] 11695 mem := v.Args[2] 11696 v.reset(OpAMD64MOVBstore) 11697 v.AuxInt = 2 11698 v.AddArg(dst) 11699 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 11700 v0.AuxInt = 2 11701 v0.AddArg(src) 11702 v0.AddArg(mem) 11703 v.AddArg(v0) 11704 v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem) 11705 v1.AddArg(dst) 11706 v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 11707 v2.AddArg(src) 11708 v2.AddArg(mem) 11709 v1.AddArg(v2) 11710 v1.AddArg(mem) 11711 v.AddArg(v1) 11712 return true 11713 } 11714 // match: (Move [5] dst src mem) 11715 // cond: 11716 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 11717 for { 11718 if v.AuxInt != 5 { 11719 break 11720 } 11721 dst := v.Args[0] 11722 src := v.Args[1] 11723 mem := v.Args[2] 11724 v.reset(OpAMD64MOVBstore) 11725 v.AuxInt = 4 11726 v.AddArg(dst) 11727 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 11728 v0.AuxInt = 4 11729 v0.AddArg(src) 11730 v0.AddArg(mem) 11731 v.AddArg(v0) 11732 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 11733 v1.AddArg(dst) 11734 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11735 v2.AddArg(src) 11736 v2.AddArg(mem) 11737 v1.AddArg(v2) 11738 v1.AddArg(mem) 11739 v.AddArg(v1) 11740 return true 11741 } 11742 // match: (Move [6] dst src mem) 11743 // cond: 11744 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 11745 for { 11746 if v.AuxInt != 6 { 11747 break 11748 } 11749 dst := v.Args[0] 11750 src := v.Args[1] 11751 mem := v.Args[2] 11752 v.reset(OpAMD64MOVWstore) 11753 v.AuxInt = 4 11754 v.AddArg(dst) 11755 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 11756 v0.AuxInt = 4 11757 v0.AddArg(src) 11758 v0.AddArg(mem) 11759 v.AddArg(v0) 11760 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 11761 v1.AddArg(dst) 11762 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11763 v2.AddArg(src) 11764 v2.AddArg(mem) 11765 v1.AddArg(v2) 11766 v1.AddArg(mem) 11767 v.AddArg(v1) 11768 return true 11769 } 11770 // match: (Move [7] dst src mem) 11771 // cond: 11772 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 11773 for { 11774 if v.AuxInt != 7 { 11775 break 11776 } 11777 dst := v.Args[0] 11778 src := v.Args[1] 11779 mem := v.Args[2] 11780 v.reset(OpAMD64MOVLstore) 11781 v.AuxInt = 3 11782 v.AddArg(dst) 11783 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11784 v0.AuxInt = 3 11785 v0.AddArg(src) 11786 v0.AddArg(mem) 11787 v.AddArg(v0) 11788 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 11789 v1.AddArg(dst) 11790 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11791 v2.AddArg(src) 11792 v2.AddArg(mem) 11793 v1.AddArg(v2) 11794 v1.AddArg(mem) 11795 v.AddArg(v1) 11796 return true 11797 } 11798 // match: (Move [size] dst src mem) 11799 // cond: size > 8 && size < 16 11800 // result: (MOVQstore [size-8] dst (MOVQload [size-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 11801 for { 11802 size := v.AuxInt 11803 dst := v.Args[0] 11804 src := v.Args[1] 11805 mem := v.Args[2] 11806 if !(size > 8 && size < 16) { 11807 break 11808 } 11809 v.reset(OpAMD64MOVQstore) 11810 v.AuxInt = size - 8 11811 v.AddArg(dst) 11812 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 11813 v0.AuxInt = size - 8 11814 v0.AddArg(src) 11815 v0.AddArg(mem) 11816 v.AddArg(v0) 11817 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 11818 v1.AddArg(dst) 11819 v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 11820 v2.AddArg(src) 11821 v2.AddArg(mem) 11822 v1.AddArg(v2) 11823 v1.AddArg(mem) 11824 v.AddArg(v1) 11825 return true 11826 } 11827 // match: (Move [size] dst src mem) 11828 // cond: size > 16 && size%16 != 0 && size%16 <= 8 11829 // result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVQstore dst (MOVQload src mem) mem)) 11830 for { 11831 size := v.AuxInt 11832 dst := v.Args[0] 11833 src := v.Args[1] 11834 mem := v.Args[2] 11835 if !(size > 16 && size%16 != 0 && size%16 <= 8) { 11836 break 11837 } 11838 v.reset(OpMove) 11839 v.AuxInt = size - size%16 11840 v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type) 11841 v0.AddArg(dst) 11842 v0.AuxInt = size % 16 11843 v.AddArg(v0) 11844 v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type) 11845 v1.AddArg(src) 11846 v1.AuxInt = size % 16 11847 v.AddArg(v1) 11848 v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 11849 v2.AddArg(dst) 11850 v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 11851 v3.AddArg(src) 11852 v3.AddArg(mem) 11853 v2.AddArg(v3) 11854 v2.AddArg(mem) 11855 v.AddArg(v2) 11856 return true 11857 } 11858 // match: (Move [size] dst src mem) 11859 // cond: size > 16 && size%16 != 0 && size%16 > 8 11860 // result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVOstore dst (MOVOload src mem) mem)) 11861 for { 11862 size := v.AuxInt 11863 dst := v.Args[0] 11864 src := v.Args[1] 11865 mem := v.Args[2] 11866 if !(size > 16 && size%16 != 0 && size%16 > 8) { 11867 break 11868 } 11869 v.reset(OpMove) 11870 v.AuxInt = size - size%16 11871 v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type) 11872 v0.AddArg(dst) 11873 v0.AuxInt = size % 16 11874 v.AddArg(v0) 11875 v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type) 11876 v1.AddArg(src) 11877 v1.AuxInt = size % 16 11878 v.AddArg(v1) 11879 v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem) 11880 v2.AddArg(dst) 11881 v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 11882 v3.AddArg(src) 11883 v3.AddArg(mem) 11884 v2.AddArg(v3) 11885 v2.AddArg(mem) 11886 v.AddArg(v2) 11887 return true 11888 } 11889 // match: (Move [size] dst src mem) 11890 // cond: size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice 11891 // result: (DUFFCOPY [14*(64-size/16)] dst src mem) 11892 for { 11893 size := v.AuxInt 11894 dst := v.Args[0] 11895 src := v.Args[1] 11896 mem := v.Args[2] 11897 if !(size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice) { 11898 break 11899 } 11900 v.reset(OpAMD64DUFFCOPY) 11901 v.AuxInt = 14 * (64 - size/16) 11902 v.AddArg(dst) 11903 v.AddArg(src) 11904 v.AddArg(mem) 11905 return true 11906 } 11907 // match: (Move [size] dst src mem) 11908 // cond: (size > 16*64 || config.noDuffDevice) && size%8 == 0 11909 // result: (REPMOVSQ dst src (MOVQconst [size/8]) mem) 11910 for { 11911 size := v.AuxInt 11912 dst := v.Args[0] 11913 src := v.Args[1] 11914 mem := v.Args[2] 11915 if !((size > 16*64 || config.noDuffDevice) && size%8 == 0) { 11916 break 11917 } 11918 v.reset(OpAMD64REPMOVSQ) 11919 v.AddArg(dst) 11920 v.AddArg(src) 11921 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 11922 v0.AuxInt = size / 8 11923 v.AddArg(v0) 11924 v.AddArg(mem) 11925 return true 11926 } 11927 return false 11928 } 11929 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { 11930 b := v.Block 11931 _ = b 11932 // match: (Mul16 x y) 11933 // cond: 11934 // result: (MULW x y) 11935 for { 11936 x := v.Args[0] 11937 y := v.Args[1] 11938 v.reset(OpAMD64MULW) 11939 v.AddArg(x) 11940 v.AddArg(y) 11941 return true 11942 } 11943 return false 11944 } 11945 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { 11946 b := v.Block 11947 _ = b 11948 // match: (Mul32 x y) 11949 // cond: 11950 // result: (MULL x y) 11951 for { 11952 x := v.Args[0] 11953 y := v.Args[1] 11954 v.reset(OpAMD64MULL) 11955 v.AddArg(x) 11956 v.AddArg(y) 11957 return true 11958 } 11959 return false 11960 } 11961 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { 11962 b := v.Block 11963 _ = b 11964 // match: (Mul32F x y) 11965 // cond: 11966 // result: (MULSS x y) 11967 for { 11968 x := v.Args[0] 11969 y := v.Args[1] 11970 v.reset(OpAMD64MULSS) 11971 v.AddArg(x) 11972 v.AddArg(y) 11973 return true 11974 } 11975 return false 11976 } 11977 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { 11978 b := v.Block 11979 _ = b 11980 // match: (Mul64 x y) 11981 // cond: 11982 // result: (MULQ x y) 11983 for { 11984 x := v.Args[0] 11985 y := v.Args[1] 11986 v.reset(OpAMD64MULQ) 11987 v.AddArg(x) 11988 v.AddArg(y) 11989 return true 11990 } 11991 return false 11992 } 11993 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { 11994 b := v.Block 11995 _ = b 11996 // match: (Mul64F x y) 11997 // cond: 11998 // result: (MULSD x y) 11999 for { 12000 x := v.Args[0] 12001 y := v.Args[1] 12002 v.reset(OpAMD64MULSD) 12003 v.AddArg(x) 12004 v.AddArg(y) 12005 return true 12006 } 12007 return false 12008 } 12009 func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { 12010 b := v.Block 12011 _ = b 12012 // match: (Mul8 x y) 12013 // cond: 12014 // result: (MULB x y) 12015 for { 12016 x := v.Args[0] 12017 y := v.Args[1] 12018 v.reset(OpAMD64MULB) 12019 v.AddArg(x) 12020 v.AddArg(y) 12021 return true 12022 } 12023 return false 12024 } 12025 func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool { 12026 b := v.Block 12027 _ = b 12028 // match: (NEGB (MOVBconst [c])) 12029 // cond: 12030 // result: (MOVBconst [int64(int8(-c))]) 12031 for { 12032 v_0 := v.Args[0] 12033 if v_0.Op != OpAMD64MOVBconst { 12034 break 12035 } 12036 c := v_0.AuxInt 12037 v.reset(OpAMD64MOVBconst) 12038 v.AuxInt = int64(int8(-c)) 12039 return true 12040 } 12041 return false 12042 } 12043 func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { 12044 b := v.Block 12045 _ = b 12046 // match: (NEGL (MOVLconst [c])) 12047 // cond: 12048 // result: (MOVLconst [int64(int32(-c))]) 12049 for { 12050 v_0 := v.Args[0] 12051 if v_0.Op != OpAMD64MOVLconst { 12052 break 12053 } 12054 c := v_0.AuxInt 12055 v.reset(OpAMD64MOVLconst) 12056 v.AuxInt = int64(int32(-c)) 12057 return true 12058 } 12059 return false 12060 } 12061 func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { 12062 b := v.Block 12063 _ = b 12064 // match: (NEGQ (MOVQconst [c])) 12065 // cond: 12066 // result: (MOVQconst [-c]) 12067 for { 12068 v_0 := v.Args[0] 12069 if v_0.Op != OpAMD64MOVQconst { 12070 break 12071 } 12072 c := v_0.AuxInt 12073 v.reset(OpAMD64MOVQconst) 12074 v.AuxInt = -c 12075 return true 12076 } 12077 return false 12078 } 12079 func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool { 12080 b := v.Block 12081 _ = b 12082 // match: (NEGW (MOVWconst [c])) 12083 // cond: 12084 // result: (MOVWconst [int64(int16(-c))]) 12085 for { 12086 v_0 := v.Args[0] 12087 if v_0.Op != OpAMD64MOVWconst { 12088 break 12089 } 12090 c := v_0.AuxInt 12091 v.reset(OpAMD64MOVWconst) 12092 v.AuxInt = int64(int16(-c)) 12093 return true 12094 } 12095 return false 12096 } 12097 func rewriteValueAMD64_OpAMD64NOTB(v *Value, config *Config) bool { 12098 b := v.Block 12099 _ = b 12100 // match: (NOTB (MOVBconst [c])) 12101 // cond: 12102 // result: (MOVBconst [^c]) 12103 for { 12104 v_0 := v.Args[0] 12105 if v_0.Op != OpAMD64MOVBconst { 12106 break 12107 } 12108 c := v_0.AuxInt 12109 v.reset(OpAMD64MOVBconst) 12110 v.AuxInt = ^c 12111 return true 12112 } 12113 return false 12114 } 12115 func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { 12116 b := v.Block 12117 _ = b 12118 // match: (NOTL (MOVLconst [c])) 12119 // cond: 12120 // result: (MOVLconst [^c]) 12121 for { 12122 v_0 := v.Args[0] 12123 if v_0.Op != OpAMD64MOVLconst { 12124 break 12125 } 12126 c := v_0.AuxInt 12127 v.reset(OpAMD64MOVLconst) 12128 v.AuxInt = ^c 12129 return true 12130 } 12131 return false 12132 } 12133 func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { 12134 b := v.Block 12135 _ = b 12136 // match: (NOTQ (MOVQconst [c])) 12137 // cond: 12138 // result: (MOVQconst [^c]) 12139 for { 12140 v_0 := v.Args[0] 12141 if v_0.Op != OpAMD64MOVQconst { 12142 break 12143 } 12144 c := v_0.AuxInt 12145 v.reset(OpAMD64MOVQconst) 12146 v.AuxInt = ^c 12147 return true 12148 } 12149 return false 12150 } 12151 func rewriteValueAMD64_OpAMD64NOTW(v *Value, config *Config) bool { 12152 b := v.Block 12153 _ = b 12154 // match: (NOTW (MOVWconst [c])) 12155 // cond: 12156 // result: (MOVWconst [^c]) 12157 for { 12158 v_0 := v.Args[0] 12159 if v_0.Op != OpAMD64MOVWconst { 12160 break 12161 } 12162 c := v_0.AuxInt 12163 v.reset(OpAMD64MOVWconst) 12164 v.AuxInt = ^c 12165 return true 12166 } 12167 return false 12168 } 12169 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { 12170 b := v.Block 12171 _ = b 12172 // match: (Neg16 x) 12173 // cond: 12174 // result: (NEGW x) 12175 for { 12176 x := v.Args[0] 12177 v.reset(OpAMD64NEGW) 12178 v.AddArg(x) 12179 return true 12180 } 12181 return false 12182 } 12183 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { 12184 b := v.Block 12185 _ = b 12186 // match: (Neg32 x) 12187 // cond: 12188 // result: (NEGL x) 12189 for { 12190 x := v.Args[0] 12191 v.reset(OpAMD64NEGL) 12192 v.AddArg(x) 12193 return true 12194 } 12195 return false 12196 } 12197 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { 12198 b := v.Block 12199 _ = b 12200 // match: (Neg32F x) 12201 // cond: 12202 // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) 12203 for { 12204 x := v.Args[0] 12205 v.reset(OpAMD64PXOR) 12206 v.AddArg(x) 12207 v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) 12208 v0.AuxInt = f2i(math.Copysign(0, -1)) 12209 v.AddArg(v0) 12210 return true 12211 } 12212 return false 12213 } 12214 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { 12215 b := v.Block 12216 _ = b 12217 // match: (Neg64 x) 12218 // cond: 12219 // result: (NEGQ x) 12220 for { 12221 x := v.Args[0] 12222 v.reset(OpAMD64NEGQ) 12223 v.AddArg(x) 12224 return true 12225 } 12226 return false 12227 } 12228 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { 12229 b := v.Block 12230 _ = b 12231 // match: (Neg64F x) 12232 // cond: 12233 // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) 12234 for { 12235 x := v.Args[0] 12236 v.reset(OpAMD64PXOR) 12237 v.AddArg(x) 12238 v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) 12239 v0.AuxInt = f2i(math.Copysign(0, -1)) 12240 v.AddArg(v0) 12241 return true 12242 } 12243 return false 12244 } 12245 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { 12246 b := v.Block 12247 _ = b 12248 // match: (Neg8 x) 12249 // cond: 12250 // result: (NEGB x) 12251 for { 12252 x := v.Args[0] 12253 v.reset(OpAMD64NEGB) 12254 v.AddArg(x) 12255 return true 12256 } 12257 return false 12258 } 12259 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { 12260 b := v.Block 12261 _ = b 12262 // match: (Neq16 x y) 12263 // cond: 12264 // result: (SETNE (CMPW x y)) 12265 for { 12266 x := v.Args[0] 12267 y := v.Args[1] 12268 v.reset(OpAMD64SETNE) 12269 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 12270 v0.AddArg(x) 12271 v0.AddArg(y) 12272 v.AddArg(v0) 12273 return true 12274 } 12275 return false 12276 } 12277 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { 12278 b := v.Block 12279 _ = b 12280 // match: (Neq32 x y) 12281 // cond: 12282 // result: (SETNE (CMPL x y)) 12283 for { 12284 x := v.Args[0] 12285 y := v.Args[1] 12286 v.reset(OpAMD64SETNE) 12287 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 12288 v0.AddArg(x) 12289 v0.AddArg(y) 12290 v.AddArg(v0) 12291 return true 12292 } 12293 return false 12294 } 12295 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { 12296 b := v.Block 12297 _ = b 12298 // match: (Neq32F x y) 12299 // cond: 12300 // result: (SETNEF (UCOMISS x y)) 12301 for { 12302 x := v.Args[0] 12303 y := v.Args[1] 12304 v.reset(OpAMD64SETNEF) 12305 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 12306 v0.AddArg(x) 12307 v0.AddArg(y) 12308 v.AddArg(v0) 12309 return true 12310 } 12311 return false 12312 } 12313 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { 12314 b := v.Block 12315 _ = b 12316 // match: (Neq64 x y) 12317 // cond: 12318 // result: (SETNE (CMPQ x y)) 12319 for { 12320 x := v.Args[0] 12321 y := v.Args[1] 12322 v.reset(OpAMD64SETNE) 12323 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 12324 v0.AddArg(x) 12325 v0.AddArg(y) 12326 v.AddArg(v0) 12327 return true 12328 } 12329 return false 12330 } 12331 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { 12332 b := v.Block 12333 _ = b 12334 // match: (Neq64F x y) 12335 // cond: 12336 // result: (SETNEF (UCOMISD x y)) 12337 for { 12338 x := v.Args[0] 12339 y := v.Args[1] 12340 v.reset(OpAMD64SETNEF) 12341 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 12342 v0.AddArg(x) 12343 v0.AddArg(y) 12344 v.AddArg(v0) 12345 return true 12346 } 12347 return false 12348 } 12349 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { 12350 b := v.Block 12351 _ = b 12352 // match: (Neq8 x y) 12353 // cond: 12354 // result: (SETNE (CMPB x y)) 12355 for { 12356 x := v.Args[0] 12357 y := v.Args[1] 12358 v.reset(OpAMD64SETNE) 12359 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 12360 v0.AddArg(x) 12361 v0.AddArg(y) 12362 v.AddArg(v0) 12363 return true 12364 } 12365 return false 12366 } 12367 func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { 12368 b := v.Block 12369 _ = b 12370 // match: (NeqPtr x y) 12371 // cond: 12372 // result: (SETNE (CMPQ x y)) 12373 for { 12374 x := v.Args[0] 12375 y := v.Args[1] 12376 v.reset(OpAMD64SETNE) 12377 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 12378 v0.AddArg(x) 12379 v0.AddArg(y) 12380 v.AddArg(v0) 12381 return true 12382 } 12383 return false 12384 } 12385 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { 12386 b := v.Block 12387 _ = b 12388 // match: (NilCheck ptr mem) 12389 // cond: 12390 // result: (LoweredNilCheck ptr mem) 12391 for { 12392 ptr := v.Args[0] 12393 mem := v.Args[1] 12394 v.reset(OpAMD64LoweredNilCheck) 12395 v.AddArg(ptr) 12396 v.AddArg(mem) 12397 return true 12398 } 12399 return false 12400 } 12401 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { 12402 b := v.Block 12403 _ = b 12404 // match: (Not x) 12405 // cond: 12406 // result: (XORBconst [1] x) 12407 for { 12408 x := v.Args[0] 12409 v.reset(OpAMD64XORBconst) 12410 v.AuxInt = 1 12411 v.AddArg(x) 12412 return true 12413 } 12414 return false 12415 } 12416 func rewriteValueAMD64_OpAMD64ORB(v *Value, config *Config) bool { 12417 b := v.Block 12418 _ = b 12419 // match: (ORB x (MOVBconst [c])) 12420 // cond: 12421 // result: (ORBconst [c] x) 12422 for { 12423 x := v.Args[0] 12424 v_1 := v.Args[1] 12425 if v_1.Op != OpAMD64MOVBconst { 12426 break 12427 } 12428 c := v_1.AuxInt 12429 v.reset(OpAMD64ORBconst) 12430 v.AuxInt = c 12431 v.AddArg(x) 12432 return true 12433 } 12434 // match: (ORB (MOVBconst [c]) x) 12435 // cond: 12436 // result: (ORBconst [c] x) 12437 for { 12438 v_0 := v.Args[0] 12439 if v_0.Op != OpAMD64MOVBconst { 12440 break 12441 } 12442 c := v_0.AuxInt 12443 x := v.Args[1] 12444 v.reset(OpAMD64ORBconst) 12445 v.AuxInt = c 12446 v.AddArg(x) 12447 return true 12448 } 12449 // match: (ORB x x) 12450 // cond: 12451 // result: x 12452 for { 12453 x := v.Args[0] 12454 if x != v.Args[1] { 12455 break 12456 } 12457 v.reset(OpCopy) 12458 v.Type = x.Type 12459 v.AddArg(x) 12460 return true 12461 } 12462 return false 12463 } 12464 func rewriteValueAMD64_OpAMD64ORBconst(v *Value, config *Config) bool { 12465 b := v.Block 12466 _ = b 12467 // match: (ORBconst [c] x) 12468 // cond: int8(c)==0 12469 // result: x 12470 for { 12471 c := v.AuxInt 12472 x := v.Args[0] 12473 if !(int8(c) == 0) { 12474 break 12475 } 12476 v.reset(OpCopy) 12477 v.Type = x.Type 12478 v.AddArg(x) 12479 return true 12480 } 12481 // match: (ORBconst [c] _) 12482 // cond: int8(c)==-1 12483 // result: (MOVBconst [-1]) 12484 for { 12485 c := v.AuxInt 12486 if !(int8(c) == -1) { 12487 break 12488 } 12489 v.reset(OpAMD64MOVBconst) 12490 v.AuxInt = -1 12491 return true 12492 } 12493 // match: (ORBconst [c] (MOVBconst [d])) 12494 // cond: 12495 // result: (MOVBconst [c|d]) 12496 for { 12497 c := v.AuxInt 12498 v_0 := v.Args[0] 12499 if v_0.Op != OpAMD64MOVBconst { 12500 break 12501 } 12502 d := v_0.AuxInt 12503 v.reset(OpAMD64MOVBconst) 12504 v.AuxInt = c | d 12505 return true 12506 } 12507 return false 12508 } 12509 func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { 12510 b := v.Block 12511 _ = b 12512 // match: (ORL x (MOVLconst [c])) 12513 // cond: 12514 // result: (ORLconst [c] x) 12515 for { 12516 x := v.Args[0] 12517 v_1 := v.Args[1] 12518 if v_1.Op != OpAMD64MOVLconst { 12519 break 12520 } 12521 c := v_1.AuxInt 12522 v.reset(OpAMD64ORLconst) 12523 v.AuxInt = c 12524 v.AddArg(x) 12525 return true 12526 } 12527 // match: (ORL (MOVLconst [c]) x) 12528 // cond: 12529 // result: (ORLconst [c] x) 12530 for { 12531 v_0 := v.Args[0] 12532 if v_0.Op != OpAMD64MOVLconst { 12533 break 12534 } 12535 c := v_0.AuxInt 12536 x := v.Args[1] 12537 v.reset(OpAMD64ORLconst) 12538 v.AuxInt = c 12539 v.AddArg(x) 12540 return true 12541 } 12542 // match: (ORL x x) 12543 // cond: 12544 // result: x 12545 for { 12546 x := v.Args[0] 12547 if x != v.Args[1] { 12548 break 12549 } 12550 v.reset(OpCopy) 12551 v.Type = x.Type 12552 v.AddArg(x) 12553 return true 12554 } 12555 // match: (ORL (ORL (ORL x0:(MOVBload [i] {s} p mem) (SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) 12556 // cond: mergePoint(b,x0,x1,x2,x3) != nil 12557 // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) 12558 for { 12559 v_0 := v.Args[0] 12560 if v_0.Op != OpAMD64ORL { 12561 break 12562 } 12563 v_0_0 := v_0.Args[0] 12564 if v_0_0.Op != OpAMD64ORL { 12565 break 12566 } 12567 x0 := v_0_0.Args[0] 12568 if x0.Op != OpAMD64MOVBload { 12569 break 12570 } 12571 i := x0.AuxInt 12572 s := x0.Aux 12573 p := x0.Args[0] 12574 mem := x0.Args[1] 12575 v_0_0_1 := v_0_0.Args[1] 12576 if v_0_0_1.Op != OpAMD64SHLLconst { 12577 break 12578 } 12579 if v_0_0_1.AuxInt != 8 { 12580 break 12581 } 12582 x1 := v_0_0_1.Args[0] 12583 if x1.Op != OpAMD64MOVBload { 12584 break 12585 } 12586 if x1.AuxInt != i+1 { 12587 break 12588 } 12589 if x1.Aux != s { 12590 break 12591 } 12592 if p != x1.Args[0] { 12593 break 12594 } 12595 if mem != x1.Args[1] { 12596 break 12597 } 12598 v_0_1 := v_0.Args[1] 12599 if v_0_1.Op != OpAMD64SHLLconst { 12600 break 12601 } 12602 if v_0_1.AuxInt != 16 { 12603 break 12604 } 12605 x2 := v_0_1.Args[0] 12606 if x2.Op != OpAMD64MOVBload { 12607 break 12608 } 12609 if x2.AuxInt != i+2 { 12610 break 12611 } 12612 if x2.Aux != s { 12613 break 12614 } 12615 if p != x2.Args[0] { 12616 break 12617 } 12618 if mem != x2.Args[1] { 12619 break 12620 } 12621 v_1 := v.Args[1] 12622 if v_1.Op != OpAMD64SHLLconst { 12623 break 12624 } 12625 if v_1.AuxInt != 24 { 12626 break 12627 } 12628 x3 := v_1.Args[0] 12629 if x3.Op != OpAMD64MOVBload { 12630 break 12631 } 12632 if x3.AuxInt != i+3 { 12633 break 12634 } 12635 if x3.Aux != s { 12636 break 12637 } 12638 if p != x3.Args[0] { 12639 break 12640 } 12641 if mem != x3.Args[1] { 12642 break 12643 } 12644 if !(mergePoint(b, x0, x1, x2, x3) != nil) { 12645 break 12646 } 12647 b = mergePoint(b, x0, x1, x2, x3) 12648 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 12649 v.reset(OpCopy) 12650 v.AddArg(v0) 12651 v0.AuxInt = i 12652 v0.Aux = s 12653 v0.AddArg(p) 12654 v0.AddArg(mem) 12655 return true 12656 } 12657 // match: (ORL (ORL (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) 12658 // cond: mergePoint(b,x0,x1,x2,x3) != nil 12659 // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem) 12660 for { 12661 v_0 := v.Args[0] 12662 if v_0.Op != OpAMD64ORL { 12663 break 12664 } 12665 v_0_0 := v_0.Args[0] 12666 if v_0_0.Op != OpAMD64ORL { 12667 break 12668 } 12669 x0 := v_0_0.Args[0] 12670 if x0.Op != OpAMD64MOVBloadidx1 { 12671 break 12672 } 12673 i := x0.AuxInt 12674 s := x0.Aux 12675 p := x0.Args[0] 12676 idx := x0.Args[1] 12677 mem := x0.Args[2] 12678 v_0_0_1 := v_0_0.Args[1] 12679 if v_0_0_1.Op != OpAMD64SHLLconst { 12680 break 12681 } 12682 if v_0_0_1.AuxInt != 8 { 12683 break 12684 } 12685 x1 := v_0_0_1.Args[0] 12686 if x1.Op != OpAMD64MOVBloadidx1 { 12687 break 12688 } 12689 if x1.AuxInt != i+1 { 12690 break 12691 } 12692 if x1.Aux != s { 12693 break 12694 } 12695 if p != x1.Args[0] { 12696 break 12697 } 12698 if idx != x1.Args[1] { 12699 break 12700 } 12701 if mem != x1.Args[2] { 12702 break 12703 } 12704 v_0_1 := v_0.Args[1] 12705 if v_0_1.Op != OpAMD64SHLLconst { 12706 break 12707 } 12708 if v_0_1.AuxInt != 16 { 12709 break 12710 } 12711 x2 := v_0_1.Args[0] 12712 if x2.Op != OpAMD64MOVBloadidx1 { 12713 break 12714 } 12715 if x2.AuxInt != i+2 { 12716 break 12717 } 12718 if x2.Aux != s { 12719 break 12720 } 12721 if p != x2.Args[0] { 12722 break 12723 } 12724 if idx != x2.Args[1] { 12725 break 12726 } 12727 if mem != x2.Args[2] { 12728 break 12729 } 12730 v_1 := v.Args[1] 12731 if v_1.Op != OpAMD64SHLLconst { 12732 break 12733 } 12734 if v_1.AuxInt != 24 { 12735 break 12736 } 12737 x3 := v_1.Args[0] 12738 if x3.Op != OpAMD64MOVBloadidx1 { 12739 break 12740 } 12741 if x3.AuxInt != i+3 { 12742 break 12743 } 12744 if x3.Aux != s { 12745 break 12746 } 12747 if p != x3.Args[0] { 12748 break 12749 } 12750 if idx != x3.Args[1] { 12751 break 12752 } 12753 if mem != x3.Args[2] { 12754 break 12755 } 12756 if !(mergePoint(b, x0, x1, x2, x3) != nil) { 12757 break 12758 } 12759 b = mergePoint(b, x0, x1, x2, x3) 12760 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 12761 v.reset(OpCopy) 12762 v.AddArg(v0) 12763 v0.AuxInt = i 12764 v0.Aux = s 12765 v0.AddArg(p) 12766 v0.AddArg(idx) 12767 v0.AddArg(mem) 12768 return true 12769 } 12770 return false 12771 } 12772 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { 12773 b := v.Block 12774 _ = b 12775 // match: (ORLconst [c] x) 12776 // cond: int32(c)==0 12777 // result: x 12778 for { 12779 c := v.AuxInt 12780 x := v.Args[0] 12781 if !(int32(c) == 0) { 12782 break 12783 } 12784 v.reset(OpCopy) 12785 v.Type = x.Type 12786 v.AddArg(x) 12787 return true 12788 } 12789 // match: (ORLconst [c] _) 12790 // cond: int32(c)==-1 12791 // result: (MOVLconst [-1]) 12792 for { 12793 c := v.AuxInt 12794 if !(int32(c) == -1) { 12795 break 12796 } 12797 v.reset(OpAMD64MOVLconst) 12798 v.AuxInt = -1 12799 return true 12800 } 12801 // match: (ORLconst [c] (MOVLconst [d])) 12802 // cond: 12803 // result: (MOVLconst [c|d]) 12804 for { 12805 c := v.AuxInt 12806 v_0 := v.Args[0] 12807 if v_0.Op != OpAMD64MOVLconst { 12808 break 12809 } 12810 d := v_0.AuxInt 12811 v.reset(OpAMD64MOVLconst) 12812 v.AuxInt = c | d 12813 return true 12814 } 12815 return false 12816 } 12817 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { 12818 b := v.Block 12819 _ = b 12820 // match: (ORQ x (MOVQconst [c])) 12821 // cond: is32Bit(c) 12822 // result: (ORQconst [c] x) 12823 for { 12824 x := v.Args[0] 12825 v_1 := v.Args[1] 12826 if v_1.Op != OpAMD64MOVQconst { 12827 break 12828 } 12829 c := v_1.AuxInt 12830 if !(is32Bit(c)) { 12831 break 12832 } 12833 v.reset(OpAMD64ORQconst) 12834 v.AuxInt = c 12835 v.AddArg(x) 12836 return true 12837 } 12838 // match: (ORQ (MOVQconst [c]) x) 12839 // cond: is32Bit(c) 12840 // result: (ORQconst [c] x) 12841 for { 12842 v_0 := v.Args[0] 12843 if v_0.Op != OpAMD64MOVQconst { 12844 break 12845 } 12846 c := v_0.AuxInt 12847 x := v.Args[1] 12848 if !(is32Bit(c)) { 12849 break 12850 } 12851 v.reset(OpAMD64ORQconst) 12852 v.AuxInt = c 12853 v.AddArg(x) 12854 return true 12855 } 12856 // match: (ORQ x x) 12857 // cond: 12858 // result: x 12859 for { 12860 x := v.Args[0] 12861 if x != v.Args[1] { 12862 break 12863 } 12864 v.reset(OpCopy) 12865 v.Type = x.Type 12866 v.AddArg(x) 12867 return true 12868 } 12869 // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBload [i] {s} p mem) (SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) (SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) (SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) (SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) (SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) 12870 // cond: mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil 12871 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) 12872 for { 12873 v_0 := v.Args[0] 12874 if v_0.Op != OpAMD64ORQ { 12875 break 12876 } 12877 v_0_0 := v_0.Args[0] 12878 if v_0_0.Op != OpAMD64ORQ { 12879 break 12880 } 12881 v_0_0_0 := v_0_0.Args[0] 12882 if v_0_0_0.Op != OpAMD64ORQ { 12883 break 12884 } 12885 v_0_0_0_0 := v_0_0_0.Args[0] 12886 if v_0_0_0_0.Op != OpAMD64ORQ { 12887 break 12888 } 12889 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 12890 if v_0_0_0_0_0.Op != OpAMD64ORQ { 12891 break 12892 } 12893 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 12894 if v_0_0_0_0_0_0.Op != OpAMD64ORQ { 12895 break 12896 } 12897 x0 := v_0_0_0_0_0_0.Args[0] 12898 if x0.Op != OpAMD64MOVBload { 12899 break 12900 } 12901 i := x0.AuxInt 12902 s := x0.Aux 12903 p := x0.Args[0] 12904 mem := x0.Args[1] 12905 v_0_0_0_0_0_0_1 := v_0_0_0_0_0_0.Args[1] 12906 if v_0_0_0_0_0_0_1.Op != OpAMD64SHLQconst { 12907 break 12908 } 12909 if v_0_0_0_0_0_0_1.AuxInt != 8 { 12910 break 12911 } 12912 x1 := v_0_0_0_0_0_0_1.Args[0] 12913 if x1.Op != OpAMD64MOVBload { 12914 break 12915 } 12916 if x1.AuxInt != i+1 { 12917 break 12918 } 12919 if x1.Aux != s { 12920 break 12921 } 12922 if p != x1.Args[0] { 12923 break 12924 } 12925 if mem != x1.Args[1] { 12926 break 12927 } 12928 v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] 12929 if v_0_0_0_0_0_1.Op != OpAMD64SHLQconst { 12930 break 12931 } 12932 if v_0_0_0_0_0_1.AuxInt != 16 { 12933 break 12934 } 12935 x2 := v_0_0_0_0_0_1.Args[0] 12936 if x2.Op != OpAMD64MOVBload { 12937 break 12938 } 12939 if x2.AuxInt != i+2 { 12940 break 12941 } 12942 if x2.Aux != s { 12943 break 12944 } 12945 if p != x2.Args[0] { 12946 break 12947 } 12948 if mem != x2.Args[1] { 12949 break 12950 } 12951 v_0_0_0_0_1 := v_0_0_0_0.Args[1] 12952 if v_0_0_0_0_1.Op != OpAMD64SHLQconst { 12953 break 12954 } 12955 if v_0_0_0_0_1.AuxInt != 24 { 12956 break 12957 } 12958 x3 := v_0_0_0_0_1.Args[0] 12959 if x3.Op != OpAMD64MOVBload { 12960 break 12961 } 12962 if x3.AuxInt != i+3 { 12963 break 12964 } 12965 if x3.Aux != s { 12966 break 12967 } 12968 if p != x3.Args[0] { 12969 break 12970 } 12971 if mem != x3.Args[1] { 12972 break 12973 } 12974 v_0_0_0_1 := v_0_0_0.Args[1] 12975 if v_0_0_0_1.Op != OpAMD64SHLQconst { 12976 break 12977 } 12978 if v_0_0_0_1.AuxInt != 32 { 12979 break 12980 } 12981 x4 := v_0_0_0_1.Args[0] 12982 if x4.Op != OpAMD64MOVBload { 12983 break 12984 } 12985 if x4.AuxInt != i+4 { 12986 break 12987 } 12988 if x4.Aux != s { 12989 break 12990 } 12991 if p != x4.Args[0] { 12992 break 12993 } 12994 if mem != x4.Args[1] { 12995 break 12996 } 12997 v_0_0_1 := v_0_0.Args[1] 12998 if v_0_0_1.Op != OpAMD64SHLQconst { 12999 break 13000 } 13001 if v_0_0_1.AuxInt != 40 { 13002 break 13003 } 13004 x5 := v_0_0_1.Args[0] 13005 if x5.Op != OpAMD64MOVBload { 13006 break 13007 } 13008 if x5.AuxInt != i+5 { 13009 break 13010 } 13011 if x5.Aux != s { 13012 break 13013 } 13014 if p != x5.Args[0] { 13015 break 13016 } 13017 if mem != x5.Args[1] { 13018 break 13019 } 13020 v_0_1 := v_0.Args[1] 13021 if v_0_1.Op != OpAMD64SHLQconst { 13022 break 13023 } 13024 if v_0_1.AuxInt != 48 { 13025 break 13026 } 13027 x6 := v_0_1.Args[0] 13028 if x6.Op != OpAMD64MOVBload { 13029 break 13030 } 13031 if x6.AuxInt != i+6 { 13032 break 13033 } 13034 if x6.Aux != s { 13035 break 13036 } 13037 if p != x6.Args[0] { 13038 break 13039 } 13040 if mem != x6.Args[1] { 13041 break 13042 } 13043 v_1 := v.Args[1] 13044 if v_1.Op != OpAMD64SHLQconst { 13045 break 13046 } 13047 if v_1.AuxInt != 56 { 13048 break 13049 } 13050 x7 := v_1.Args[0] 13051 if x7.Op != OpAMD64MOVBload { 13052 break 13053 } 13054 if x7.AuxInt != i+7 { 13055 break 13056 } 13057 if x7.Aux != s { 13058 break 13059 } 13060 if p != x7.Args[0] { 13061 break 13062 } 13063 if mem != x7.Args[1] { 13064 break 13065 } 13066 if !(mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { 13067 break 13068 } 13069 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 13070 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 13071 v.reset(OpCopy) 13072 v.AddArg(v0) 13073 v0.AuxInt = i 13074 v0.Aux = s 13075 v0.AddArg(p) 13076 v0.AddArg(mem) 13077 return true 13078 } 13079 // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) (SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) (SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) (SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) (SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) (SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) 13080 // cond: mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil 13081 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem) 13082 for { 13083 v_0 := v.Args[0] 13084 if v_0.Op != OpAMD64ORQ { 13085 break 13086 } 13087 v_0_0 := v_0.Args[0] 13088 if v_0_0.Op != OpAMD64ORQ { 13089 break 13090 } 13091 v_0_0_0 := v_0_0.Args[0] 13092 if v_0_0_0.Op != OpAMD64ORQ { 13093 break 13094 } 13095 v_0_0_0_0 := v_0_0_0.Args[0] 13096 if v_0_0_0_0.Op != OpAMD64ORQ { 13097 break 13098 } 13099 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 13100 if v_0_0_0_0_0.Op != OpAMD64ORQ { 13101 break 13102 } 13103 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 13104 if v_0_0_0_0_0_0.Op != OpAMD64ORQ { 13105 break 13106 } 13107 x0 := v_0_0_0_0_0_0.Args[0] 13108 if x0.Op != OpAMD64MOVBloadidx1 { 13109 break 13110 } 13111 i := x0.AuxInt 13112 s := x0.Aux 13113 p := x0.Args[0] 13114 idx := x0.Args[1] 13115 mem := x0.Args[2] 13116 v_0_0_0_0_0_0_1 := v_0_0_0_0_0_0.Args[1] 13117 if v_0_0_0_0_0_0_1.Op != OpAMD64SHLQconst { 13118 break 13119 } 13120 if v_0_0_0_0_0_0_1.AuxInt != 8 { 13121 break 13122 } 13123 x1 := v_0_0_0_0_0_0_1.Args[0] 13124 if x1.Op != OpAMD64MOVBloadidx1 { 13125 break 13126 } 13127 if x1.AuxInt != i+1 { 13128 break 13129 } 13130 if x1.Aux != s { 13131 break 13132 } 13133 if p != x1.Args[0] { 13134 break 13135 } 13136 if idx != x1.Args[1] { 13137 break 13138 } 13139 if mem != x1.Args[2] { 13140 break 13141 } 13142 v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] 13143 if v_0_0_0_0_0_1.Op != OpAMD64SHLQconst { 13144 break 13145 } 13146 if v_0_0_0_0_0_1.AuxInt != 16 { 13147 break 13148 } 13149 x2 := v_0_0_0_0_0_1.Args[0] 13150 if x2.Op != OpAMD64MOVBloadidx1 { 13151 break 13152 } 13153 if x2.AuxInt != i+2 { 13154 break 13155 } 13156 if x2.Aux != s { 13157 break 13158 } 13159 if p != x2.Args[0] { 13160 break 13161 } 13162 if idx != x2.Args[1] { 13163 break 13164 } 13165 if mem != x2.Args[2] { 13166 break 13167 } 13168 v_0_0_0_0_1 := v_0_0_0_0.Args[1] 13169 if v_0_0_0_0_1.Op != OpAMD64SHLQconst { 13170 break 13171 } 13172 if v_0_0_0_0_1.AuxInt != 24 { 13173 break 13174 } 13175 x3 := v_0_0_0_0_1.Args[0] 13176 if x3.Op != OpAMD64MOVBloadidx1 { 13177 break 13178 } 13179 if x3.AuxInt != i+3 { 13180 break 13181 } 13182 if x3.Aux != s { 13183 break 13184 } 13185 if p != x3.Args[0] { 13186 break 13187 } 13188 if idx != x3.Args[1] { 13189 break 13190 } 13191 if mem != x3.Args[2] { 13192 break 13193 } 13194 v_0_0_0_1 := v_0_0_0.Args[1] 13195 if v_0_0_0_1.Op != OpAMD64SHLQconst { 13196 break 13197 } 13198 if v_0_0_0_1.AuxInt != 32 { 13199 break 13200 } 13201 x4 := v_0_0_0_1.Args[0] 13202 if x4.Op != OpAMD64MOVBloadidx1 { 13203 break 13204 } 13205 if x4.AuxInt != i+4 { 13206 break 13207 } 13208 if x4.Aux != s { 13209 break 13210 } 13211 if p != x4.Args[0] { 13212 break 13213 } 13214 if idx != x4.Args[1] { 13215 break 13216 } 13217 if mem != x4.Args[2] { 13218 break 13219 } 13220 v_0_0_1 := v_0_0.Args[1] 13221 if v_0_0_1.Op != OpAMD64SHLQconst { 13222 break 13223 } 13224 if v_0_0_1.AuxInt != 40 { 13225 break 13226 } 13227 x5 := v_0_0_1.Args[0] 13228 if x5.Op != OpAMD64MOVBloadidx1 { 13229 break 13230 } 13231 if x5.AuxInt != i+5 { 13232 break 13233 } 13234 if x5.Aux != s { 13235 break 13236 } 13237 if p != x5.Args[0] { 13238 break 13239 } 13240 if idx != x5.Args[1] { 13241 break 13242 } 13243 if mem != x5.Args[2] { 13244 break 13245 } 13246 v_0_1 := v_0.Args[1] 13247 if v_0_1.Op != OpAMD64SHLQconst { 13248 break 13249 } 13250 if v_0_1.AuxInt != 48 { 13251 break 13252 } 13253 x6 := v_0_1.Args[0] 13254 if x6.Op != OpAMD64MOVBloadidx1 { 13255 break 13256 } 13257 if x6.AuxInt != i+6 { 13258 break 13259 } 13260 if x6.Aux != s { 13261 break 13262 } 13263 if p != x6.Args[0] { 13264 break 13265 } 13266 if idx != x6.Args[1] { 13267 break 13268 } 13269 if mem != x6.Args[2] { 13270 break 13271 } 13272 v_1 := v.Args[1] 13273 if v_1.Op != OpAMD64SHLQconst { 13274 break 13275 } 13276 if v_1.AuxInt != 56 { 13277 break 13278 } 13279 x7 := v_1.Args[0] 13280 if x7.Op != OpAMD64MOVBloadidx1 { 13281 break 13282 } 13283 if x7.AuxInt != i+7 { 13284 break 13285 } 13286 if x7.Aux != s { 13287 break 13288 } 13289 if p != x7.Args[0] { 13290 break 13291 } 13292 if idx != x7.Args[1] { 13293 break 13294 } 13295 if mem != x7.Args[2] { 13296 break 13297 } 13298 if !(mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { 13299 break 13300 } 13301 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 13302 v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type) 13303 v.reset(OpCopy) 13304 v.AddArg(v0) 13305 v0.AuxInt = i 13306 v0.Aux = s 13307 v0.AddArg(p) 13308 v0.AddArg(idx) 13309 v0.AddArg(mem) 13310 return true 13311 } 13312 return false 13313 } 13314 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { 13315 b := v.Block 13316 _ = b 13317 // match: (ORQconst [0] x) 13318 // cond: 13319 // result: x 13320 for { 13321 if v.AuxInt != 0 { 13322 break 13323 } 13324 x := v.Args[0] 13325 v.reset(OpCopy) 13326 v.Type = x.Type 13327 v.AddArg(x) 13328 return true 13329 } 13330 // match: (ORQconst [-1] _) 13331 // cond: 13332 // result: (MOVQconst [-1]) 13333 for { 13334 if v.AuxInt != -1 { 13335 break 13336 } 13337 v.reset(OpAMD64MOVQconst) 13338 v.AuxInt = -1 13339 return true 13340 } 13341 // match: (ORQconst [c] (MOVQconst [d])) 13342 // cond: 13343 // result: (MOVQconst [c|d]) 13344 for { 13345 c := v.AuxInt 13346 v_0 := v.Args[0] 13347 if v_0.Op != OpAMD64MOVQconst { 13348 break 13349 } 13350 d := v_0.AuxInt 13351 v.reset(OpAMD64MOVQconst) 13352 v.AuxInt = c | d 13353 return true 13354 } 13355 return false 13356 } 13357 func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { 13358 b := v.Block 13359 _ = b 13360 // match: (ORW x (MOVWconst [c])) 13361 // cond: 13362 // result: (ORWconst [c] x) 13363 for { 13364 x := v.Args[0] 13365 v_1 := v.Args[1] 13366 if v_1.Op != OpAMD64MOVWconst { 13367 break 13368 } 13369 c := v_1.AuxInt 13370 v.reset(OpAMD64ORWconst) 13371 v.AuxInt = c 13372 v.AddArg(x) 13373 return true 13374 } 13375 // match: (ORW (MOVWconst [c]) x) 13376 // cond: 13377 // result: (ORWconst [c] x) 13378 for { 13379 v_0 := v.Args[0] 13380 if v_0.Op != OpAMD64MOVWconst { 13381 break 13382 } 13383 c := v_0.AuxInt 13384 x := v.Args[1] 13385 v.reset(OpAMD64ORWconst) 13386 v.AuxInt = c 13387 v.AddArg(x) 13388 return true 13389 } 13390 // match: (ORW x x) 13391 // cond: 13392 // result: x 13393 for { 13394 x := v.Args[0] 13395 if x != v.Args[1] { 13396 break 13397 } 13398 v.reset(OpCopy) 13399 v.Type = x.Type 13400 v.AddArg(x) 13401 return true 13402 } 13403 // match: (ORW x0:(MOVBload [i] {s} p mem) (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) 13404 // cond: mergePoint(b,x0,x1) != nil 13405 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) 13406 for { 13407 x0 := v.Args[0] 13408 if x0.Op != OpAMD64MOVBload { 13409 break 13410 } 13411 i := x0.AuxInt 13412 s := x0.Aux 13413 p := x0.Args[0] 13414 mem := x0.Args[1] 13415 v_1 := v.Args[1] 13416 if v_1.Op != OpAMD64SHLWconst { 13417 break 13418 } 13419 if v_1.AuxInt != 8 { 13420 break 13421 } 13422 x1 := v_1.Args[0] 13423 if x1.Op != OpAMD64MOVBload { 13424 break 13425 } 13426 if x1.AuxInt != i+1 { 13427 break 13428 } 13429 if x1.Aux != s { 13430 break 13431 } 13432 if p != x1.Args[0] { 13433 break 13434 } 13435 if mem != x1.Args[1] { 13436 break 13437 } 13438 if !(mergePoint(b, x0, x1) != nil) { 13439 break 13440 } 13441 b = mergePoint(b, x0, x1) 13442 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 13443 v.reset(OpCopy) 13444 v.AddArg(v0) 13445 v0.AuxInt = i 13446 v0.Aux = s 13447 v0.AddArg(p) 13448 v0.AddArg(mem) 13449 return true 13450 } 13451 // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) 13452 // cond: mergePoint(b,x0,x1) != nil 13453 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem) 13454 for { 13455 x0 := v.Args[0] 13456 if x0.Op != OpAMD64MOVBloadidx1 { 13457 break 13458 } 13459 i := x0.AuxInt 13460 s := x0.Aux 13461 p := x0.Args[0] 13462 idx := x0.Args[1] 13463 mem := x0.Args[2] 13464 v_1 := v.Args[1] 13465 if v_1.Op != OpAMD64SHLWconst { 13466 break 13467 } 13468 if v_1.AuxInt != 8 { 13469 break 13470 } 13471 x1 := v_1.Args[0] 13472 if x1.Op != OpAMD64MOVBloadidx1 { 13473 break 13474 } 13475 if x1.AuxInt != i+1 { 13476 break 13477 } 13478 if x1.Aux != s { 13479 break 13480 } 13481 if p != x1.Args[0] { 13482 break 13483 } 13484 if idx != x1.Args[1] { 13485 break 13486 } 13487 if mem != x1.Args[2] { 13488 break 13489 } 13490 if !(mergePoint(b, x0, x1) != nil) { 13491 break 13492 } 13493 b = mergePoint(b, x0, x1) 13494 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 13495 v.reset(OpCopy) 13496 v.AddArg(v0) 13497 v0.AuxInt = i 13498 v0.Aux = s 13499 v0.AddArg(p) 13500 v0.AddArg(idx) 13501 v0.AddArg(mem) 13502 return true 13503 } 13504 return false 13505 } 13506 func rewriteValueAMD64_OpAMD64ORWconst(v *Value, config *Config) bool { 13507 b := v.Block 13508 _ = b 13509 // match: (ORWconst [c] x) 13510 // cond: int16(c)==0 13511 // result: x 13512 for { 13513 c := v.AuxInt 13514 x := v.Args[0] 13515 if !(int16(c) == 0) { 13516 break 13517 } 13518 v.reset(OpCopy) 13519 v.Type = x.Type 13520 v.AddArg(x) 13521 return true 13522 } 13523 // match: (ORWconst [c] _) 13524 // cond: int16(c)==-1 13525 // result: (MOVWconst [-1]) 13526 for { 13527 c := v.AuxInt 13528 if !(int16(c) == -1) { 13529 break 13530 } 13531 v.reset(OpAMD64MOVWconst) 13532 v.AuxInt = -1 13533 return true 13534 } 13535 // match: (ORWconst [c] (MOVWconst [d])) 13536 // cond: 13537 // result: (MOVWconst [c|d]) 13538 for { 13539 c := v.AuxInt 13540 v_0 := v.Args[0] 13541 if v_0.Op != OpAMD64MOVWconst { 13542 break 13543 } 13544 d := v_0.AuxInt 13545 v.reset(OpAMD64MOVWconst) 13546 v.AuxInt = c | d 13547 return true 13548 } 13549 return false 13550 } 13551 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { 13552 b := v.Block 13553 _ = b 13554 // match: (OffPtr [off] ptr) 13555 // cond: is32Bit(off) 13556 // result: (ADDQconst [off] ptr) 13557 for { 13558 off := v.AuxInt 13559 ptr := v.Args[0] 13560 if !(is32Bit(off)) { 13561 break 13562 } 13563 v.reset(OpAMD64ADDQconst) 13564 v.AuxInt = off 13565 v.AddArg(ptr) 13566 return true 13567 } 13568 // match: (OffPtr [off] ptr) 13569 // cond: 13570 // result: (ADDQ (MOVQconst [off]) ptr) 13571 for { 13572 off := v.AuxInt 13573 ptr := v.Args[0] 13574 v.reset(OpAMD64ADDQ) 13575 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 13576 v0.AuxInt = off 13577 v.AddArg(v0) 13578 v.AddArg(ptr) 13579 return true 13580 } 13581 return false 13582 } 13583 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { 13584 b := v.Block 13585 _ = b 13586 // match: (Or16 x y) 13587 // cond: 13588 // result: (ORW x y) 13589 for { 13590 x := v.Args[0] 13591 y := v.Args[1] 13592 v.reset(OpAMD64ORW) 13593 v.AddArg(x) 13594 v.AddArg(y) 13595 return true 13596 } 13597 return false 13598 } 13599 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { 13600 b := v.Block 13601 _ = b 13602 // match: (Or32 x y) 13603 // cond: 13604 // result: (ORL x y) 13605 for { 13606 x := v.Args[0] 13607 y := v.Args[1] 13608 v.reset(OpAMD64ORL) 13609 v.AddArg(x) 13610 v.AddArg(y) 13611 return true 13612 } 13613 return false 13614 } 13615 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { 13616 b := v.Block 13617 _ = b 13618 // match: (Or64 x y) 13619 // cond: 13620 // result: (ORQ x y) 13621 for { 13622 x := v.Args[0] 13623 y := v.Args[1] 13624 v.reset(OpAMD64ORQ) 13625 v.AddArg(x) 13626 v.AddArg(y) 13627 return true 13628 } 13629 return false 13630 } 13631 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { 13632 b := v.Block 13633 _ = b 13634 // match: (Or8 x y) 13635 // cond: 13636 // result: (ORB x y) 13637 for { 13638 x := v.Args[0] 13639 y := v.Args[1] 13640 v.reset(OpAMD64ORB) 13641 v.AddArg(x) 13642 v.AddArg(y) 13643 return true 13644 } 13645 return false 13646 } 13647 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { 13648 b := v.Block 13649 _ = b 13650 // match: (Rsh16Ux16 <t> x y) 13651 // cond: 13652 // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 13653 for { 13654 t := v.Type 13655 x := v.Args[0] 13656 y := v.Args[1] 13657 v.reset(OpAMD64ANDW) 13658 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 13659 v0.AddArg(x) 13660 v0.AddArg(y) 13661 v.AddArg(v0) 13662 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13663 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 13664 v2.AddArg(y) 13665 v2.AuxInt = 16 13666 v1.AddArg(v2) 13667 v.AddArg(v1) 13668 return true 13669 } 13670 return false 13671 } 13672 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { 13673 b := v.Block 13674 _ = b 13675 // match: (Rsh16Ux32 <t> x y) 13676 // cond: 13677 // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 13678 for { 13679 t := v.Type 13680 x := v.Args[0] 13681 y := v.Args[1] 13682 v.reset(OpAMD64ANDW) 13683 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 13684 v0.AddArg(x) 13685 v0.AddArg(y) 13686 v.AddArg(v0) 13687 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13688 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 13689 v2.AddArg(y) 13690 v2.AuxInt = 16 13691 v1.AddArg(v2) 13692 v.AddArg(v1) 13693 return true 13694 } 13695 return false 13696 } 13697 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { 13698 b := v.Block 13699 _ = b 13700 // match: (Rsh16Ux64 <t> x y) 13701 // cond: 13702 // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 13703 for { 13704 t := v.Type 13705 x := v.Args[0] 13706 y := v.Args[1] 13707 v.reset(OpAMD64ANDW) 13708 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 13709 v0.AddArg(x) 13710 v0.AddArg(y) 13711 v.AddArg(v0) 13712 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13713 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 13714 v2.AddArg(y) 13715 v2.AuxInt = 16 13716 v1.AddArg(v2) 13717 v.AddArg(v1) 13718 return true 13719 } 13720 return false 13721 } 13722 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { 13723 b := v.Block 13724 _ = b 13725 // match: (Rsh16Ux8 <t> x y) 13726 // cond: 13727 // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 13728 for { 13729 t := v.Type 13730 x := v.Args[0] 13731 y := v.Args[1] 13732 v.reset(OpAMD64ANDW) 13733 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 13734 v0.AddArg(x) 13735 v0.AddArg(y) 13736 v.AddArg(v0) 13737 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13738 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 13739 v2.AddArg(y) 13740 v2.AuxInt = 16 13741 v1.AddArg(v2) 13742 v.AddArg(v1) 13743 return true 13744 } 13745 return false 13746 } 13747 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { 13748 b := v.Block 13749 _ = b 13750 // match: (Rsh16x16 <t> x y) 13751 // cond: 13752 // result: (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 13753 for { 13754 t := v.Type 13755 x := v.Args[0] 13756 y := v.Args[1] 13757 v.reset(OpAMD64SARW) 13758 v.Type = t 13759 v.AddArg(x) 13760 v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) 13761 v0.AddArg(y) 13762 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 13763 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 13764 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 13765 v3.AddArg(y) 13766 v3.AuxInt = 16 13767 v2.AddArg(v3) 13768 v1.AddArg(v2) 13769 v0.AddArg(v1) 13770 v.AddArg(v0) 13771 return true 13772 } 13773 return false 13774 } 13775 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { 13776 b := v.Block 13777 _ = b 13778 // match: (Rsh16x32 <t> x y) 13779 // cond: 13780 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 13781 for { 13782 t := v.Type 13783 x := v.Args[0] 13784 y := v.Args[1] 13785 v.reset(OpAMD64SARW) 13786 v.Type = t 13787 v.AddArg(x) 13788 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 13789 v0.AddArg(y) 13790 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 13791 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 13792 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 13793 v3.AddArg(y) 13794 v3.AuxInt = 16 13795 v2.AddArg(v3) 13796 v1.AddArg(v2) 13797 v0.AddArg(v1) 13798 v.AddArg(v0) 13799 return true 13800 } 13801 return false 13802 } 13803 func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { 13804 b := v.Block 13805 _ = b 13806 // match: (Rsh16x64 <t> x y) 13807 // cond: 13808 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 13809 for { 13810 t := v.Type 13811 x := v.Args[0] 13812 y := v.Args[1] 13813 v.reset(OpAMD64SARW) 13814 v.Type = t 13815 v.AddArg(x) 13816 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 13817 v0.AddArg(y) 13818 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 13819 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 13820 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 13821 v3.AddArg(y) 13822 v3.AuxInt = 16 13823 v2.AddArg(v3) 13824 v1.AddArg(v2) 13825 v0.AddArg(v1) 13826 v.AddArg(v0) 13827 return true 13828 } 13829 return false 13830 } 13831 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { 13832 b := v.Block 13833 _ = b 13834 // match: (Rsh16x8 <t> x y) 13835 // cond: 13836 // result: (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 13837 for { 13838 t := v.Type 13839 x := v.Args[0] 13840 y := v.Args[1] 13841 v.reset(OpAMD64SARW) 13842 v.Type = t 13843 v.AddArg(x) 13844 v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) 13845 v0.AddArg(y) 13846 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 13847 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 13848 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 13849 v3.AddArg(y) 13850 v3.AuxInt = 16 13851 v2.AddArg(v3) 13852 v1.AddArg(v2) 13853 v0.AddArg(v1) 13854 v.AddArg(v0) 13855 return true 13856 } 13857 return false 13858 } 13859 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { 13860 b := v.Block 13861 _ = b 13862 // match: (Rsh32Ux16 <t> x y) 13863 // cond: 13864 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 13865 for { 13866 t := v.Type 13867 x := v.Args[0] 13868 y := v.Args[1] 13869 v.reset(OpAMD64ANDL) 13870 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 13871 v0.AddArg(x) 13872 v0.AddArg(y) 13873 v.AddArg(v0) 13874 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13875 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 13876 v2.AddArg(y) 13877 v2.AuxInt = 32 13878 v1.AddArg(v2) 13879 v.AddArg(v1) 13880 return true 13881 } 13882 return false 13883 } 13884 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { 13885 b := v.Block 13886 _ = b 13887 // match: (Rsh32Ux32 <t> x y) 13888 // cond: 13889 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 13890 for { 13891 t := v.Type 13892 x := v.Args[0] 13893 y := v.Args[1] 13894 v.reset(OpAMD64ANDL) 13895 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 13896 v0.AddArg(x) 13897 v0.AddArg(y) 13898 v.AddArg(v0) 13899 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13900 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 13901 v2.AddArg(y) 13902 v2.AuxInt = 32 13903 v1.AddArg(v2) 13904 v.AddArg(v1) 13905 return true 13906 } 13907 return false 13908 } 13909 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { 13910 b := v.Block 13911 _ = b 13912 // match: (Rsh32Ux64 <t> x y) 13913 // cond: 13914 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 13915 for { 13916 t := v.Type 13917 x := v.Args[0] 13918 y := v.Args[1] 13919 v.reset(OpAMD64ANDL) 13920 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 13921 v0.AddArg(x) 13922 v0.AddArg(y) 13923 v.AddArg(v0) 13924 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13925 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 13926 v2.AddArg(y) 13927 v2.AuxInt = 32 13928 v1.AddArg(v2) 13929 v.AddArg(v1) 13930 return true 13931 } 13932 return false 13933 } 13934 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { 13935 b := v.Block 13936 _ = b 13937 // match: (Rsh32Ux8 <t> x y) 13938 // cond: 13939 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 13940 for { 13941 t := v.Type 13942 x := v.Args[0] 13943 y := v.Args[1] 13944 v.reset(OpAMD64ANDL) 13945 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 13946 v0.AddArg(x) 13947 v0.AddArg(y) 13948 v.AddArg(v0) 13949 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 13950 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 13951 v2.AddArg(y) 13952 v2.AuxInt = 32 13953 v1.AddArg(v2) 13954 v.AddArg(v1) 13955 return true 13956 } 13957 return false 13958 } 13959 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { 13960 b := v.Block 13961 _ = b 13962 // match: (Rsh32x16 <t> x y) 13963 // cond: 13964 // result: (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 13965 for { 13966 t := v.Type 13967 x := v.Args[0] 13968 y := v.Args[1] 13969 v.reset(OpAMD64SARL) 13970 v.Type = t 13971 v.AddArg(x) 13972 v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) 13973 v0.AddArg(y) 13974 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 13975 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 13976 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 13977 v3.AddArg(y) 13978 v3.AuxInt = 32 13979 v2.AddArg(v3) 13980 v1.AddArg(v2) 13981 v0.AddArg(v1) 13982 v.AddArg(v0) 13983 return true 13984 } 13985 return false 13986 } 13987 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { 13988 b := v.Block 13989 _ = b 13990 // match: (Rsh32x32 <t> x y) 13991 // cond: 13992 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 13993 for { 13994 t := v.Type 13995 x := v.Args[0] 13996 y := v.Args[1] 13997 v.reset(OpAMD64SARL) 13998 v.Type = t 13999 v.AddArg(x) 14000 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 14001 v0.AddArg(y) 14002 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14003 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14004 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 14005 v3.AddArg(y) 14006 v3.AuxInt = 32 14007 v2.AddArg(v3) 14008 v1.AddArg(v2) 14009 v0.AddArg(v1) 14010 v.AddArg(v0) 14011 return true 14012 } 14013 return false 14014 } 14015 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { 14016 b := v.Block 14017 _ = b 14018 // match: (Rsh32x64 <t> x y) 14019 // cond: 14020 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 14021 for { 14022 t := v.Type 14023 x := v.Args[0] 14024 y := v.Args[1] 14025 v.reset(OpAMD64SARL) 14026 v.Type = t 14027 v.AddArg(x) 14028 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 14029 v0.AddArg(y) 14030 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 14031 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 14032 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 14033 v3.AddArg(y) 14034 v3.AuxInt = 32 14035 v2.AddArg(v3) 14036 v1.AddArg(v2) 14037 v0.AddArg(v1) 14038 v.AddArg(v0) 14039 return true 14040 } 14041 return false 14042 } 14043 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { 14044 b := v.Block 14045 _ = b 14046 // match: (Rsh32x8 <t> x y) 14047 // cond: 14048 // result: (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 14049 for { 14050 t := v.Type 14051 x := v.Args[0] 14052 y := v.Args[1] 14053 v.reset(OpAMD64SARL) 14054 v.Type = t 14055 v.AddArg(x) 14056 v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) 14057 v0.AddArg(y) 14058 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14059 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14060 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 14061 v3.AddArg(y) 14062 v3.AuxInt = 32 14063 v2.AddArg(v3) 14064 v1.AddArg(v2) 14065 v0.AddArg(v1) 14066 v.AddArg(v0) 14067 return true 14068 } 14069 return false 14070 } 14071 func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool { 14072 b := v.Block 14073 _ = b 14074 // match: (Rsh64Ux16 <t> x y) 14075 // cond: 14076 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 14077 for { 14078 t := v.Type 14079 x := v.Args[0] 14080 y := v.Args[1] 14081 v.reset(OpAMD64ANDQ) 14082 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 14083 v0.AddArg(x) 14084 v0.AddArg(y) 14085 v.AddArg(v0) 14086 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 14087 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 14088 v2.AddArg(y) 14089 v2.AuxInt = 64 14090 v1.AddArg(v2) 14091 v.AddArg(v1) 14092 return true 14093 } 14094 return false 14095 } 14096 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { 14097 b := v.Block 14098 _ = b 14099 // match: (Rsh64Ux32 <t> x y) 14100 // cond: 14101 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 14102 for { 14103 t := v.Type 14104 x := v.Args[0] 14105 y := v.Args[1] 14106 v.reset(OpAMD64ANDQ) 14107 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 14108 v0.AddArg(x) 14109 v0.AddArg(y) 14110 v.AddArg(v0) 14111 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 14112 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 14113 v2.AddArg(y) 14114 v2.AuxInt = 64 14115 v1.AddArg(v2) 14116 v.AddArg(v1) 14117 return true 14118 } 14119 return false 14120 } 14121 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { 14122 b := v.Block 14123 _ = b 14124 // match: (Rsh64Ux64 <t> x y) 14125 // cond: 14126 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 14127 for { 14128 t := v.Type 14129 x := v.Args[0] 14130 y := v.Args[1] 14131 v.reset(OpAMD64ANDQ) 14132 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 14133 v0.AddArg(x) 14134 v0.AddArg(y) 14135 v.AddArg(v0) 14136 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 14137 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 14138 v2.AddArg(y) 14139 v2.AuxInt = 64 14140 v1.AddArg(v2) 14141 v.AddArg(v1) 14142 return true 14143 } 14144 return false 14145 } 14146 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { 14147 b := v.Block 14148 _ = b 14149 // match: (Rsh64Ux8 <t> x y) 14150 // cond: 14151 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 14152 for { 14153 t := v.Type 14154 x := v.Args[0] 14155 y := v.Args[1] 14156 v.reset(OpAMD64ANDQ) 14157 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 14158 v0.AddArg(x) 14159 v0.AddArg(y) 14160 v.AddArg(v0) 14161 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 14162 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 14163 v2.AddArg(y) 14164 v2.AuxInt = 64 14165 v1.AddArg(v2) 14166 v.AddArg(v1) 14167 return true 14168 } 14169 return false 14170 } 14171 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { 14172 b := v.Block 14173 _ = b 14174 // match: (Rsh64x16 <t> x y) 14175 // cond: 14176 // result: (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 14177 for { 14178 t := v.Type 14179 x := v.Args[0] 14180 y := v.Args[1] 14181 v.reset(OpAMD64SARQ) 14182 v.Type = t 14183 v.AddArg(x) 14184 v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) 14185 v0.AddArg(y) 14186 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14187 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14188 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 14189 v3.AddArg(y) 14190 v3.AuxInt = 64 14191 v2.AddArg(v3) 14192 v1.AddArg(v2) 14193 v0.AddArg(v1) 14194 v.AddArg(v0) 14195 return true 14196 } 14197 return false 14198 } 14199 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { 14200 b := v.Block 14201 _ = b 14202 // match: (Rsh64x32 <t> x y) 14203 // cond: 14204 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 14205 for { 14206 t := v.Type 14207 x := v.Args[0] 14208 y := v.Args[1] 14209 v.reset(OpAMD64SARQ) 14210 v.Type = t 14211 v.AddArg(x) 14212 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 14213 v0.AddArg(y) 14214 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14215 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14216 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 14217 v3.AddArg(y) 14218 v3.AuxInt = 64 14219 v2.AddArg(v3) 14220 v1.AddArg(v2) 14221 v0.AddArg(v1) 14222 v.AddArg(v0) 14223 return true 14224 } 14225 return false 14226 } 14227 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { 14228 b := v.Block 14229 _ = b 14230 // match: (Rsh64x64 <t> x y) 14231 // cond: 14232 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 14233 for { 14234 t := v.Type 14235 x := v.Args[0] 14236 y := v.Args[1] 14237 v.reset(OpAMD64SARQ) 14238 v.Type = t 14239 v.AddArg(x) 14240 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 14241 v0.AddArg(y) 14242 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 14243 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 14244 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 14245 v3.AddArg(y) 14246 v3.AuxInt = 64 14247 v2.AddArg(v3) 14248 v1.AddArg(v2) 14249 v0.AddArg(v1) 14250 v.AddArg(v0) 14251 return true 14252 } 14253 return false 14254 } 14255 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { 14256 b := v.Block 14257 _ = b 14258 // match: (Rsh64x8 <t> x y) 14259 // cond: 14260 // result: (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 14261 for { 14262 t := v.Type 14263 x := v.Args[0] 14264 y := v.Args[1] 14265 v.reset(OpAMD64SARQ) 14266 v.Type = t 14267 v.AddArg(x) 14268 v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) 14269 v0.AddArg(y) 14270 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14271 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14272 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 14273 v3.AddArg(y) 14274 v3.AuxInt = 64 14275 v2.AddArg(v3) 14276 v1.AddArg(v2) 14277 v0.AddArg(v1) 14278 v.AddArg(v0) 14279 return true 14280 } 14281 return false 14282 } 14283 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { 14284 b := v.Block 14285 _ = b 14286 // match: (Rsh8Ux16 <t> x y) 14287 // cond: 14288 // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 14289 for { 14290 t := v.Type 14291 x := v.Args[0] 14292 y := v.Args[1] 14293 v.reset(OpAMD64ANDB) 14294 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 14295 v0.AddArg(x) 14296 v0.AddArg(y) 14297 v.AddArg(v0) 14298 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 14299 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 14300 v2.AddArg(y) 14301 v2.AuxInt = 8 14302 v1.AddArg(v2) 14303 v.AddArg(v1) 14304 return true 14305 } 14306 return false 14307 } 14308 func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { 14309 b := v.Block 14310 _ = b 14311 // match: (Rsh8Ux32 <t> x y) 14312 // cond: 14313 // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 14314 for { 14315 t := v.Type 14316 x := v.Args[0] 14317 y := v.Args[1] 14318 v.reset(OpAMD64ANDB) 14319 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 14320 v0.AddArg(x) 14321 v0.AddArg(y) 14322 v.AddArg(v0) 14323 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 14324 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 14325 v2.AddArg(y) 14326 v2.AuxInt = 8 14327 v1.AddArg(v2) 14328 v.AddArg(v1) 14329 return true 14330 } 14331 return false 14332 } 14333 func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { 14334 b := v.Block 14335 _ = b 14336 // match: (Rsh8Ux64 <t> x y) 14337 // cond: 14338 // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 14339 for { 14340 t := v.Type 14341 x := v.Args[0] 14342 y := v.Args[1] 14343 v.reset(OpAMD64ANDB) 14344 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 14345 v0.AddArg(x) 14346 v0.AddArg(y) 14347 v.AddArg(v0) 14348 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 14349 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 14350 v2.AddArg(y) 14351 v2.AuxInt = 8 14352 v1.AddArg(v2) 14353 v.AddArg(v1) 14354 return true 14355 } 14356 return false 14357 } 14358 func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { 14359 b := v.Block 14360 _ = b 14361 // match: (Rsh8Ux8 <t> x y) 14362 // cond: 14363 // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 14364 for { 14365 t := v.Type 14366 x := v.Args[0] 14367 y := v.Args[1] 14368 v.reset(OpAMD64ANDB) 14369 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 14370 v0.AddArg(x) 14371 v0.AddArg(y) 14372 v.AddArg(v0) 14373 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 14374 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 14375 v2.AddArg(y) 14376 v2.AuxInt = 8 14377 v1.AddArg(v2) 14378 v.AddArg(v1) 14379 return true 14380 } 14381 return false 14382 } 14383 func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { 14384 b := v.Block 14385 _ = b 14386 // match: (Rsh8x16 <t> x y) 14387 // cond: 14388 // result: (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 14389 for { 14390 t := v.Type 14391 x := v.Args[0] 14392 y := v.Args[1] 14393 v.reset(OpAMD64SARB) 14394 v.Type = t 14395 v.AddArg(x) 14396 v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) 14397 v0.AddArg(y) 14398 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14399 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14400 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 14401 v3.AddArg(y) 14402 v3.AuxInt = 8 14403 v2.AddArg(v3) 14404 v1.AddArg(v2) 14405 v0.AddArg(v1) 14406 v.AddArg(v0) 14407 return true 14408 } 14409 return false 14410 } 14411 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { 14412 b := v.Block 14413 _ = b 14414 // match: (Rsh8x32 <t> x y) 14415 // cond: 14416 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 14417 for { 14418 t := v.Type 14419 x := v.Args[0] 14420 y := v.Args[1] 14421 v.reset(OpAMD64SARB) 14422 v.Type = t 14423 v.AddArg(x) 14424 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 14425 v0.AddArg(y) 14426 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14427 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14428 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 14429 v3.AddArg(y) 14430 v3.AuxInt = 8 14431 v2.AddArg(v3) 14432 v1.AddArg(v2) 14433 v0.AddArg(v1) 14434 v.AddArg(v0) 14435 return true 14436 } 14437 return false 14438 } 14439 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { 14440 b := v.Block 14441 _ = b 14442 // match: (Rsh8x64 <t> x y) 14443 // cond: 14444 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 14445 for { 14446 t := v.Type 14447 x := v.Args[0] 14448 y := v.Args[1] 14449 v.reset(OpAMD64SARB) 14450 v.Type = t 14451 v.AddArg(x) 14452 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 14453 v0.AddArg(y) 14454 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 14455 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 14456 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 14457 v3.AddArg(y) 14458 v3.AuxInt = 8 14459 v2.AddArg(v3) 14460 v1.AddArg(v2) 14461 v0.AddArg(v1) 14462 v.AddArg(v0) 14463 return true 14464 } 14465 return false 14466 } 14467 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { 14468 b := v.Block 14469 _ = b 14470 // match: (Rsh8x8 <t> x y) 14471 // cond: 14472 // result: (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 14473 for { 14474 t := v.Type 14475 x := v.Args[0] 14476 y := v.Args[1] 14477 v.reset(OpAMD64SARB) 14478 v.Type = t 14479 v.AddArg(x) 14480 v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) 14481 v0.AddArg(y) 14482 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 14483 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 14484 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 14485 v3.AddArg(y) 14486 v3.AuxInt = 8 14487 v2.AddArg(v3) 14488 v1.AddArg(v2) 14489 v0.AddArg(v1) 14490 v.AddArg(v0) 14491 return true 14492 } 14493 return false 14494 } 14495 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { 14496 b := v.Block 14497 _ = b 14498 // match: (SARB x (MOVQconst [c])) 14499 // cond: 14500 // result: (SARBconst [c&31] x) 14501 for { 14502 x := v.Args[0] 14503 v_1 := v.Args[1] 14504 if v_1.Op != OpAMD64MOVQconst { 14505 break 14506 } 14507 c := v_1.AuxInt 14508 v.reset(OpAMD64SARBconst) 14509 v.AuxInt = c & 31 14510 v.AddArg(x) 14511 return true 14512 } 14513 // match: (SARB x (MOVLconst [c])) 14514 // cond: 14515 // result: (SARBconst [c&31] x) 14516 for { 14517 x := v.Args[0] 14518 v_1 := v.Args[1] 14519 if v_1.Op != OpAMD64MOVLconst { 14520 break 14521 } 14522 c := v_1.AuxInt 14523 v.reset(OpAMD64SARBconst) 14524 v.AuxInt = c & 31 14525 v.AddArg(x) 14526 return true 14527 } 14528 // match: (SARB x (MOVWconst [c])) 14529 // cond: 14530 // result: (SARBconst [c&31] x) 14531 for { 14532 x := v.Args[0] 14533 v_1 := v.Args[1] 14534 if v_1.Op != OpAMD64MOVWconst { 14535 break 14536 } 14537 c := v_1.AuxInt 14538 v.reset(OpAMD64SARBconst) 14539 v.AuxInt = c & 31 14540 v.AddArg(x) 14541 return true 14542 } 14543 // match: (SARB x (MOVBconst [c])) 14544 // cond: 14545 // result: (SARBconst [c&31] x) 14546 for { 14547 x := v.Args[0] 14548 v_1 := v.Args[1] 14549 if v_1.Op != OpAMD64MOVBconst { 14550 break 14551 } 14552 c := v_1.AuxInt 14553 v.reset(OpAMD64SARBconst) 14554 v.AuxInt = c & 31 14555 v.AddArg(x) 14556 return true 14557 } 14558 // match: (SARB x (ANDBconst [31] y)) 14559 // cond: 14560 // result: (SARB x y) 14561 for { 14562 x := v.Args[0] 14563 v_1 := v.Args[1] 14564 if v_1.Op != OpAMD64ANDBconst { 14565 break 14566 } 14567 if v_1.AuxInt != 31 { 14568 break 14569 } 14570 y := v_1.Args[0] 14571 v.reset(OpAMD64SARB) 14572 v.AddArg(x) 14573 v.AddArg(y) 14574 return true 14575 } 14576 return false 14577 } 14578 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { 14579 b := v.Block 14580 _ = b 14581 // match: (SARBconst [c] (MOVQconst [d])) 14582 // cond: 14583 // result: (MOVQconst [d>>uint64(c)]) 14584 for { 14585 c := v.AuxInt 14586 v_0 := v.Args[0] 14587 if v_0.Op != OpAMD64MOVQconst { 14588 break 14589 } 14590 d := v_0.AuxInt 14591 v.reset(OpAMD64MOVQconst) 14592 v.AuxInt = d >> uint64(c) 14593 return true 14594 } 14595 return false 14596 } 14597 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { 14598 b := v.Block 14599 _ = b 14600 // match: (SARL x (MOVQconst [c])) 14601 // cond: 14602 // result: (SARLconst [c&31] x) 14603 for { 14604 x := v.Args[0] 14605 v_1 := v.Args[1] 14606 if v_1.Op != OpAMD64MOVQconst { 14607 break 14608 } 14609 c := v_1.AuxInt 14610 v.reset(OpAMD64SARLconst) 14611 v.AuxInt = c & 31 14612 v.AddArg(x) 14613 return true 14614 } 14615 // match: (SARL x (MOVLconst [c])) 14616 // cond: 14617 // result: (SARLconst [c&31] x) 14618 for { 14619 x := v.Args[0] 14620 v_1 := v.Args[1] 14621 if v_1.Op != OpAMD64MOVLconst { 14622 break 14623 } 14624 c := v_1.AuxInt 14625 v.reset(OpAMD64SARLconst) 14626 v.AuxInt = c & 31 14627 v.AddArg(x) 14628 return true 14629 } 14630 // match: (SARL x (MOVWconst [c])) 14631 // cond: 14632 // result: (SARLconst [c&31] x) 14633 for { 14634 x := v.Args[0] 14635 v_1 := v.Args[1] 14636 if v_1.Op != OpAMD64MOVWconst { 14637 break 14638 } 14639 c := v_1.AuxInt 14640 v.reset(OpAMD64SARLconst) 14641 v.AuxInt = c & 31 14642 v.AddArg(x) 14643 return true 14644 } 14645 // match: (SARL x (MOVBconst [c])) 14646 // cond: 14647 // result: (SARLconst [c&31] x) 14648 for { 14649 x := v.Args[0] 14650 v_1 := v.Args[1] 14651 if v_1.Op != OpAMD64MOVBconst { 14652 break 14653 } 14654 c := v_1.AuxInt 14655 v.reset(OpAMD64SARLconst) 14656 v.AuxInt = c & 31 14657 v.AddArg(x) 14658 return true 14659 } 14660 // match: (SARL x (ANDLconst [31] y)) 14661 // cond: 14662 // result: (SARL x y) 14663 for { 14664 x := v.Args[0] 14665 v_1 := v.Args[1] 14666 if v_1.Op != OpAMD64ANDLconst { 14667 break 14668 } 14669 if v_1.AuxInt != 31 { 14670 break 14671 } 14672 y := v_1.Args[0] 14673 v.reset(OpAMD64SARL) 14674 v.AddArg(x) 14675 v.AddArg(y) 14676 return true 14677 } 14678 return false 14679 } 14680 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { 14681 b := v.Block 14682 _ = b 14683 // match: (SARLconst [c] (MOVQconst [d])) 14684 // cond: 14685 // result: (MOVQconst [d>>uint64(c)]) 14686 for { 14687 c := v.AuxInt 14688 v_0 := v.Args[0] 14689 if v_0.Op != OpAMD64MOVQconst { 14690 break 14691 } 14692 d := v_0.AuxInt 14693 v.reset(OpAMD64MOVQconst) 14694 v.AuxInt = d >> uint64(c) 14695 return true 14696 } 14697 return false 14698 } 14699 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { 14700 b := v.Block 14701 _ = b 14702 // match: (SARQ x (MOVQconst [c])) 14703 // cond: 14704 // result: (SARQconst [c&63] x) 14705 for { 14706 x := v.Args[0] 14707 v_1 := v.Args[1] 14708 if v_1.Op != OpAMD64MOVQconst { 14709 break 14710 } 14711 c := v_1.AuxInt 14712 v.reset(OpAMD64SARQconst) 14713 v.AuxInt = c & 63 14714 v.AddArg(x) 14715 return true 14716 } 14717 // match: (SARQ x (MOVLconst [c])) 14718 // cond: 14719 // result: (SARQconst [c&63] x) 14720 for { 14721 x := v.Args[0] 14722 v_1 := v.Args[1] 14723 if v_1.Op != OpAMD64MOVLconst { 14724 break 14725 } 14726 c := v_1.AuxInt 14727 v.reset(OpAMD64SARQconst) 14728 v.AuxInt = c & 63 14729 v.AddArg(x) 14730 return true 14731 } 14732 // match: (SARQ x (MOVWconst [c])) 14733 // cond: 14734 // result: (SARQconst [c&63] x) 14735 for { 14736 x := v.Args[0] 14737 v_1 := v.Args[1] 14738 if v_1.Op != OpAMD64MOVWconst { 14739 break 14740 } 14741 c := v_1.AuxInt 14742 v.reset(OpAMD64SARQconst) 14743 v.AuxInt = c & 63 14744 v.AddArg(x) 14745 return true 14746 } 14747 // match: (SARQ x (MOVBconst [c])) 14748 // cond: 14749 // result: (SARQconst [c&63] x) 14750 for { 14751 x := v.Args[0] 14752 v_1 := v.Args[1] 14753 if v_1.Op != OpAMD64MOVBconst { 14754 break 14755 } 14756 c := v_1.AuxInt 14757 v.reset(OpAMD64SARQconst) 14758 v.AuxInt = c & 63 14759 v.AddArg(x) 14760 return true 14761 } 14762 // match: (SARQ x (ANDQconst [63] y)) 14763 // cond: 14764 // result: (SARQ x y) 14765 for { 14766 x := v.Args[0] 14767 v_1 := v.Args[1] 14768 if v_1.Op != OpAMD64ANDQconst { 14769 break 14770 } 14771 if v_1.AuxInt != 63 { 14772 break 14773 } 14774 y := v_1.Args[0] 14775 v.reset(OpAMD64SARQ) 14776 v.AddArg(x) 14777 v.AddArg(y) 14778 return true 14779 } 14780 return false 14781 } 14782 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { 14783 b := v.Block 14784 _ = b 14785 // match: (SARQconst [c] (MOVQconst [d])) 14786 // cond: 14787 // result: (MOVQconst [d>>uint64(c)]) 14788 for { 14789 c := v.AuxInt 14790 v_0 := v.Args[0] 14791 if v_0.Op != OpAMD64MOVQconst { 14792 break 14793 } 14794 d := v_0.AuxInt 14795 v.reset(OpAMD64MOVQconst) 14796 v.AuxInt = d >> uint64(c) 14797 return true 14798 } 14799 return false 14800 } 14801 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { 14802 b := v.Block 14803 _ = b 14804 // match: (SARW x (MOVQconst [c])) 14805 // cond: 14806 // result: (SARWconst [c&31] x) 14807 for { 14808 x := v.Args[0] 14809 v_1 := v.Args[1] 14810 if v_1.Op != OpAMD64MOVQconst { 14811 break 14812 } 14813 c := v_1.AuxInt 14814 v.reset(OpAMD64SARWconst) 14815 v.AuxInt = c & 31 14816 v.AddArg(x) 14817 return true 14818 } 14819 // match: (SARW x (MOVLconst [c])) 14820 // cond: 14821 // result: (SARWconst [c&31] x) 14822 for { 14823 x := v.Args[0] 14824 v_1 := v.Args[1] 14825 if v_1.Op != OpAMD64MOVLconst { 14826 break 14827 } 14828 c := v_1.AuxInt 14829 v.reset(OpAMD64SARWconst) 14830 v.AuxInt = c & 31 14831 v.AddArg(x) 14832 return true 14833 } 14834 // match: (SARW x (MOVWconst [c])) 14835 // cond: 14836 // result: (SARWconst [c&31] x) 14837 for { 14838 x := v.Args[0] 14839 v_1 := v.Args[1] 14840 if v_1.Op != OpAMD64MOVWconst { 14841 break 14842 } 14843 c := v_1.AuxInt 14844 v.reset(OpAMD64SARWconst) 14845 v.AuxInt = c & 31 14846 v.AddArg(x) 14847 return true 14848 } 14849 // match: (SARW x (MOVBconst [c])) 14850 // cond: 14851 // result: (SARWconst [c&31] x) 14852 for { 14853 x := v.Args[0] 14854 v_1 := v.Args[1] 14855 if v_1.Op != OpAMD64MOVBconst { 14856 break 14857 } 14858 c := v_1.AuxInt 14859 v.reset(OpAMD64SARWconst) 14860 v.AuxInt = c & 31 14861 v.AddArg(x) 14862 return true 14863 } 14864 // match: (SARW x (ANDWconst [31] y)) 14865 // cond: 14866 // result: (SARW x y) 14867 for { 14868 x := v.Args[0] 14869 v_1 := v.Args[1] 14870 if v_1.Op != OpAMD64ANDWconst { 14871 break 14872 } 14873 if v_1.AuxInt != 31 { 14874 break 14875 } 14876 y := v_1.Args[0] 14877 v.reset(OpAMD64SARW) 14878 v.AddArg(x) 14879 v.AddArg(y) 14880 return true 14881 } 14882 return false 14883 } 14884 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { 14885 b := v.Block 14886 _ = b 14887 // match: (SARWconst [c] (MOVQconst [d])) 14888 // cond: 14889 // result: (MOVQconst [d>>uint64(c)]) 14890 for { 14891 c := v.AuxInt 14892 v_0 := v.Args[0] 14893 if v_0.Op != OpAMD64MOVQconst { 14894 break 14895 } 14896 d := v_0.AuxInt 14897 v.reset(OpAMD64MOVQconst) 14898 v.AuxInt = d >> uint64(c) 14899 return true 14900 } 14901 return false 14902 } 14903 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { 14904 b := v.Block 14905 _ = b 14906 // match: (SBBLcarrymask (FlagEQ)) 14907 // cond: 14908 // result: (MOVLconst [0]) 14909 for { 14910 v_0 := v.Args[0] 14911 if v_0.Op != OpAMD64FlagEQ { 14912 break 14913 } 14914 v.reset(OpAMD64MOVLconst) 14915 v.AuxInt = 0 14916 return true 14917 } 14918 // match: (SBBLcarrymask (FlagLT_ULT)) 14919 // cond: 14920 // result: (MOVLconst [-1]) 14921 for { 14922 v_0 := v.Args[0] 14923 if v_0.Op != OpAMD64FlagLT_ULT { 14924 break 14925 } 14926 v.reset(OpAMD64MOVLconst) 14927 v.AuxInt = -1 14928 return true 14929 } 14930 // match: (SBBLcarrymask (FlagLT_UGT)) 14931 // cond: 14932 // result: (MOVLconst [0]) 14933 for { 14934 v_0 := v.Args[0] 14935 if v_0.Op != OpAMD64FlagLT_UGT { 14936 break 14937 } 14938 v.reset(OpAMD64MOVLconst) 14939 v.AuxInt = 0 14940 return true 14941 } 14942 // match: (SBBLcarrymask (FlagGT_ULT)) 14943 // cond: 14944 // result: (MOVLconst [-1]) 14945 for { 14946 v_0 := v.Args[0] 14947 if v_0.Op != OpAMD64FlagGT_ULT { 14948 break 14949 } 14950 v.reset(OpAMD64MOVLconst) 14951 v.AuxInt = -1 14952 return true 14953 } 14954 // match: (SBBLcarrymask (FlagGT_UGT)) 14955 // cond: 14956 // result: (MOVLconst [0]) 14957 for { 14958 v_0 := v.Args[0] 14959 if v_0.Op != OpAMD64FlagGT_UGT { 14960 break 14961 } 14962 v.reset(OpAMD64MOVLconst) 14963 v.AuxInt = 0 14964 return true 14965 } 14966 return false 14967 } 14968 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { 14969 b := v.Block 14970 _ = b 14971 // match: (SBBQcarrymask (FlagEQ)) 14972 // cond: 14973 // result: (MOVQconst [0]) 14974 for { 14975 v_0 := v.Args[0] 14976 if v_0.Op != OpAMD64FlagEQ { 14977 break 14978 } 14979 v.reset(OpAMD64MOVQconst) 14980 v.AuxInt = 0 14981 return true 14982 } 14983 // match: (SBBQcarrymask (FlagLT_ULT)) 14984 // cond: 14985 // result: (MOVQconst [-1]) 14986 for { 14987 v_0 := v.Args[0] 14988 if v_0.Op != OpAMD64FlagLT_ULT { 14989 break 14990 } 14991 v.reset(OpAMD64MOVQconst) 14992 v.AuxInt = -1 14993 return true 14994 } 14995 // match: (SBBQcarrymask (FlagLT_UGT)) 14996 // cond: 14997 // result: (MOVQconst [0]) 14998 for { 14999 v_0 := v.Args[0] 15000 if v_0.Op != OpAMD64FlagLT_UGT { 15001 break 15002 } 15003 v.reset(OpAMD64MOVQconst) 15004 v.AuxInt = 0 15005 return true 15006 } 15007 // match: (SBBQcarrymask (FlagGT_ULT)) 15008 // cond: 15009 // result: (MOVQconst [-1]) 15010 for { 15011 v_0 := v.Args[0] 15012 if v_0.Op != OpAMD64FlagGT_ULT { 15013 break 15014 } 15015 v.reset(OpAMD64MOVQconst) 15016 v.AuxInt = -1 15017 return true 15018 } 15019 // match: (SBBQcarrymask (FlagGT_UGT)) 15020 // cond: 15021 // result: (MOVQconst [0]) 15022 for { 15023 v_0 := v.Args[0] 15024 if v_0.Op != OpAMD64FlagGT_UGT { 15025 break 15026 } 15027 v.reset(OpAMD64MOVQconst) 15028 v.AuxInt = 0 15029 return true 15030 } 15031 return false 15032 } 15033 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { 15034 b := v.Block 15035 _ = b 15036 // match: (SETA (InvertFlags x)) 15037 // cond: 15038 // result: (SETB x) 15039 for { 15040 v_0 := v.Args[0] 15041 if v_0.Op != OpAMD64InvertFlags { 15042 break 15043 } 15044 x := v_0.Args[0] 15045 v.reset(OpAMD64SETB) 15046 v.AddArg(x) 15047 return true 15048 } 15049 // match: (SETA (FlagEQ)) 15050 // cond: 15051 // result: (MOVBconst [0]) 15052 for { 15053 v_0 := v.Args[0] 15054 if v_0.Op != OpAMD64FlagEQ { 15055 break 15056 } 15057 v.reset(OpAMD64MOVBconst) 15058 v.AuxInt = 0 15059 return true 15060 } 15061 // match: (SETA (FlagLT_ULT)) 15062 // cond: 15063 // result: (MOVBconst [0]) 15064 for { 15065 v_0 := v.Args[0] 15066 if v_0.Op != OpAMD64FlagLT_ULT { 15067 break 15068 } 15069 v.reset(OpAMD64MOVBconst) 15070 v.AuxInt = 0 15071 return true 15072 } 15073 // match: (SETA (FlagLT_UGT)) 15074 // cond: 15075 // result: (MOVBconst [1]) 15076 for { 15077 v_0 := v.Args[0] 15078 if v_0.Op != OpAMD64FlagLT_UGT { 15079 break 15080 } 15081 v.reset(OpAMD64MOVBconst) 15082 v.AuxInt = 1 15083 return true 15084 } 15085 // match: (SETA (FlagGT_ULT)) 15086 // cond: 15087 // result: (MOVBconst [0]) 15088 for { 15089 v_0 := v.Args[0] 15090 if v_0.Op != OpAMD64FlagGT_ULT { 15091 break 15092 } 15093 v.reset(OpAMD64MOVBconst) 15094 v.AuxInt = 0 15095 return true 15096 } 15097 // match: (SETA (FlagGT_UGT)) 15098 // cond: 15099 // result: (MOVBconst [1]) 15100 for { 15101 v_0 := v.Args[0] 15102 if v_0.Op != OpAMD64FlagGT_UGT { 15103 break 15104 } 15105 v.reset(OpAMD64MOVBconst) 15106 v.AuxInt = 1 15107 return true 15108 } 15109 return false 15110 } 15111 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { 15112 b := v.Block 15113 _ = b 15114 // match: (SETAE (InvertFlags x)) 15115 // cond: 15116 // result: (SETBE x) 15117 for { 15118 v_0 := v.Args[0] 15119 if v_0.Op != OpAMD64InvertFlags { 15120 break 15121 } 15122 x := v_0.Args[0] 15123 v.reset(OpAMD64SETBE) 15124 v.AddArg(x) 15125 return true 15126 } 15127 // match: (SETAE (FlagEQ)) 15128 // cond: 15129 // result: (MOVBconst [1]) 15130 for { 15131 v_0 := v.Args[0] 15132 if v_0.Op != OpAMD64FlagEQ { 15133 break 15134 } 15135 v.reset(OpAMD64MOVBconst) 15136 v.AuxInt = 1 15137 return true 15138 } 15139 // match: (SETAE (FlagLT_ULT)) 15140 // cond: 15141 // result: (MOVBconst [0]) 15142 for { 15143 v_0 := v.Args[0] 15144 if v_0.Op != OpAMD64FlagLT_ULT { 15145 break 15146 } 15147 v.reset(OpAMD64MOVBconst) 15148 v.AuxInt = 0 15149 return true 15150 } 15151 // match: (SETAE (FlagLT_UGT)) 15152 // cond: 15153 // result: (MOVBconst [1]) 15154 for { 15155 v_0 := v.Args[0] 15156 if v_0.Op != OpAMD64FlagLT_UGT { 15157 break 15158 } 15159 v.reset(OpAMD64MOVBconst) 15160 v.AuxInt = 1 15161 return true 15162 } 15163 // match: (SETAE (FlagGT_ULT)) 15164 // cond: 15165 // result: (MOVBconst [0]) 15166 for { 15167 v_0 := v.Args[0] 15168 if v_0.Op != OpAMD64FlagGT_ULT { 15169 break 15170 } 15171 v.reset(OpAMD64MOVBconst) 15172 v.AuxInt = 0 15173 return true 15174 } 15175 // match: (SETAE (FlagGT_UGT)) 15176 // cond: 15177 // result: (MOVBconst [1]) 15178 for { 15179 v_0 := v.Args[0] 15180 if v_0.Op != OpAMD64FlagGT_UGT { 15181 break 15182 } 15183 v.reset(OpAMD64MOVBconst) 15184 v.AuxInt = 1 15185 return true 15186 } 15187 return false 15188 } 15189 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { 15190 b := v.Block 15191 _ = b 15192 // match: (SETB (InvertFlags x)) 15193 // cond: 15194 // result: (SETA x) 15195 for { 15196 v_0 := v.Args[0] 15197 if v_0.Op != OpAMD64InvertFlags { 15198 break 15199 } 15200 x := v_0.Args[0] 15201 v.reset(OpAMD64SETA) 15202 v.AddArg(x) 15203 return true 15204 } 15205 // match: (SETB (FlagEQ)) 15206 // cond: 15207 // result: (MOVBconst [0]) 15208 for { 15209 v_0 := v.Args[0] 15210 if v_0.Op != OpAMD64FlagEQ { 15211 break 15212 } 15213 v.reset(OpAMD64MOVBconst) 15214 v.AuxInt = 0 15215 return true 15216 } 15217 // match: (SETB (FlagLT_ULT)) 15218 // cond: 15219 // result: (MOVBconst [1]) 15220 for { 15221 v_0 := v.Args[0] 15222 if v_0.Op != OpAMD64FlagLT_ULT { 15223 break 15224 } 15225 v.reset(OpAMD64MOVBconst) 15226 v.AuxInt = 1 15227 return true 15228 } 15229 // match: (SETB (FlagLT_UGT)) 15230 // cond: 15231 // result: (MOVBconst [0]) 15232 for { 15233 v_0 := v.Args[0] 15234 if v_0.Op != OpAMD64FlagLT_UGT { 15235 break 15236 } 15237 v.reset(OpAMD64MOVBconst) 15238 v.AuxInt = 0 15239 return true 15240 } 15241 // match: (SETB (FlagGT_ULT)) 15242 // cond: 15243 // result: (MOVBconst [1]) 15244 for { 15245 v_0 := v.Args[0] 15246 if v_0.Op != OpAMD64FlagGT_ULT { 15247 break 15248 } 15249 v.reset(OpAMD64MOVBconst) 15250 v.AuxInt = 1 15251 return true 15252 } 15253 // match: (SETB (FlagGT_UGT)) 15254 // cond: 15255 // result: (MOVBconst [0]) 15256 for { 15257 v_0 := v.Args[0] 15258 if v_0.Op != OpAMD64FlagGT_UGT { 15259 break 15260 } 15261 v.reset(OpAMD64MOVBconst) 15262 v.AuxInt = 0 15263 return true 15264 } 15265 return false 15266 } 15267 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { 15268 b := v.Block 15269 _ = b 15270 // match: (SETBE (InvertFlags x)) 15271 // cond: 15272 // result: (SETAE x) 15273 for { 15274 v_0 := v.Args[0] 15275 if v_0.Op != OpAMD64InvertFlags { 15276 break 15277 } 15278 x := v_0.Args[0] 15279 v.reset(OpAMD64SETAE) 15280 v.AddArg(x) 15281 return true 15282 } 15283 // match: (SETBE (FlagEQ)) 15284 // cond: 15285 // result: (MOVBconst [1]) 15286 for { 15287 v_0 := v.Args[0] 15288 if v_0.Op != OpAMD64FlagEQ { 15289 break 15290 } 15291 v.reset(OpAMD64MOVBconst) 15292 v.AuxInt = 1 15293 return true 15294 } 15295 // match: (SETBE (FlagLT_ULT)) 15296 // cond: 15297 // result: (MOVBconst [1]) 15298 for { 15299 v_0 := v.Args[0] 15300 if v_0.Op != OpAMD64FlagLT_ULT { 15301 break 15302 } 15303 v.reset(OpAMD64MOVBconst) 15304 v.AuxInt = 1 15305 return true 15306 } 15307 // match: (SETBE (FlagLT_UGT)) 15308 // cond: 15309 // result: (MOVBconst [0]) 15310 for { 15311 v_0 := v.Args[0] 15312 if v_0.Op != OpAMD64FlagLT_UGT { 15313 break 15314 } 15315 v.reset(OpAMD64MOVBconst) 15316 v.AuxInt = 0 15317 return true 15318 } 15319 // match: (SETBE (FlagGT_ULT)) 15320 // cond: 15321 // result: (MOVBconst [1]) 15322 for { 15323 v_0 := v.Args[0] 15324 if v_0.Op != OpAMD64FlagGT_ULT { 15325 break 15326 } 15327 v.reset(OpAMD64MOVBconst) 15328 v.AuxInt = 1 15329 return true 15330 } 15331 // match: (SETBE (FlagGT_UGT)) 15332 // cond: 15333 // result: (MOVBconst [0]) 15334 for { 15335 v_0 := v.Args[0] 15336 if v_0.Op != OpAMD64FlagGT_UGT { 15337 break 15338 } 15339 v.reset(OpAMD64MOVBconst) 15340 v.AuxInt = 0 15341 return true 15342 } 15343 return false 15344 } 15345 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { 15346 b := v.Block 15347 _ = b 15348 // match: (SETEQ (InvertFlags x)) 15349 // cond: 15350 // result: (SETEQ x) 15351 for { 15352 v_0 := v.Args[0] 15353 if v_0.Op != OpAMD64InvertFlags { 15354 break 15355 } 15356 x := v_0.Args[0] 15357 v.reset(OpAMD64SETEQ) 15358 v.AddArg(x) 15359 return true 15360 } 15361 // match: (SETEQ (FlagEQ)) 15362 // cond: 15363 // result: (MOVBconst [1]) 15364 for { 15365 v_0 := v.Args[0] 15366 if v_0.Op != OpAMD64FlagEQ { 15367 break 15368 } 15369 v.reset(OpAMD64MOVBconst) 15370 v.AuxInt = 1 15371 return true 15372 } 15373 // match: (SETEQ (FlagLT_ULT)) 15374 // cond: 15375 // result: (MOVBconst [0]) 15376 for { 15377 v_0 := v.Args[0] 15378 if v_0.Op != OpAMD64FlagLT_ULT { 15379 break 15380 } 15381 v.reset(OpAMD64MOVBconst) 15382 v.AuxInt = 0 15383 return true 15384 } 15385 // match: (SETEQ (FlagLT_UGT)) 15386 // cond: 15387 // result: (MOVBconst [0]) 15388 for { 15389 v_0 := v.Args[0] 15390 if v_0.Op != OpAMD64FlagLT_UGT { 15391 break 15392 } 15393 v.reset(OpAMD64MOVBconst) 15394 v.AuxInt = 0 15395 return true 15396 } 15397 // match: (SETEQ (FlagGT_ULT)) 15398 // cond: 15399 // result: (MOVBconst [0]) 15400 for { 15401 v_0 := v.Args[0] 15402 if v_0.Op != OpAMD64FlagGT_ULT { 15403 break 15404 } 15405 v.reset(OpAMD64MOVBconst) 15406 v.AuxInt = 0 15407 return true 15408 } 15409 // match: (SETEQ (FlagGT_UGT)) 15410 // cond: 15411 // result: (MOVBconst [0]) 15412 for { 15413 v_0 := v.Args[0] 15414 if v_0.Op != OpAMD64FlagGT_UGT { 15415 break 15416 } 15417 v.reset(OpAMD64MOVBconst) 15418 v.AuxInt = 0 15419 return true 15420 } 15421 return false 15422 } 15423 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { 15424 b := v.Block 15425 _ = b 15426 // match: (SETG (InvertFlags x)) 15427 // cond: 15428 // result: (SETL x) 15429 for { 15430 v_0 := v.Args[0] 15431 if v_0.Op != OpAMD64InvertFlags { 15432 break 15433 } 15434 x := v_0.Args[0] 15435 v.reset(OpAMD64SETL) 15436 v.AddArg(x) 15437 return true 15438 } 15439 // match: (SETG (FlagEQ)) 15440 // cond: 15441 // result: (MOVBconst [0]) 15442 for { 15443 v_0 := v.Args[0] 15444 if v_0.Op != OpAMD64FlagEQ { 15445 break 15446 } 15447 v.reset(OpAMD64MOVBconst) 15448 v.AuxInt = 0 15449 return true 15450 } 15451 // match: (SETG (FlagLT_ULT)) 15452 // cond: 15453 // result: (MOVBconst [0]) 15454 for { 15455 v_0 := v.Args[0] 15456 if v_0.Op != OpAMD64FlagLT_ULT { 15457 break 15458 } 15459 v.reset(OpAMD64MOVBconst) 15460 v.AuxInt = 0 15461 return true 15462 } 15463 // match: (SETG (FlagLT_UGT)) 15464 // cond: 15465 // result: (MOVBconst [0]) 15466 for { 15467 v_0 := v.Args[0] 15468 if v_0.Op != OpAMD64FlagLT_UGT { 15469 break 15470 } 15471 v.reset(OpAMD64MOVBconst) 15472 v.AuxInt = 0 15473 return true 15474 } 15475 // match: (SETG (FlagGT_ULT)) 15476 // cond: 15477 // result: (MOVBconst [1]) 15478 for { 15479 v_0 := v.Args[0] 15480 if v_0.Op != OpAMD64FlagGT_ULT { 15481 break 15482 } 15483 v.reset(OpAMD64MOVBconst) 15484 v.AuxInt = 1 15485 return true 15486 } 15487 // match: (SETG (FlagGT_UGT)) 15488 // cond: 15489 // result: (MOVBconst [1]) 15490 for { 15491 v_0 := v.Args[0] 15492 if v_0.Op != OpAMD64FlagGT_UGT { 15493 break 15494 } 15495 v.reset(OpAMD64MOVBconst) 15496 v.AuxInt = 1 15497 return true 15498 } 15499 return false 15500 } 15501 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { 15502 b := v.Block 15503 _ = b 15504 // match: (SETGE (InvertFlags x)) 15505 // cond: 15506 // result: (SETLE x) 15507 for { 15508 v_0 := v.Args[0] 15509 if v_0.Op != OpAMD64InvertFlags { 15510 break 15511 } 15512 x := v_0.Args[0] 15513 v.reset(OpAMD64SETLE) 15514 v.AddArg(x) 15515 return true 15516 } 15517 // match: (SETGE (FlagEQ)) 15518 // cond: 15519 // result: (MOVBconst [1]) 15520 for { 15521 v_0 := v.Args[0] 15522 if v_0.Op != OpAMD64FlagEQ { 15523 break 15524 } 15525 v.reset(OpAMD64MOVBconst) 15526 v.AuxInt = 1 15527 return true 15528 } 15529 // match: (SETGE (FlagLT_ULT)) 15530 // cond: 15531 // result: (MOVBconst [0]) 15532 for { 15533 v_0 := v.Args[0] 15534 if v_0.Op != OpAMD64FlagLT_ULT { 15535 break 15536 } 15537 v.reset(OpAMD64MOVBconst) 15538 v.AuxInt = 0 15539 return true 15540 } 15541 // match: (SETGE (FlagLT_UGT)) 15542 // cond: 15543 // result: (MOVBconst [0]) 15544 for { 15545 v_0 := v.Args[0] 15546 if v_0.Op != OpAMD64FlagLT_UGT { 15547 break 15548 } 15549 v.reset(OpAMD64MOVBconst) 15550 v.AuxInt = 0 15551 return true 15552 } 15553 // match: (SETGE (FlagGT_ULT)) 15554 // cond: 15555 // result: (MOVBconst [1]) 15556 for { 15557 v_0 := v.Args[0] 15558 if v_0.Op != OpAMD64FlagGT_ULT { 15559 break 15560 } 15561 v.reset(OpAMD64MOVBconst) 15562 v.AuxInt = 1 15563 return true 15564 } 15565 // match: (SETGE (FlagGT_UGT)) 15566 // cond: 15567 // result: (MOVBconst [1]) 15568 for { 15569 v_0 := v.Args[0] 15570 if v_0.Op != OpAMD64FlagGT_UGT { 15571 break 15572 } 15573 v.reset(OpAMD64MOVBconst) 15574 v.AuxInt = 1 15575 return true 15576 } 15577 return false 15578 } 15579 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { 15580 b := v.Block 15581 _ = b 15582 // match: (SETL (InvertFlags x)) 15583 // cond: 15584 // result: (SETG x) 15585 for { 15586 v_0 := v.Args[0] 15587 if v_0.Op != OpAMD64InvertFlags { 15588 break 15589 } 15590 x := v_0.Args[0] 15591 v.reset(OpAMD64SETG) 15592 v.AddArg(x) 15593 return true 15594 } 15595 // match: (SETL (FlagEQ)) 15596 // cond: 15597 // result: (MOVBconst [0]) 15598 for { 15599 v_0 := v.Args[0] 15600 if v_0.Op != OpAMD64FlagEQ { 15601 break 15602 } 15603 v.reset(OpAMD64MOVBconst) 15604 v.AuxInt = 0 15605 return true 15606 } 15607 // match: (SETL (FlagLT_ULT)) 15608 // cond: 15609 // result: (MOVBconst [1]) 15610 for { 15611 v_0 := v.Args[0] 15612 if v_0.Op != OpAMD64FlagLT_ULT { 15613 break 15614 } 15615 v.reset(OpAMD64MOVBconst) 15616 v.AuxInt = 1 15617 return true 15618 } 15619 // match: (SETL (FlagLT_UGT)) 15620 // cond: 15621 // result: (MOVBconst [1]) 15622 for { 15623 v_0 := v.Args[0] 15624 if v_0.Op != OpAMD64FlagLT_UGT { 15625 break 15626 } 15627 v.reset(OpAMD64MOVBconst) 15628 v.AuxInt = 1 15629 return true 15630 } 15631 // match: (SETL (FlagGT_ULT)) 15632 // cond: 15633 // result: (MOVBconst [0]) 15634 for { 15635 v_0 := v.Args[0] 15636 if v_0.Op != OpAMD64FlagGT_ULT { 15637 break 15638 } 15639 v.reset(OpAMD64MOVBconst) 15640 v.AuxInt = 0 15641 return true 15642 } 15643 // match: (SETL (FlagGT_UGT)) 15644 // cond: 15645 // result: (MOVBconst [0]) 15646 for { 15647 v_0 := v.Args[0] 15648 if v_0.Op != OpAMD64FlagGT_UGT { 15649 break 15650 } 15651 v.reset(OpAMD64MOVBconst) 15652 v.AuxInt = 0 15653 return true 15654 } 15655 return false 15656 } 15657 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { 15658 b := v.Block 15659 _ = b 15660 // match: (SETLE (InvertFlags x)) 15661 // cond: 15662 // result: (SETGE x) 15663 for { 15664 v_0 := v.Args[0] 15665 if v_0.Op != OpAMD64InvertFlags { 15666 break 15667 } 15668 x := v_0.Args[0] 15669 v.reset(OpAMD64SETGE) 15670 v.AddArg(x) 15671 return true 15672 } 15673 // match: (SETLE (FlagEQ)) 15674 // cond: 15675 // result: (MOVBconst [1]) 15676 for { 15677 v_0 := v.Args[0] 15678 if v_0.Op != OpAMD64FlagEQ { 15679 break 15680 } 15681 v.reset(OpAMD64MOVBconst) 15682 v.AuxInt = 1 15683 return true 15684 } 15685 // match: (SETLE (FlagLT_ULT)) 15686 // cond: 15687 // result: (MOVBconst [1]) 15688 for { 15689 v_0 := v.Args[0] 15690 if v_0.Op != OpAMD64FlagLT_ULT { 15691 break 15692 } 15693 v.reset(OpAMD64MOVBconst) 15694 v.AuxInt = 1 15695 return true 15696 } 15697 // match: (SETLE (FlagLT_UGT)) 15698 // cond: 15699 // result: (MOVBconst [1]) 15700 for { 15701 v_0 := v.Args[0] 15702 if v_0.Op != OpAMD64FlagLT_UGT { 15703 break 15704 } 15705 v.reset(OpAMD64MOVBconst) 15706 v.AuxInt = 1 15707 return true 15708 } 15709 // match: (SETLE (FlagGT_ULT)) 15710 // cond: 15711 // result: (MOVBconst [0]) 15712 for { 15713 v_0 := v.Args[0] 15714 if v_0.Op != OpAMD64FlagGT_ULT { 15715 break 15716 } 15717 v.reset(OpAMD64MOVBconst) 15718 v.AuxInt = 0 15719 return true 15720 } 15721 // match: (SETLE (FlagGT_UGT)) 15722 // cond: 15723 // result: (MOVBconst [0]) 15724 for { 15725 v_0 := v.Args[0] 15726 if v_0.Op != OpAMD64FlagGT_UGT { 15727 break 15728 } 15729 v.reset(OpAMD64MOVBconst) 15730 v.AuxInt = 0 15731 return true 15732 } 15733 return false 15734 } 15735 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { 15736 b := v.Block 15737 _ = b 15738 // match: (SETNE (InvertFlags x)) 15739 // cond: 15740 // result: (SETNE x) 15741 for { 15742 v_0 := v.Args[0] 15743 if v_0.Op != OpAMD64InvertFlags { 15744 break 15745 } 15746 x := v_0.Args[0] 15747 v.reset(OpAMD64SETNE) 15748 v.AddArg(x) 15749 return true 15750 } 15751 // match: (SETNE (FlagEQ)) 15752 // cond: 15753 // result: (MOVBconst [0]) 15754 for { 15755 v_0 := v.Args[0] 15756 if v_0.Op != OpAMD64FlagEQ { 15757 break 15758 } 15759 v.reset(OpAMD64MOVBconst) 15760 v.AuxInt = 0 15761 return true 15762 } 15763 // match: (SETNE (FlagLT_ULT)) 15764 // cond: 15765 // result: (MOVBconst [1]) 15766 for { 15767 v_0 := v.Args[0] 15768 if v_0.Op != OpAMD64FlagLT_ULT { 15769 break 15770 } 15771 v.reset(OpAMD64MOVBconst) 15772 v.AuxInt = 1 15773 return true 15774 } 15775 // match: (SETNE (FlagLT_UGT)) 15776 // cond: 15777 // result: (MOVBconst [1]) 15778 for { 15779 v_0 := v.Args[0] 15780 if v_0.Op != OpAMD64FlagLT_UGT { 15781 break 15782 } 15783 v.reset(OpAMD64MOVBconst) 15784 v.AuxInt = 1 15785 return true 15786 } 15787 // match: (SETNE (FlagGT_ULT)) 15788 // cond: 15789 // result: (MOVBconst [1]) 15790 for { 15791 v_0 := v.Args[0] 15792 if v_0.Op != OpAMD64FlagGT_ULT { 15793 break 15794 } 15795 v.reset(OpAMD64MOVBconst) 15796 v.AuxInt = 1 15797 return true 15798 } 15799 // match: (SETNE (FlagGT_UGT)) 15800 // cond: 15801 // result: (MOVBconst [1]) 15802 for { 15803 v_0 := v.Args[0] 15804 if v_0.Op != OpAMD64FlagGT_UGT { 15805 break 15806 } 15807 v.reset(OpAMD64MOVBconst) 15808 v.AuxInt = 1 15809 return true 15810 } 15811 return false 15812 } 15813 func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool { 15814 b := v.Block 15815 _ = b 15816 // match: (SHLB x (MOVQconst [c])) 15817 // cond: 15818 // result: (SHLBconst [c&31] x) 15819 for { 15820 x := v.Args[0] 15821 v_1 := v.Args[1] 15822 if v_1.Op != OpAMD64MOVQconst { 15823 break 15824 } 15825 c := v_1.AuxInt 15826 v.reset(OpAMD64SHLBconst) 15827 v.AuxInt = c & 31 15828 v.AddArg(x) 15829 return true 15830 } 15831 // match: (SHLB x (MOVLconst [c])) 15832 // cond: 15833 // result: (SHLBconst [c&31] x) 15834 for { 15835 x := v.Args[0] 15836 v_1 := v.Args[1] 15837 if v_1.Op != OpAMD64MOVLconst { 15838 break 15839 } 15840 c := v_1.AuxInt 15841 v.reset(OpAMD64SHLBconst) 15842 v.AuxInt = c & 31 15843 v.AddArg(x) 15844 return true 15845 } 15846 // match: (SHLB x (MOVWconst [c])) 15847 // cond: 15848 // result: (SHLBconst [c&31] x) 15849 for { 15850 x := v.Args[0] 15851 v_1 := v.Args[1] 15852 if v_1.Op != OpAMD64MOVWconst { 15853 break 15854 } 15855 c := v_1.AuxInt 15856 v.reset(OpAMD64SHLBconst) 15857 v.AuxInt = c & 31 15858 v.AddArg(x) 15859 return true 15860 } 15861 // match: (SHLB x (MOVBconst [c])) 15862 // cond: 15863 // result: (SHLBconst [c&31] x) 15864 for { 15865 x := v.Args[0] 15866 v_1 := v.Args[1] 15867 if v_1.Op != OpAMD64MOVBconst { 15868 break 15869 } 15870 c := v_1.AuxInt 15871 v.reset(OpAMD64SHLBconst) 15872 v.AuxInt = c & 31 15873 v.AddArg(x) 15874 return true 15875 } 15876 // match: (SHLB x (ANDBconst [31] y)) 15877 // cond: 15878 // result: (SHLB x y) 15879 for { 15880 x := v.Args[0] 15881 v_1 := v.Args[1] 15882 if v_1.Op != OpAMD64ANDBconst { 15883 break 15884 } 15885 if v_1.AuxInt != 31 { 15886 break 15887 } 15888 y := v_1.Args[0] 15889 v.reset(OpAMD64SHLB) 15890 v.AddArg(x) 15891 v.AddArg(y) 15892 return true 15893 } 15894 return false 15895 } 15896 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { 15897 b := v.Block 15898 _ = b 15899 // match: (SHLL x (MOVQconst [c])) 15900 // cond: 15901 // result: (SHLLconst [c&31] x) 15902 for { 15903 x := v.Args[0] 15904 v_1 := v.Args[1] 15905 if v_1.Op != OpAMD64MOVQconst { 15906 break 15907 } 15908 c := v_1.AuxInt 15909 v.reset(OpAMD64SHLLconst) 15910 v.AuxInt = c & 31 15911 v.AddArg(x) 15912 return true 15913 } 15914 // match: (SHLL x (MOVLconst [c])) 15915 // cond: 15916 // result: (SHLLconst [c&31] x) 15917 for { 15918 x := v.Args[0] 15919 v_1 := v.Args[1] 15920 if v_1.Op != OpAMD64MOVLconst { 15921 break 15922 } 15923 c := v_1.AuxInt 15924 v.reset(OpAMD64SHLLconst) 15925 v.AuxInt = c & 31 15926 v.AddArg(x) 15927 return true 15928 } 15929 // match: (SHLL x (MOVWconst [c])) 15930 // cond: 15931 // result: (SHLLconst [c&31] x) 15932 for { 15933 x := v.Args[0] 15934 v_1 := v.Args[1] 15935 if v_1.Op != OpAMD64MOVWconst { 15936 break 15937 } 15938 c := v_1.AuxInt 15939 v.reset(OpAMD64SHLLconst) 15940 v.AuxInt = c & 31 15941 v.AddArg(x) 15942 return true 15943 } 15944 // match: (SHLL x (MOVBconst [c])) 15945 // cond: 15946 // result: (SHLLconst [c&31] x) 15947 for { 15948 x := v.Args[0] 15949 v_1 := v.Args[1] 15950 if v_1.Op != OpAMD64MOVBconst { 15951 break 15952 } 15953 c := v_1.AuxInt 15954 v.reset(OpAMD64SHLLconst) 15955 v.AuxInt = c & 31 15956 v.AddArg(x) 15957 return true 15958 } 15959 // match: (SHLL x (ANDLconst [31] y)) 15960 // cond: 15961 // result: (SHLL x y) 15962 for { 15963 x := v.Args[0] 15964 v_1 := v.Args[1] 15965 if v_1.Op != OpAMD64ANDLconst { 15966 break 15967 } 15968 if v_1.AuxInt != 31 { 15969 break 15970 } 15971 y := v_1.Args[0] 15972 v.reset(OpAMD64SHLL) 15973 v.AddArg(x) 15974 v.AddArg(y) 15975 return true 15976 } 15977 return false 15978 } 15979 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { 15980 b := v.Block 15981 _ = b 15982 // match: (SHLQ x (MOVQconst [c])) 15983 // cond: 15984 // result: (SHLQconst [c&63] x) 15985 for { 15986 x := v.Args[0] 15987 v_1 := v.Args[1] 15988 if v_1.Op != OpAMD64MOVQconst { 15989 break 15990 } 15991 c := v_1.AuxInt 15992 v.reset(OpAMD64SHLQconst) 15993 v.AuxInt = c & 63 15994 v.AddArg(x) 15995 return true 15996 } 15997 // match: (SHLQ x (MOVLconst [c])) 15998 // cond: 15999 // result: (SHLQconst [c&63] x) 16000 for { 16001 x := v.Args[0] 16002 v_1 := v.Args[1] 16003 if v_1.Op != OpAMD64MOVLconst { 16004 break 16005 } 16006 c := v_1.AuxInt 16007 v.reset(OpAMD64SHLQconst) 16008 v.AuxInt = c & 63 16009 v.AddArg(x) 16010 return true 16011 } 16012 // match: (SHLQ x (MOVWconst [c])) 16013 // cond: 16014 // result: (SHLQconst [c&63] x) 16015 for { 16016 x := v.Args[0] 16017 v_1 := v.Args[1] 16018 if v_1.Op != OpAMD64MOVWconst { 16019 break 16020 } 16021 c := v_1.AuxInt 16022 v.reset(OpAMD64SHLQconst) 16023 v.AuxInt = c & 63 16024 v.AddArg(x) 16025 return true 16026 } 16027 // match: (SHLQ x (MOVBconst [c])) 16028 // cond: 16029 // result: (SHLQconst [c&63] x) 16030 for { 16031 x := v.Args[0] 16032 v_1 := v.Args[1] 16033 if v_1.Op != OpAMD64MOVBconst { 16034 break 16035 } 16036 c := v_1.AuxInt 16037 v.reset(OpAMD64SHLQconst) 16038 v.AuxInt = c & 63 16039 v.AddArg(x) 16040 return true 16041 } 16042 // match: (SHLQ x (ANDQconst [63] y)) 16043 // cond: 16044 // result: (SHLQ x y) 16045 for { 16046 x := v.Args[0] 16047 v_1 := v.Args[1] 16048 if v_1.Op != OpAMD64ANDQconst { 16049 break 16050 } 16051 if v_1.AuxInt != 63 { 16052 break 16053 } 16054 y := v_1.Args[0] 16055 v.reset(OpAMD64SHLQ) 16056 v.AddArg(x) 16057 v.AddArg(y) 16058 return true 16059 } 16060 return false 16061 } 16062 func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool { 16063 b := v.Block 16064 _ = b 16065 // match: (SHLW x (MOVQconst [c])) 16066 // cond: 16067 // result: (SHLWconst [c&31] x) 16068 for { 16069 x := v.Args[0] 16070 v_1 := v.Args[1] 16071 if v_1.Op != OpAMD64MOVQconst { 16072 break 16073 } 16074 c := v_1.AuxInt 16075 v.reset(OpAMD64SHLWconst) 16076 v.AuxInt = c & 31 16077 v.AddArg(x) 16078 return true 16079 } 16080 // match: (SHLW x (MOVLconst [c])) 16081 // cond: 16082 // result: (SHLWconst [c&31] x) 16083 for { 16084 x := v.Args[0] 16085 v_1 := v.Args[1] 16086 if v_1.Op != OpAMD64MOVLconst { 16087 break 16088 } 16089 c := v_1.AuxInt 16090 v.reset(OpAMD64SHLWconst) 16091 v.AuxInt = c & 31 16092 v.AddArg(x) 16093 return true 16094 } 16095 // match: (SHLW x (MOVWconst [c])) 16096 // cond: 16097 // result: (SHLWconst [c&31] x) 16098 for { 16099 x := v.Args[0] 16100 v_1 := v.Args[1] 16101 if v_1.Op != OpAMD64MOVWconst { 16102 break 16103 } 16104 c := v_1.AuxInt 16105 v.reset(OpAMD64SHLWconst) 16106 v.AuxInt = c & 31 16107 v.AddArg(x) 16108 return true 16109 } 16110 // match: (SHLW x (MOVBconst [c])) 16111 // cond: 16112 // result: (SHLWconst [c&31] x) 16113 for { 16114 x := v.Args[0] 16115 v_1 := v.Args[1] 16116 if v_1.Op != OpAMD64MOVBconst { 16117 break 16118 } 16119 c := v_1.AuxInt 16120 v.reset(OpAMD64SHLWconst) 16121 v.AuxInt = c & 31 16122 v.AddArg(x) 16123 return true 16124 } 16125 // match: (SHLW x (ANDWconst [31] y)) 16126 // cond: 16127 // result: (SHLW x y) 16128 for { 16129 x := v.Args[0] 16130 v_1 := v.Args[1] 16131 if v_1.Op != OpAMD64ANDWconst { 16132 break 16133 } 16134 if v_1.AuxInt != 31 { 16135 break 16136 } 16137 y := v_1.Args[0] 16138 v.reset(OpAMD64SHLW) 16139 v.AddArg(x) 16140 v.AddArg(y) 16141 return true 16142 } 16143 return false 16144 } 16145 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 16146 b := v.Block 16147 _ = b 16148 // match: (SHRB x (MOVQconst [c])) 16149 // cond: 16150 // result: (SHRBconst [c&31] x) 16151 for { 16152 x := v.Args[0] 16153 v_1 := v.Args[1] 16154 if v_1.Op != OpAMD64MOVQconst { 16155 break 16156 } 16157 c := v_1.AuxInt 16158 v.reset(OpAMD64SHRBconst) 16159 v.AuxInt = c & 31 16160 v.AddArg(x) 16161 return true 16162 } 16163 // match: (SHRB x (MOVLconst [c])) 16164 // cond: 16165 // result: (SHRBconst [c&31] x) 16166 for { 16167 x := v.Args[0] 16168 v_1 := v.Args[1] 16169 if v_1.Op != OpAMD64MOVLconst { 16170 break 16171 } 16172 c := v_1.AuxInt 16173 v.reset(OpAMD64SHRBconst) 16174 v.AuxInt = c & 31 16175 v.AddArg(x) 16176 return true 16177 } 16178 // match: (SHRB x (MOVWconst [c])) 16179 // cond: 16180 // result: (SHRBconst [c&31] x) 16181 for { 16182 x := v.Args[0] 16183 v_1 := v.Args[1] 16184 if v_1.Op != OpAMD64MOVWconst { 16185 break 16186 } 16187 c := v_1.AuxInt 16188 v.reset(OpAMD64SHRBconst) 16189 v.AuxInt = c & 31 16190 v.AddArg(x) 16191 return true 16192 } 16193 // match: (SHRB x (MOVBconst [c])) 16194 // cond: 16195 // result: (SHRBconst [c&31] x) 16196 for { 16197 x := v.Args[0] 16198 v_1 := v.Args[1] 16199 if v_1.Op != OpAMD64MOVBconst { 16200 break 16201 } 16202 c := v_1.AuxInt 16203 v.reset(OpAMD64SHRBconst) 16204 v.AuxInt = c & 31 16205 v.AddArg(x) 16206 return true 16207 } 16208 // match: (SHRB x (ANDBconst [31] y)) 16209 // cond: 16210 // result: (SHRB x y) 16211 for { 16212 x := v.Args[0] 16213 v_1 := v.Args[1] 16214 if v_1.Op != OpAMD64ANDBconst { 16215 break 16216 } 16217 if v_1.AuxInt != 31 { 16218 break 16219 } 16220 y := v_1.Args[0] 16221 v.reset(OpAMD64SHRB) 16222 v.AddArg(x) 16223 v.AddArg(y) 16224 return true 16225 } 16226 return false 16227 } 16228 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 16229 b := v.Block 16230 _ = b 16231 // match: (SHRL x (MOVQconst [c])) 16232 // cond: 16233 // result: (SHRLconst [c&31] x) 16234 for { 16235 x := v.Args[0] 16236 v_1 := v.Args[1] 16237 if v_1.Op != OpAMD64MOVQconst { 16238 break 16239 } 16240 c := v_1.AuxInt 16241 v.reset(OpAMD64SHRLconst) 16242 v.AuxInt = c & 31 16243 v.AddArg(x) 16244 return true 16245 } 16246 // match: (SHRL x (MOVLconst [c])) 16247 // cond: 16248 // result: (SHRLconst [c&31] x) 16249 for { 16250 x := v.Args[0] 16251 v_1 := v.Args[1] 16252 if v_1.Op != OpAMD64MOVLconst { 16253 break 16254 } 16255 c := v_1.AuxInt 16256 v.reset(OpAMD64SHRLconst) 16257 v.AuxInt = c & 31 16258 v.AddArg(x) 16259 return true 16260 } 16261 // match: (SHRL x (MOVWconst [c])) 16262 // cond: 16263 // result: (SHRLconst [c&31] x) 16264 for { 16265 x := v.Args[0] 16266 v_1 := v.Args[1] 16267 if v_1.Op != OpAMD64MOVWconst { 16268 break 16269 } 16270 c := v_1.AuxInt 16271 v.reset(OpAMD64SHRLconst) 16272 v.AuxInt = c & 31 16273 v.AddArg(x) 16274 return true 16275 } 16276 // match: (SHRL x (MOVBconst [c])) 16277 // cond: 16278 // result: (SHRLconst [c&31] x) 16279 for { 16280 x := v.Args[0] 16281 v_1 := v.Args[1] 16282 if v_1.Op != OpAMD64MOVBconst { 16283 break 16284 } 16285 c := v_1.AuxInt 16286 v.reset(OpAMD64SHRLconst) 16287 v.AuxInt = c & 31 16288 v.AddArg(x) 16289 return true 16290 } 16291 // match: (SHRL x (ANDLconst [31] y)) 16292 // cond: 16293 // result: (SHRL x y) 16294 for { 16295 x := v.Args[0] 16296 v_1 := v.Args[1] 16297 if v_1.Op != OpAMD64ANDLconst { 16298 break 16299 } 16300 if v_1.AuxInt != 31 { 16301 break 16302 } 16303 y := v_1.Args[0] 16304 v.reset(OpAMD64SHRL) 16305 v.AddArg(x) 16306 v.AddArg(y) 16307 return true 16308 } 16309 return false 16310 } 16311 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 16312 b := v.Block 16313 _ = b 16314 // match: (SHRQ x (MOVQconst [c])) 16315 // cond: 16316 // result: (SHRQconst [c&63] x) 16317 for { 16318 x := v.Args[0] 16319 v_1 := v.Args[1] 16320 if v_1.Op != OpAMD64MOVQconst { 16321 break 16322 } 16323 c := v_1.AuxInt 16324 v.reset(OpAMD64SHRQconst) 16325 v.AuxInt = c & 63 16326 v.AddArg(x) 16327 return true 16328 } 16329 // match: (SHRQ x (MOVLconst [c])) 16330 // cond: 16331 // result: (SHRQconst [c&63] x) 16332 for { 16333 x := v.Args[0] 16334 v_1 := v.Args[1] 16335 if v_1.Op != OpAMD64MOVLconst { 16336 break 16337 } 16338 c := v_1.AuxInt 16339 v.reset(OpAMD64SHRQconst) 16340 v.AuxInt = c & 63 16341 v.AddArg(x) 16342 return true 16343 } 16344 // match: (SHRQ x (MOVWconst [c])) 16345 // cond: 16346 // result: (SHRQconst [c&63] x) 16347 for { 16348 x := v.Args[0] 16349 v_1 := v.Args[1] 16350 if v_1.Op != OpAMD64MOVWconst { 16351 break 16352 } 16353 c := v_1.AuxInt 16354 v.reset(OpAMD64SHRQconst) 16355 v.AuxInt = c & 63 16356 v.AddArg(x) 16357 return true 16358 } 16359 // match: (SHRQ x (MOVBconst [c])) 16360 // cond: 16361 // result: (SHRQconst [c&63] x) 16362 for { 16363 x := v.Args[0] 16364 v_1 := v.Args[1] 16365 if v_1.Op != OpAMD64MOVBconst { 16366 break 16367 } 16368 c := v_1.AuxInt 16369 v.reset(OpAMD64SHRQconst) 16370 v.AuxInt = c & 63 16371 v.AddArg(x) 16372 return true 16373 } 16374 // match: (SHRQ x (ANDQconst [63] y)) 16375 // cond: 16376 // result: (SHRQ x y) 16377 for { 16378 x := v.Args[0] 16379 v_1 := v.Args[1] 16380 if v_1.Op != OpAMD64ANDQconst { 16381 break 16382 } 16383 if v_1.AuxInt != 63 { 16384 break 16385 } 16386 y := v_1.Args[0] 16387 v.reset(OpAMD64SHRQ) 16388 v.AddArg(x) 16389 v.AddArg(y) 16390 return true 16391 } 16392 return false 16393 } 16394 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { 16395 b := v.Block 16396 _ = b 16397 // match: (SHRW x (MOVQconst [c])) 16398 // cond: 16399 // result: (SHRWconst [c&31] x) 16400 for { 16401 x := v.Args[0] 16402 v_1 := v.Args[1] 16403 if v_1.Op != OpAMD64MOVQconst { 16404 break 16405 } 16406 c := v_1.AuxInt 16407 v.reset(OpAMD64SHRWconst) 16408 v.AuxInt = c & 31 16409 v.AddArg(x) 16410 return true 16411 } 16412 // match: (SHRW x (MOVLconst [c])) 16413 // cond: 16414 // result: (SHRWconst [c&31] x) 16415 for { 16416 x := v.Args[0] 16417 v_1 := v.Args[1] 16418 if v_1.Op != OpAMD64MOVLconst { 16419 break 16420 } 16421 c := v_1.AuxInt 16422 v.reset(OpAMD64SHRWconst) 16423 v.AuxInt = c & 31 16424 v.AddArg(x) 16425 return true 16426 } 16427 // match: (SHRW x (MOVWconst [c])) 16428 // cond: 16429 // result: (SHRWconst [c&31] x) 16430 for { 16431 x := v.Args[0] 16432 v_1 := v.Args[1] 16433 if v_1.Op != OpAMD64MOVWconst { 16434 break 16435 } 16436 c := v_1.AuxInt 16437 v.reset(OpAMD64SHRWconst) 16438 v.AuxInt = c & 31 16439 v.AddArg(x) 16440 return true 16441 } 16442 // match: (SHRW x (MOVBconst [c])) 16443 // cond: 16444 // result: (SHRWconst [c&31] x) 16445 for { 16446 x := v.Args[0] 16447 v_1 := v.Args[1] 16448 if v_1.Op != OpAMD64MOVBconst { 16449 break 16450 } 16451 c := v_1.AuxInt 16452 v.reset(OpAMD64SHRWconst) 16453 v.AuxInt = c & 31 16454 v.AddArg(x) 16455 return true 16456 } 16457 // match: (SHRW x (ANDWconst [31] y)) 16458 // cond: 16459 // result: (SHRW x y) 16460 for { 16461 x := v.Args[0] 16462 v_1 := v.Args[1] 16463 if v_1.Op != OpAMD64ANDWconst { 16464 break 16465 } 16466 if v_1.AuxInt != 31 { 16467 break 16468 } 16469 y := v_1.Args[0] 16470 v.reset(OpAMD64SHRW) 16471 v.AddArg(x) 16472 v.AddArg(y) 16473 return true 16474 } 16475 return false 16476 } 16477 func rewriteValueAMD64_OpAMD64SUBB(v *Value, config *Config) bool { 16478 b := v.Block 16479 _ = b 16480 // match: (SUBB x (MOVBconst [c])) 16481 // cond: 16482 // result: (SUBBconst x [c]) 16483 for { 16484 x := v.Args[0] 16485 v_1 := v.Args[1] 16486 if v_1.Op != OpAMD64MOVBconst { 16487 break 16488 } 16489 c := v_1.AuxInt 16490 v.reset(OpAMD64SUBBconst) 16491 v.AddArg(x) 16492 v.AuxInt = c 16493 return true 16494 } 16495 // match: (SUBB (MOVBconst [c]) x) 16496 // cond: 16497 // result: (NEGB (SUBBconst <v.Type> x [c])) 16498 for { 16499 v_0 := v.Args[0] 16500 if v_0.Op != OpAMD64MOVBconst { 16501 break 16502 } 16503 c := v_0.AuxInt 16504 x := v.Args[1] 16505 v.reset(OpAMD64NEGB) 16506 v0 := b.NewValue0(v.Line, OpAMD64SUBBconst, v.Type) 16507 v0.AddArg(x) 16508 v0.AuxInt = c 16509 v.AddArg(v0) 16510 return true 16511 } 16512 // match: (SUBB x x) 16513 // cond: 16514 // result: (MOVBconst [0]) 16515 for { 16516 x := v.Args[0] 16517 if x != v.Args[1] { 16518 break 16519 } 16520 v.reset(OpAMD64MOVBconst) 16521 v.AuxInt = 0 16522 return true 16523 } 16524 return false 16525 } 16526 func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool { 16527 b := v.Block 16528 _ = b 16529 // match: (SUBBconst [c] x) 16530 // cond: int8(c) == 0 16531 // result: x 16532 for { 16533 c := v.AuxInt 16534 x := v.Args[0] 16535 if !(int8(c) == 0) { 16536 break 16537 } 16538 v.reset(OpCopy) 16539 v.Type = x.Type 16540 v.AddArg(x) 16541 return true 16542 } 16543 // match: (SUBBconst (MOVBconst [d]) [c]) 16544 // cond: 16545 // result: (MOVBconst [int64(int8(d-c))]) 16546 for { 16547 v_0 := v.Args[0] 16548 if v_0.Op != OpAMD64MOVBconst { 16549 break 16550 } 16551 d := v_0.AuxInt 16552 c := v.AuxInt 16553 v.reset(OpAMD64MOVBconst) 16554 v.AuxInt = int64(int8(d - c)) 16555 return true 16556 } 16557 // match: (SUBBconst (SUBBconst x [d]) [c]) 16558 // cond: 16559 // result: (ADDBconst [int64(int8(-c-d))] x) 16560 for { 16561 v_0 := v.Args[0] 16562 if v_0.Op != OpAMD64SUBBconst { 16563 break 16564 } 16565 x := v_0.Args[0] 16566 d := v_0.AuxInt 16567 c := v.AuxInt 16568 v.reset(OpAMD64ADDBconst) 16569 v.AuxInt = int64(int8(-c - d)) 16570 v.AddArg(x) 16571 return true 16572 } 16573 return false 16574 } 16575 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { 16576 b := v.Block 16577 _ = b 16578 // match: (SUBL x (MOVLconst [c])) 16579 // cond: 16580 // result: (SUBLconst x [c]) 16581 for { 16582 x := v.Args[0] 16583 v_1 := v.Args[1] 16584 if v_1.Op != OpAMD64MOVLconst { 16585 break 16586 } 16587 c := v_1.AuxInt 16588 v.reset(OpAMD64SUBLconst) 16589 v.AddArg(x) 16590 v.AuxInt = c 16591 return true 16592 } 16593 // match: (SUBL (MOVLconst [c]) x) 16594 // cond: 16595 // result: (NEGL (SUBLconst <v.Type> x [c])) 16596 for { 16597 v_0 := v.Args[0] 16598 if v_0.Op != OpAMD64MOVLconst { 16599 break 16600 } 16601 c := v_0.AuxInt 16602 x := v.Args[1] 16603 v.reset(OpAMD64NEGL) 16604 v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type) 16605 v0.AddArg(x) 16606 v0.AuxInt = c 16607 v.AddArg(v0) 16608 return true 16609 } 16610 // match: (SUBL x x) 16611 // cond: 16612 // result: (MOVLconst [0]) 16613 for { 16614 x := v.Args[0] 16615 if x != v.Args[1] { 16616 break 16617 } 16618 v.reset(OpAMD64MOVLconst) 16619 v.AuxInt = 0 16620 return true 16621 } 16622 return false 16623 } 16624 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { 16625 b := v.Block 16626 _ = b 16627 // match: (SUBLconst [c] x) 16628 // cond: int32(c) == 0 16629 // result: x 16630 for { 16631 c := v.AuxInt 16632 x := v.Args[0] 16633 if !(int32(c) == 0) { 16634 break 16635 } 16636 v.reset(OpCopy) 16637 v.Type = x.Type 16638 v.AddArg(x) 16639 return true 16640 } 16641 // match: (SUBLconst (MOVLconst [d]) [c]) 16642 // cond: 16643 // result: (MOVLconst [int64(int32(d-c))]) 16644 for { 16645 v_0 := v.Args[0] 16646 if v_0.Op != OpAMD64MOVLconst { 16647 break 16648 } 16649 d := v_0.AuxInt 16650 c := v.AuxInt 16651 v.reset(OpAMD64MOVLconst) 16652 v.AuxInt = int64(int32(d - c)) 16653 return true 16654 } 16655 // match: (SUBLconst (SUBLconst x [d]) [c]) 16656 // cond: 16657 // result: (ADDLconst [int64(int32(-c-d))] x) 16658 for { 16659 v_0 := v.Args[0] 16660 if v_0.Op != OpAMD64SUBLconst { 16661 break 16662 } 16663 x := v_0.Args[0] 16664 d := v_0.AuxInt 16665 c := v.AuxInt 16666 v.reset(OpAMD64ADDLconst) 16667 v.AuxInt = int64(int32(-c - d)) 16668 v.AddArg(x) 16669 return true 16670 } 16671 return false 16672 } 16673 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { 16674 b := v.Block 16675 _ = b 16676 // match: (SUBQ x (MOVQconst [c])) 16677 // cond: is32Bit(c) 16678 // result: (SUBQconst x [c]) 16679 for { 16680 x := v.Args[0] 16681 v_1 := v.Args[1] 16682 if v_1.Op != OpAMD64MOVQconst { 16683 break 16684 } 16685 c := v_1.AuxInt 16686 if !(is32Bit(c)) { 16687 break 16688 } 16689 v.reset(OpAMD64SUBQconst) 16690 v.AddArg(x) 16691 v.AuxInt = c 16692 return true 16693 } 16694 // match: (SUBQ (MOVQconst [c]) x) 16695 // cond: is32Bit(c) 16696 // result: (NEGQ (SUBQconst <v.Type> x [c])) 16697 for { 16698 v_0 := v.Args[0] 16699 if v_0.Op != OpAMD64MOVQconst { 16700 break 16701 } 16702 c := v_0.AuxInt 16703 x := v.Args[1] 16704 if !(is32Bit(c)) { 16705 break 16706 } 16707 v.reset(OpAMD64NEGQ) 16708 v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type) 16709 v0.AddArg(x) 16710 v0.AuxInt = c 16711 v.AddArg(v0) 16712 return true 16713 } 16714 // match: (SUBQ x x) 16715 // cond: 16716 // result: (MOVQconst [0]) 16717 for { 16718 x := v.Args[0] 16719 if x != v.Args[1] { 16720 break 16721 } 16722 v.reset(OpAMD64MOVQconst) 16723 v.AuxInt = 0 16724 return true 16725 } 16726 return false 16727 } 16728 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { 16729 b := v.Block 16730 _ = b 16731 // match: (SUBQconst [0] x) 16732 // cond: 16733 // result: x 16734 for { 16735 if v.AuxInt != 0 { 16736 break 16737 } 16738 x := v.Args[0] 16739 v.reset(OpCopy) 16740 v.Type = x.Type 16741 v.AddArg(x) 16742 return true 16743 } 16744 // match: (SUBQconst (MOVQconst [d]) [c]) 16745 // cond: 16746 // result: (MOVQconst [d-c]) 16747 for { 16748 v_0 := v.Args[0] 16749 if v_0.Op != OpAMD64MOVQconst { 16750 break 16751 } 16752 d := v_0.AuxInt 16753 c := v.AuxInt 16754 v.reset(OpAMD64MOVQconst) 16755 v.AuxInt = d - c 16756 return true 16757 } 16758 // match: (SUBQconst (SUBQconst x [d]) [c]) 16759 // cond: is32Bit(-c-d) 16760 // result: (ADDQconst [-c-d] x) 16761 for { 16762 v_0 := v.Args[0] 16763 if v_0.Op != OpAMD64SUBQconst { 16764 break 16765 } 16766 x := v_0.Args[0] 16767 d := v_0.AuxInt 16768 c := v.AuxInt 16769 if !(is32Bit(-c - d)) { 16770 break 16771 } 16772 v.reset(OpAMD64ADDQconst) 16773 v.AuxInt = -c - d 16774 v.AddArg(x) 16775 return true 16776 } 16777 return false 16778 } 16779 func rewriteValueAMD64_OpAMD64SUBW(v *Value, config *Config) bool { 16780 b := v.Block 16781 _ = b 16782 // match: (SUBW x (MOVWconst [c])) 16783 // cond: 16784 // result: (SUBWconst x [c]) 16785 for { 16786 x := v.Args[0] 16787 v_1 := v.Args[1] 16788 if v_1.Op != OpAMD64MOVWconst { 16789 break 16790 } 16791 c := v_1.AuxInt 16792 v.reset(OpAMD64SUBWconst) 16793 v.AddArg(x) 16794 v.AuxInt = c 16795 return true 16796 } 16797 // match: (SUBW (MOVWconst [c]) x) 16798 // cond: 16799 // result: (NEGW (SUBWconst <v.Type> x [c])) 16800 for { 16801 v_0 := v.Args[0] 16802 if v_0.Op != OpAMD64MOVWconst { 16803 break 16804 } 16805 c := v_0.AuxInt 16806 x := v.Args[1] 16807 v.reset(OpAMD64NEGW) 16808 v0 := b.NewValue0(v.Line, OpAMD64SUBWconst, v.Type) 16809 v0.AddArg(x) 16810 v0.AuxInt = c 16811 v.AddArg(v0) 16812 return true 16813 } 16814 // match: (SUBW x x) 16815 // cond: 16816 // result: (MOVWconst [0]) 16817 for { 16818 x := v.Args[0] 16819 if x != v.Args[1] { 16820 break 16821 } 16822 v.reset(OpAMD64MOVWconst) 16823 v.AuxInt = 0 16824 return true 16825 } 16826 return false 16827 } 16828 func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool { 16829 b := v.Block 16830 _ = b 16831 // match: (SUBWconst [c] x) 16832 // cond: int16(c) == 0 16833 // result: x 16834 for { 16835 c := v.AuxInt 16836 x := v.Args[0] 16837 if !(int16(c) == 0) { 16838 break 16839 } 16840 v.reset(OpCopy) 16841 v.Type = x.Type 16842 v.AddArg(x) 16843 return true 16844 } 16845 // match: (SUBWconst (MOVWconst [d]) [c]) 16846 // cond: 16847 // result: (MOVWconst [int64(int16(d-c))]) 16848 for { 16849 v_0 := v.Args[0] 16850 if v_0.Op != OpAMD64MOVWconst { 16851 break 16852 } 16853 d := v_0.AuxInt 16854 c := v.AuxInt 16855 v.reset(OpAMD64MOVWconst) 16856 v.AuxInt = int64(int16(d - c)) 16857 return true 16858 } 16859 // match: (SUBWconst (SUBWconst x [d]) [c]) 16860 // cond: 16861 // result: (ADDWconst [int64(int16(-c-d))] x) 16862 for { 16863 v_0 := v.Args[0] 16864 if v_0.Op != OpAMD64SUBWconst { 16865 break 16866 } 16867 x := v_0.Args[0] 16868 d := v_0.AuxInt 16869 c := v.AuxInt 16870 v.reset(OpAMD64ADDWconst) 16871 v.AuxInt = int64(int16(-c - d)) 16872 v.AddArg(x) 16873 return true 16874 } 16875 return false 16876 } 16877 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { 16878 b := v.Block 16879 _ = b 16880 // match: (SignExt16to32 x) 16881 // cond: 16882 // result: (MOVWQSX x) 16883 for { 16884 x := v.Args[0] 16885 v.reset(OpAMD64MOVWQSX) 16886 v.AddArg(x) 16887 return true 16888 } 16889 return false 16890 } 16891 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { 16892 b := v.Block 16893 _ = b 16894 // match: (SignExt16to64 x) 16895 // cond: 16896 // result: (MOVWQSX x) 16897 for { 16898 x := v.Args[0] 16899 v.reset(OpAMD64MOVWQSX) 16900 v.AddArg(x) 16901 return true 16902 } 16903 return false 16904 } 16905 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { 16906 b := v.Block 16907 _ = b 16908 // match: (SignExt32to64 x) 16909 // cond: 16910 // result: (MOVLQSX x) 16911 for { 16912 x := v.Args[0] 16913 v.reset(OpAMD64MOVLQSX) 16914 v.AddArg(x) 16915 return true 16916 } 16917 return false 16918 } 16919 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { 16920 b := v.Block 16921 _ = b 16922 // match: (SignExt8to16 x) 16923 // cond: 16924 // result: (MOVBQSX x) 16925 for { 16926 x := v.Args[0] 16927 v.reset(OpAMD64MOVBQSX) 16928 v.AddArg(x) 16929 return true 16930 } 16931 return false 16932 } 16933 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { 16934 b := v.Block 16935 _ = b 16936 // match: (SignExt8to32 x) 16937 // cond: 16938 // result: (MOVBQSX x) 16939 for { 16940 x := v.Args[0] 16941 v.reset(OpAMD64MOVBQSX) 16942 v.AddArg(x) 16943 return true 16944 } 16945 return false 16946 } 16947 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { 16948 b := v.Block 16949 _ = b 16950 // match: (SignExt8to64 x) 16951 // cond: 16952 // result: (MOVBQSX x) 16953 for { 16954 x := v.Args[0] 16955 v.reset(OpAMD64MOVBQSX) 16956 v.AddArg(x) 16957 return true 16958 } 16959 return false 16960 } 16961 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { 16962 b := v.Block 16963 _ = b 16964 // match: (Sqrt x) 16965 // cond: 16966 // result: (SQRTSD x) 16967 for { 16968 x := v.Args[0] 16969 v.reset(OpAMD64SQRTSD) 16970 v.AddArg(x) 16971 return true 16972 } 16973 return false 16974 } 16975 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { 16976 b := v.Block 16977 _ = b 16978 // match: (StaticCall [argwid] {target} mem) 16979 // cond: 16980 // result: (CALLstatic [argwid] {target} mem) 16981 for { 16982 argwid := v.AuxInt 16983 target := v.Aux 16984 mem := v.Args[0] 16985 v.reset(OpAMD64CALLstatic) 16986 v.AuxInt = argwid 16987 v.Aux = target 16988 v.AddArg(mem) 16989 return true 16990 } 16991 return false 16992 } 16993 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { 16994 b := v.Block 16995 _ = b 16996 // match: (Store [8] ptr val mem) 16997 // cond: is64BitFloat(val.Type) 16998 // result: (MOVSDstore ptr val mem) 16999 for { 17000 if v.AuxInt != 8 { 17001 break 17002 } 17003 ptr := v.Args[0] 17004 val := v.Args[1] 17005 mem := v.Args[2] 17006 if !(is64BitFloat(val.Type)) { 17007 break 17008 } 17009 v.reset(OpAMD64MOVSDstore) 17010 v.AddArg(ptr) 17011 v.AddArg(val) 17012 v.AddArg(mem) 17013 return true 17014 } 17015 // match: (Store [4] ptr val mem) 17016 // cond: is32BitFloat(val.Type) 17017 // result: (MOVSSstore ptr val mem) 17018 for { 17019 if v.AuxInt != 4 { 17020 break 17021 } 17022 ptr := v.Args[0] 17023 val := v.Args[1] 17024 mem := v.Args[2] 17025 if !(is32BitFloat(val.Type)) { 17026 break 17027 } 17028 v.reset(OpAMD64MOVSSstore) 17029 v.AddArg(ptr) 17030 v.AddArg(val) 17031 v.AddArg(mem) 17032 return true 17033 } 17034 // match: (Store [8] ptr val mem) 17035 // cond: 17036 // result: (MOVQstore ptr val mem) 17037 for { 17038 if v.AuxInt != 8 { 17039 break 17040 } 17041 ptr := v.Args[0] 17042 val := v.Args[1] 17043 mem := v.Args[2] 17044 v.reset(OpAMD64MOVQstore) 17045 v.AddArg(ptr) 17046 v.AddArg(val) 17047 v.AddArg(mem) 17048 return true 17049 } 17050 // match: (Store [4] ptr val mem) 17051 // cond: 17052 // result: (MOVLstore ptr val mem) 17053 for { 17054 if v.AuxInt != 4 { 17055 break 17056 } 17057 ptr := v.Args[0] 17058 val := v.Args[1] 17059 mem := v.Args[2] 17060 v.reset(OpAMD64MOVLstore) 17061 v.AddArg(ptr) 17062 v.AddArg(val) 17063 v.AddArg(mem) 17064 return true 17065 } 17066 // match: (Store [2] ptr val mem) 17067 // cond: 17068 // result: (MOVWstore ptr val mem) 17069 for { 17070 if v.AuxInt != 2 { 17071 break 17072 } 17073 ptr := v.Args[0] 17074 val := v.Args[1] 17075 mem := v.Args[2] 17076 v.reset(OpAMD64MOVWstore) 17077 v.AddArg(ptr) 17078 v.AddArg(val) 17079 v.AddArg(mem) 17080 return true 17081 } 17082 // match: (Store [1] ptr val mem) 17083 // cond: 17084 // result: (MOVBstore ptr val mem) 17085 for { 17086 if v.AuxInt != 1 { 17087 break 17088 } 17089 ptr := v.Args[0] 17090 val := v.Args[1] 17091 mem := v.Args[2] 17092 v.reset(OpAMD64MOVBstore) 17093 v.AddArg(ptr) 17094 v.AddArg(val) 17095 v.AddArg(mem) 17096 return true 17097 } 17098 return false 17099 } 17100 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { 17101 b := v.Block 17102 _ = b 17103 // match: (Sub16 x y) 17104 // cond: 17105 // result: (SUBW x y) 17106 for { 17107 x := v.Args[0] 17108 y := v.Args[1] 17109 v.reset(OpAMD64SUBW) 17110 v.AddArg(x) 17111 v.AddArg(y) 17112 return true 17113 } 17114 return false 17115 } 17116 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { 17117 b := v.Block 17118 _ = b 17119 // match: (Sub32 x y) 17120 // cond: 17121 // result: (SUBL x y) 17122 for { 17123 x := v.Args[0] 17124 y := v.Args[1] 17125 v.reset(OpAMD64SUBL) 17126 v.AddArg(x) 17127 v.AddArg(y) 17128 return true 17129 } 17130 return false 17131 } 17132 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { 17133 b := v.Block 17134 _ = b 17135 // match: (Sub32F x y) 17136 // cond: 17137 // result: (SUBSS x y) 17138 for { 17139 x := v.Args[0] 17140 y := v.Args[1] 17141 v.reset(OpAMD64SUBSS) 17142 v.AddArg(x) 17143 v.AddArg(y) 17144 return true 17145 } 17146 return false 17147 } 17148 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { 17149 b := v.Block 17150 _ = b 17151 // match: (Sub64 x y) 17152 // cond: 17153 // result: (SUBQ x y) 17154 for { 17155 x := v.Args[0] 17156 y := v.Args[1] 17157 v.reset(OpAMD64SUBQ) 17158 v.AddArg(x) 17159 v.AddArg(y) 17160 return true 17161 } 17162 return false 17163 } 17164 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { 17165 b := v.Block 17166 _ = b 17167 // match: (Sub64F x y) 17168 // cond: 17169 // result: (SUBSD x y) 17170 for { 17171 x := v.Args[0] 17172 y := v.Args[1] 17173 v.reset(OpAMD64SUBSD) 17174 v.AddArg(x) 17175 v.AddArg(y) 17176 return true 17177 } 17178 return false 17179 } 17180 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { 17181 b := v.Block 17182 _ = b 17183 // match: (Sub8 x y) 17184 // cond: 17185 // result: (SUBB x y) 17186 for { 17187 x := v.Args[0] 17188 y := v.Args[1] 17189 v.reset(OpAMD64SUBB) 17190 v.AddArg(x) 17191 v.AddArg(y) 17192 return true 17193 } 17194 return false 17195 } 17196 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { 17197 b := v.Block 17198 _ = b 17199 // match: (SubPtr x y) 17200 // cond: 17201 // result: (SUBQ x y) 17202 for { 17203 x := v.Args[0] 17204 y := v.Args[1] 17205 v.reset(OpAMD64SUBQ) 17206 v.AddArg(x) 17207 v.AddArg(y) 17208 return true 17209 } 17210 return false 17211 } 17212 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { 17213 b := v.Block 17214 _ = b 17215 // match: (Trunc16to8 x) 17216 // cond: 17217 // result: x 17218 for { 17219 x := v.Args[0] 17220 v.reset(OpCopy) 17221 v.Type = x.Type 17222 v.AddArg(x) 17223 return true 17224 } 17225 return false 17226 } 17227 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { 17228 b := v.Block 17229 _ = b 17230 // match: (Trunc32to16 x) 17231 // cond: 17232 // result: x 17233 for { 17234 x := v.Args[0] 17235 v.reset(OpCopy) 17236 v.Type = x.Type 17237 v.AddArg(x) 17238 return true 17239 } 17240 return false 17241 } 17242 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { 17243 b := v.Block 17244 _ = b 17245 // match: (Trunc32to8 x) 17246 // cond: 17247 // result: x 17248 for { 17249 x := v.Args[0] 17250 v.reset(OpCopy) 17251 v.Type = x.Type 17252 v.AddArg(x) 17253 return true 17254 } 17255 return false 17256 } 17257 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { 17258 b := v.Block 17259 _ = b 17260 // match: (Trunc64to16 x) 17261 // cond: 17262 // result: x 17263 for { 17264 x := v.Args[0] 17265 v.reset(OpCopy) 17266 v.Type = x.Type 17267 v.AddArg(x) 17268 return true 17269 } 17270 return false 17271 } 17272 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { 17273 b := v.Block 17274 _ = b 17275 // match: (Trunc64to32 x) 17276 // cond: 17277 // result: x 17278 for { 17279 x := v.Args[0] 17280 v.reset(OpCopy) 17281 v.Type = x.Type 17282 v.AddArg(x) 17283 return true 17284 } 17285 return false 17286 } 17287 func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { 17288 b := v.Block 17289 _ = b 17290 // match: (Trunc64to8 x) 17291 // cond: 17292 // result: x 17293 for { 17294 x := v.Args[0] 17295 v.reset(OpCopy) 17296 v.Type = x.Type 17297 v.AddArg(x) 17298 return true 17299 } 17300 return false 17301 } 17302 func rewriteValueAMD64_OpAMD64XORB(v *Value, config *Config) bool { 17303 b := v.Block 17304 _ = b 17305 // match: (XORB x (MOVBconst [c])) 17306 // cond: 17307 // result: (XORBconst [c] x) 17308 for { 17309 x := v.Args[0] 17310 v_1 := v.Args[1] 17311 if v_1.Op != OpAMD64MOVBconst { 17312 break 17313 } 17314 c := v_1.AuxInt 17315 v.reset(OpAMD64XORBconst) 17316 v.AuxInt = c 17317 v.AddArg(x) 17318 return true 17319 } 17320 // match: (XORB (MOVBconst [c]) x) 17321 // cond: 17322 // result: (XORBconst [c] x) 17323 for { 17324 v_0 := v.Args[0] 17325 if v_0.Op != OpAMD64MOVBconst { 17326 break 17327 } 17328 c := v_0.AuxInt 17329 x := v.Args[1] 17330 v.reset(OpAMD64XORBconst) 17331 v.AuxInt = c 17332 v.AddArg(x) 17333 return true 17334 } 17335 // match: (XORB x x) 17336 // cond: 17337 // result: (MOVBconst [0]) 17338 for { 17339 x := v.Args[0] 17340 if x != v.Args[1] { 17341 break 17342 } 17343 v.reset(OpAMD64MOVBconst) 17344 v.AuxInt = 0 17345 return true 17346 } 17347 return false 17348 } 17349 func rewriteValueAMD64_OpAMD64XORBconst(v *Value, config *Config) bool { 17350 b := v.Block 17351 _ = b 17352 // match: (XORBconst [c] x) 17353 // cond: int8(c)==0 17354 // result: x 17355 for { 17356 c := v.AuxInt 17357 x := v.Args[0] 17358 if !(int8(c) == 0) { 17359 break 17360 } 17361 v.reset(OpCopy) 17362 v.Type = x.Type 17363 v.AddArg(x) 17364 return true 17365 } 17366 // match: (XORBconst [c] (MOVBconst [d])) 17367 // cond: 17368 // result: (MOVBconst [c^d]) 17369 for { 17370 c := v.AuxInt 17371 v_0 := v.Args[0] 17372 if v_0.Op != OpAMD64MOVBconst { 17373 break 17374 } 17375 d := v_0.AuxInt 17376 v.reset(OpAMD64MOVBconst) 17377 v.AuxInt = c ^ d 17378 return true 17379 } 17380 return false 17381 } 17382 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { 17383 b := v.Block 17384 _ = b 17385 // match: (XORL x (MOVLconst [c])) 17386 // cond: 17387 // result: (XORLconst [c] x) 17388 for { 17389 x := v.Args[0] 17390 v_1 := v.Args[1] 17391 if v_1.Op != OpAMD64MOVLconst { 17392 break 17393 } 17394 c := v_1.AuxInt 17395 v.reset(OpAMD64XORLconst) 17396 v.AuxInt = c 17397 v.AddArg(x) 17398 return true 17399 } 17400 // match: (XORL (MOVLconst [c]) x) 17401 // cond: 17402 // result: (XORLconst [c] x) 17403 for { 17404 v_0 := v.Args[0] 17405 if v_0.Op != OpAMD64MOVLconst { 17406 break 17407 } 17408 c := v_0.AuxInt 17409 x := v.Args[1] 17410 v.reset(OpAMD64XORLconst) 17411 v.AuxInt = c 17412 v.AddArg(x) 17413 return true 17414 } 17415 // match: (XORL x x) 17416 // cond: 17417 // result: (MOVLconst [0]) 17418 for { 17419 x := v.Args[0] 17420 if x != v.Args[1] { 17421 break 17422 } 17423 v.reset(OpAMD64MOVLconst) 17424 v.AuxInt = 0 17425 return true 17426 } 17427 return false 17428 } 17429 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { 17430 b := v.Block 17431 _ = b 17432 // match: (XORLconst [c] x) 17433 // cond: int32(c)==0 17434 // result: x 17435 for { 17436 c := v.AuxInt 17437 x := v.Args[0] 17438 if !(int32(c) == 0) { 17439 break 17440 } 17441 v.reset(OpCopy) 17442 v.Type = x.Type 17443 v.AddArg(x) 17444 return true 17445 } 17446 // match: (XORLconst [c] (MOVLconst [d])) 17447 // cond: 17448 // result: (MOVLconst [c^d]) 17449 for { 17450 c := v.AuxInt 17451 v_0 := v.Args[0] 17452 if v_0.Op != OpAMD64MOVLconst { 17453 break 17454 } 17455 d := v_0.AuxInt 17456 v.reset(OpAMD64MOVLconst) 17457 v.AuxInt = c ^ d 17458 return true 17459 } 17460 return false 17461 } 17462 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { 17463 b := v.Block 17464 _ = b 17465 // match: (XORQ x (MOVQconst [c])) 17466 // cond: is32Bit(c) 17467 // result: (XORQconst [c] x) 17468 for { 17469 x := v.Args[0] 17470 v_1 := v.Args[1] 17471 if v_1.Op != OpAMD64MOVQconst { 17472 break 17473 } 17474 c := v_1.AuxInt 17475 if !(is32Bit(c)) { 17476 break 17477 } 17478 v.reset(OpAMD64XORQconst) 17479 v.AuxInt = c 17480 v.AddArg(x) 17481 return true 17482 } 17483 // match: (XORQ (MOVQconst [c]) x) 17484 // cond: is32Bit(c) 17485 // result: (XORQconst [c] x) 17486 for { 17487 v_0 := v.Args[0] 17488 if v_0.Op != OpAMD64MOVQconst { 17489 break 17490 } 17491 c := v_0.AuxInt 17492 x := v.Args[1] 17493 if !(is32Bit(c)) { 17494 break 17495 } 17496 v.reset(OpAMD64XORQconst) 17497 v.AuxInt = c 17498 v.AddArg(x) 17499 return true 17500 } 17501 // match: (XORQ x x) 17502 // cond: 17503 // result: (MOVQconst [0]) 17504 for { 17505 x := v.Args[0] 17506 if x != v.Args[1] { 17507 break 17508 } 17509 v.reset(OpAMD64MOVQconst) 17510 v.AuxInt = 0 17511 return true 17512 } 17513 return false 17514 } 17515 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { 17516 b := v.Block 17517 _ = b 17518 // match: (XORQconst [0] x) 17519 // cond: 17520 // result: x 17521 for { 17522 if v.AuxInt != 0 { 17523 break 17524 } 17525 x := v.Args[0] 17526 v.reset(OpCopy) 17527 v.Type = x.Type 17528 v.AddArg(x) 17529 return true 17530 } 17531 // match: (XORQconst [c] (MOVQconst [d])) 17532 // cond: 17533 // result: (MOVQconst [c^d]) 17534 for { 17535 c := v.AuxInt 17536 v_0 := v.Args[0] 17537 if v_0.Op != OpAMD64MOVQconst { 17538 break 17539 } 17540 d := v_0.AuxInt 17541 v.reset(OpAMD64MOVQconst) 17542 v.AuxInt = c ^ d 17543 return true 17544 } 17545 return false 17546 } 17547 func rewriteValueAMD64_OpAMD64XORW(v *Value, config *Config) bool { 17548 b := v.Block 17549 _ = b 17550 // match: (XORW x (MOVWconst [c])) 17551 // cond: 17552 // result: (XORWconst [c] x) 17553 for { 17554 x := v.Args[0] 17555 v_1 := v.Args[1] 17556 if v_1.Op != OpAMD64MOVWconst { 17557 break 17558 } 17559 c := v_1.AuxInt 17560 v.reset(OpAMD64XORWconst) 17561 v.AuxInt = c 17562 v.AddArg(x) 17563 return true 17564 } 17565 // match: (XORW (MOVWconst [c]) x) 17566 // cond: 17567 // result: (XORWconst [c] x) 17568 for { 17569 v_0 := v.Args[0] 17570 if v_0.Op != OpAMD64MOVWconst { 17571 break 17572 } 17573 c := v_0.AuxInt 17574 x := v.Args[1] 17575 v.reset(OpAMD64XORWconst) 17576 v.AuxInt = c 17577 v.AddArg(x) 17578 return true 17579 } 17580 // match: (XORW x x) 17581 // cond: 17582 // result: (MOVWconst [0]) 17583 for { 17584 x := v.Args[0] 17585 if x != v.Args[1] { 17586 break 17587 } 17588 v.reset(OpAMD64MOVWconst) 17589 v.AuxInt = 0 17590 return true 17591 } 17592 return false 17593 } 17594 func rewriteValueAMD64_OpAMD64XORWconst(v *Value, config *Config) bool { 17595 b := v.Block 17596 _ = b 17597 // match: (XORWconst [c] x) 17598 // cond: int16(c)==0 17599 // result: x 17600 for { 17601 c := v.AuxInt 17602 x := v.Args[0] 17603 if !(int16(c) == 0) { 17604 break 17605 } 17606 v.reset(OpCopy) 17607 v.Type = x.Type 17608 v.AddArg(x) 17609 return true 17610 } 17611 // match: (XORWconst [c] (MOVWconst [d])) 17612 // cond: 17613 // result: (MOVWconst [c^d]) 17614 for { 17615 c := v.AuxInt 17616 v_0 := v.Args[0] 17617 if v_0.Op != OpAMD64MOVWconst { 17618 break 17619 } 17620 d := v_0.AuxInt 17621 v.reset(OpAMD64MOVWconst) 17622 v.AuxInt = c ^ d 17623 return true 17624 } 17625 return false 17626 } 17627 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { 17628 b := v.Block 17629 _ = b 17630 // match: (Xor16 x y) 17631 // cond: 17632 // result: (XORW x y) 17633 for { 17634 x := v.Args[0] 17635 y := v.Args[1] 17636 v.reset(OpAMD64XORW) 17637 v.AddArg(x) 17638 v.AddArg(y) 17639 return true 17640 } 17641 return false 17642 } 17643 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { 17644 b := v.Block 17645 _ = b 17646 // match: (Xor32 x y) 17647 // cond: 17648 // result: (XORL x y) 17649 for { 17650 x := v.Args[0] 17651 y := v.Args[1] 17652 v.reset(OpAMD64XORL) 17653 v.AddArg(x) 17654 v.AddArg(y) 17655 return true 17656 } 17657 return false 17658 } 17659 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { 17660 b := v.Block 17661 _ = b 17662 // match: (Xor64 x y) 17663 // cond: 17664 // result: (XORQ x y) 17665 for { 17666 x := v.Args[0] 17667 y := v.Args[1] 17668 v.reset(OpAMD64XORQ) 17669 v.AddArg(x) 17670 v.AddArg(y) 17671 return true 17672 } 17673 return false 17674 } 17675 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { 17676 b := v.Block 17677 _ = b 17678 // match: (Xor8 x y) 17679 // cond: 17680 // result: (XORB x y) 17681 for { 17682 x := v.Args[0] 17683 y := v.Args[1] 17684 v.reset(OpAMD64XORB) 17685 v.AddArg(x) 17686 v.AddArg(y) 17687 return true 17688 } 17689 return false 17690 } 17691 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool { 17692 b := v.Block 17693 _ = b 17694 // match: (Zero [0] _ mem) 17695 // cond: 17696 // result: mem 17697 for { 17698 if v.AuxInt != 0 { 17699 break 17700 } 17701 mem := v.Args[1] 17702 v.reset(OpCopy) 17703 v.Type = mem.Type 17704 v.AddArg(mem) 17705 return true 17706 } 17707 // match: (Zero [1] destptr mem) 17708 // cond: 17709 // result: (MOVBstoreconst [0] destptr mem) 17710 for { 17711 if v.AuxInt != 1 { 17712 break 17713 } 17714 destptr := v.Args[0] 17715 mem := v.Args[1] 17716 v.reset(OpAMD64MOVBstoreconst) 17717 v.AuxInt = 0 17718 v.AddArg(destptr) 17719 v.AddArg(mem) 17720 return true 17721 } 17722 // match: (Zero [2] destptr mem) 17723 // cond: 17724 // result: (MOVWstoreconst [0] destptr mem) 17725 for { 17726 if v.AuxInt != 2 { 17727 break 17728 } 17729 destptr := v.Args[0] 17730 mem := v.Args[1] 17731 v.reset(OpAMD64MOVWstoreconst) 17732 v.AuxInt = 0 17733 v.AddArg(destptr) 17734 v.AddArg(mem) 17735 return true 17736 } 17737 // match: (Zero [4] destptr mem) 17738 // cond: 17739 // result: (MOVLstoreconst [0] destptr mem) 17740 for { 17741 if v.AuxInt != 4 { 17742 break 17743 } 17744 destptr := v.Args[0] 17745 mem := v.Args[1] 17746 v.reset(OpAMD64MOVLstoreconst) 17747 v.AuxInt = 0 17748 v.AddArg(destptr) 17749 v.AddArg(mem) 17750 return true 17751 } 17752 // match: (Zero [8] destptr mem) 17753 // cond: 17754 // result: (MOVQstoreconst [0] destptr mem) 17755 for { 17756 if v.AuxInt != 8 { 17757 break 17758 } 17759 destptr := v.Args[0] 17760 mem := v.Args[1] 17761 v.reset(OpAMD64MOVQstoreconst) 17762 v.AuxInt = 0 17763 v.AddArg(destptr) 17764 v.AddArg(mem) 17765 return true 17766 } 17767 // match: (Zero [3] destptr mem) 17768 // cond: 17769 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 17770 for { 17771 if v.AuxInt != 3 { 17772 break 17773 } 17774 destptr := v.Args[0] 17775 mem := v.Args[1] 17776 v.reset(OpAMD64MOVBstoreconst) 17777 v.AuxInt = makeValAndOff(0, 2) 17778 v.AddArg(destptr) 17779 v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem) 17780 v0.AuxInt = 0 17781 v0.AddArg(destptr) 17782 v0.AddArg(mem) 17783 v.AddArg(v0) 17784 return true 17785 } 17786 // match: (Zero [5] destptr mem) 17787 // cond: 17788 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 17789 for { 17790 if v.AuxInt != 5 { 17791 break 17792 } 17793 destptr := v.Args[0] 17794 mem := v.Args[1] 17795 v.reset(OpAMD64MOVBstoreconst) 17796 v.AuxInt = makeValAndOff(0, 4) 17797 v.AddArg(destptr) 17798 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 17799 v0.AuxInt = 0 17800 v0.AddArg(destptr) 17801 v0.AddArg(mem) 17802 v.AddArg(v0) 17803 return true 17804 } 17805 // match: (Zero [6] destptr mem) 17806 // cond: 17807 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 17808 for { 17809 if v.AuxInt != 6 { 17810 break 17811 } 17812 destptr := v.Args[0] 17813 mem := v.Args[1] 17814 v.reset(OpAMD64MOVWstoreconst) 17815 v.AuxInt = makeValAndOff(0, 4) 17816 v.AddArg(destptr) 17817 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 17818 v0.AuxInt = 0 17819 v0.AddArg(destptr) 17820 v0.AddArg(mem) 17821 v.AddArg(v0) 17822 return true 17823 } 17824 // match: (Zero [7] destptr mem) 17825 // cond: 17826 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 17827 for { 17828 if v.AuxInt != 7 { 17829 break 17830 } 17831 destptr := v.Args[0] 17832 mem := v.Args[1] 17833 v.reset(OpAMD64MOVLstoreconst) 17834 v.AuxInt = makeValAndOff(0, 3) 17835 v.AddArg(destptr) 17836 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 17837 v0.AuxInt = 0 17838 v0.AddArg(destptr) 17839 v0.AddArg(mem) 17840 v.AddArg(v0) 17841 return true 17842 } 17843 // match: (Zero [size] destptr mem) 17844 // cond: size%8 != 0 && size > 8 17845 // result: (Zero [size-size%8] (ADDQconst destptr [size%8]) (MOVQstoreconst [0] destptr mem)) 17846 for { 17847 size := v.AuxInt 17848 destptr := v.Args[0] 17849 mem := v.Args[1] 17850 if !(size%8 != 0 && size > 8) { 17851 break 17852 } 17853 v.reset(OpZero) 17854 v.AuxInt = size - size%8 17855 v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64()) 17856 v0.AddArg(destptr) 17857 v0.AuxInt = size % 8 17858 v.AddArg(v0) 17859 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17860 v1.AuxInt = 0 17861 v1.AddArg(destptr) 17862 v1.AddArg(mem) 17863 v.AddArg(v1) 17864 return true 17865 } 17866 // match: (Zero [16] destptr mem) 17867 // cond: 17868 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 17869 for { 17870 if v.AuxInt != 16 { 17871 break 17872 } 17873 destptr := v.Args[0] 17874 mem := v.Args[1] 17875 v.reset(OpAMD64MOVQstoreconst) 17876 v.AuxInt = makeValAndOff(0, 8) 17877 v.AddArg(destptr) 17878 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17879 v0.AuxInt = 0 17880 v0.AddArg(destptr) 17881 v0.AddArg(mem) 17882 v.AddArg(v0) 17883 return true 17884 } 17885 // match: (Zero [24] destptr mem) 17886 // cond: 17887 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 17888 for { 17889 if v.AuxInt != 24 { 17890 break 17891 } 17892 destptr := v.Args[0] 17893 mem := v.Args[1] 17894 v.reset(OpAMD64MOVQstoreconst) 17895 v.AuxInt = makeValAndOff(0, 16) 17896 v.AddArg(destptr) 17897 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17898 v0.AuxInt = makeValAndOff(0, 8) 17899 v0.AddArg(destptr) 17900 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17901 v1.AuxInt = 0 17902 v1.AddArg(destptr) 17903 v1.AddArg(mem) 17904 v0.AddArg(v1) 17905 v.AddArg(v0) 17906 return true 17907 } 17908 // match: (Zero [32] destptr mem) 17909 // cond: 17910 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 17911 for { 17912 if v.AuxInt != 32 { 17913 break 17914 } 17915 destptr := v.Args[0] 17916 mem := v.Args[1] 17917 v.reset(OpAMD64MOVQstoreconst) 17918 v.AuxInt = makeValAndOff(0, 24) 17919 v.AddArg(destptr) 17920 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17921 v0.AuxInt = makeValAndOff(0, 16) 17922 v0.AddArg(destptr) 17923 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17924 v1.AuxInt = makeValAndOff(0, 8) 17925 v1.AddArg(destptr) 17926 v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 17927 v2.AuxInt = 0 17928 v2.AddArg(destptr) 17929 v2.AddArg(mem) 17930 v1.AddArg(v2) 17931 v0.AddArg(v1) 17932 v.AddArg(v0) 17933 return true 17934 } 17935 // match: (Zero [size] destptr mem) 17936 // cond: size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice 17937 // result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 17938 for { 17939 size := v.AuxInt 17940 destptr := v.Args[0] 17941 mem := v.Args[1] 17942 if !(size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice) { 17943 break 17944 } 17945 v.reset(OpZero) 17946 v.AuxInt = size - 8 17947 v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64()) 17948 v0.AuxInt = 8 17949 v0.AddArg(destptr) 17950 v.AddArg(v0) 17951 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 17952 v1.AddArg(destptr) 17953 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 17954 v2.AuxInt = 0 17955 v1.AddArg(v2) 17956 v1.AddArg(mem) 17957 v.AddArg(v1) 17958 return true 17959 } 17960 // match: (Zero [size] destptr mem) 17961 // cond: size <= 1024 && size%16 == 0 && !config.noDuffDevice 17962 // result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem) 17963 for { 17964 size := v.AuxInt 17965 destptr := v.Args[0] 17966 mem := v.Args[1] 17967 if !(size <= 1024 && size%16 == 0 && !config.noDuffDevice) { 17968 break 17969 } 17970 v.reset(OpAMD64DUFFZERO) 17971 v.AuxInt = duffStart(size) 17972 v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64()) 17973 v0.AuxInt = duffAdj(size) 17974 v0.AddArg(destptr) 17975 v.AddArg(v0) 17976 v1 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128) 17977 v1.AuxInt = 0 17978 v.AddArg(v1) 17979 v.AddArg(mem) 17980 return true 17981 } 17982 // match: (Zero [size] destptr mem) 17983 // cond: (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 17984 // result: (REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem) 17985 for { 17986 size := v.AuxInt 17987 destptr := v.Args[0] 17988 mem := v.Args[1] 17989 if !((size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0) { 17990 break 17991 } 17992 v.reset(OpAMD64REPSTOSQ) 17993 v.AddArg(destptr) 17994 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 17995 v0.AuxInt = size / 8 17996 v.AddArg(v0) 17997 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 17998 v1.AuxInt = 0 17999 v.AddArg(v1) 18000 v.AddArg(mem) 18001 return true 18002 } 18003 return false 18004 } 18005 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool { 18006 b := v.Block 18007 _ = b 18008 // match: (ZeroExt16to32 x) 18009 // cond: 18010 // result: (MOVWQZX x) 18011 for { 18012 x := v.Args[0] 18013 v.reset(OpAMD64MOVWQZX) 18014 v.AddArg(x) 18015 return true 18016 } 18017 return false 18018 } 18019 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool { 18020 b := v.Block 18021 _ = b 18022 // match: (ZeroExt16to64 x) 18023 // cond: 18024 // result: (MOVWQZX x) 18025 for { 18026 x := v.Args[0] 18027 v.reset(OpAMD64MOVWQZX) 18028 v.AddArg(x) 18029 return true 18030 } 18031 return false 18032 } 18033 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool { 18034 b := v.Block 18035 _ = b 18036 // match: (ZeroExt32to64 x) 18037 // cond: 18038 // result: (MOVLQZX x) 18039 for { 18040 x := v.Args[0] 18041 v.reset(OpAMD64MOVLQZX) 18042 v.AddArg(x) 18043 return true 18044 } 18045 return false 18046 } 18047 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool { 18048 b := v.Block 18049 _ = b 18050 // match: (ZeroExt8to16 x) 18051 // cond: 18052 // result: (MOVBQZX x) 18053 for { 18054 x := v.Args[0] 18055 v.reset(OpAMD64MOVBQZX) 18056 v.AddArg(x) 18057 return true 18058 } 18059 return false 18060 } 18061 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool { 18062 b := v.Block 18063 _ = b 18064 // match: (ZeroExt8to32 x) 18065 // cond: 18066 // result: (MOVBQZX x) 18067 for { 18068 x := v.Args[0] 18069 v.reset(OpAMD64MOVBQZX) 18070 v.AddArg(x) 18071 return true 18072 } 18073 return false 18074 } 18075 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool { 18076 b := v.Block 18077 _ = b 18078 // match: (ZeroExt8to64 x) 18079 // cond: 18080 // result: (MOVBQZX x) 18081 for { 18082 x := v.Args[0] 18083 v.reset(OpAMD64MOVBQZX) 18084 v.AddArg(x) 18085 return true 18086 } 18087 return false 18088 } 18089 func rewriteBlockAMD64(b *Block) bool { 18090 switch b.Kind { 18091 case BlockAMD64EQ: 18092 // match: (EQ (InvertFlags cmp) yes no) 18093 // cond: 18094 // result: (EQ cmp yes no) 18095 for { 18096 v := b.Control 18097 if v.Op != OpAMD64InvertFlags { 18098 break 18099 } 18100 cmp := v.Args[0] 18101 yes := b.Succs[0] 18102 no := b.Succs[1] 18103 b.Kind = BlockAMD64EQ 18104 b.SetControl(cmp) 18105 b.Succs[0] = yes 18106 b.Succs[1] = no 18107 return true 18108 } 18109 // match: (EQ (FlagEQ) yes no) 18110 // cond: 18111 // result: (First nil yes no) 18112 for { 18113 v := b.Control 18114 if v.Op != OpAMD64FlagEQ { 18115 break 18116 } 18117 yes := b.Succs[0] 18118 no := b.Succs[1] 18119 b.Kind = BlockFirst 18120 b.SetControl(nil) 18121 b.Succs[0] = yes 18122 b.Succs[1] = no 18123 return true 18124 } 18125 // match: (EQ (FlagLT_ULT) yes no) 18126 // cond: 18127 // result: (First nil no yes) 18128 for { 18129 v := b.Control 18130 if v.Op != OpAMD64FlagLT_ULT { 18131 break 18132 } 18133 yes := b.Succs[0] 18134 no := b.Succs[1] 18135 b.Kind = BlockFirst 18136 b.SetControl(nil) 18137 b.Succs[0] = no 18138 b.Succs[1] = yes 18139 b.Likely *= -1 18140 return true 18141 } 18142 // match: (EQ (FlagLT_UGT) yes no) 18143 // cond: 18144 // result: (First nil no yes) 18145 for { 18146 v := b.Control 18147 if v.Op != OpAMD64FlagLT_UGT { 18148 break 18149 } 18150 yes := b.Succs[0] 18151 no := b.Succs[1] 18152 b.Kind = BlockFirst 18153 b.SetControl(nil) 18154 b.Succs[0] = no 18155 b.Succs[1] = yes 18156 b.Likely *= -1 18157 return true 18158 } 18159 // match: (EQ (FlagGT_ULT) yes no) 18160 // cond: 18161 // result: (First nil no yes) 18162 for { 18163 v := b.Control 18164 if v.Op != OpAMD64FlagGT_ULT { 18165 break 18166 } 18167 yes := b.Succs[0] 18168 no := b.Succs[1] 18169 b.Kind = BlockFirst 18170 b.SetControl(nil) 18171 b.Succs[0] = no 18172 b.Succs[1] = yes 18173 b.Likely *= -1 18174 return true 18175 } 18176 // match: (EQ (FlagGT_UGT) yes no) 18177 // cond: 18178 // result: (First nil no yes) 18179 for { 18180 v := b.Control 18181 if v.Op != OpAMD64FlagGT_UGT { 18182 break 18183 } 18184 yes := b.Succs[0] 18185 no := b.Succs[1] 18186 b.Kind = BlockFirst 18187 b.SetControl(nil) 18188 b.Succs[0] = no 18189 b.Succs[1] = yes 18190 b.Likely *= -1 18191 return true 18192 } 18193 case BlockAMD64GE: 18194 // match: (GE (InvertFlags cmp) yes no) 18195 // cond: 18196 // result: (LE cmp yes no) 18197 for { 18198 v := b.Control 18199 if v.Op != OpAMD64InvertFlags { 18200 break 18201 } 18202 cmp := v.Args[0] 18203 yes := b.Succs[0] 18204 no := b.Succs[1] 18205 b.Kind = BlockAMD64LE 18206 b.SetControl(cmp) 18207 b.Succs[0] = yes 18208 b.Succs[1] = no 18209 return true 18210 } 18211 // match: (GE (FlagEQ) yes no) 18212 // cond: 18213 // result: (First nil yes no) 18214 for { 18215 v := b.Control 18216 if v.Op != OpAMD64FlagEQ { 18217 break 18218 } 18219 yes := b.Succs[0] 18220 no := b.Succs[1] 18221 b.Kind = BlockFirst 18222 b.SetControl(nil) 18223 b.Succs[0] = yes 18224 b.Succs[1] = no 18225 return true 18226 } 18227 // match: (GE (FlagLT_ULT) yes no) 18228 // cond: 18229 // result: (First nil no yes) 18230 for { 18231 v := b.Control 18232 if v.Op != OpAMD64FlagLT_ULT { 18233 break 18234 } 18235 yes := b.Succs[0] 18236 no := b.Succs[1] 18237 b.Kind = BlockFirst 18238 b.SetControl(nil) 18239 b.Succs[0] = no 18240 b.Succs[1] = yes 18241 b.Likely *= -1 18242 return true 18243 } 18244 // match: (GE (FlagLT_UGT) yes no) 18245 // cond: 18246 // result: (First nil no yes) 18247 for { 18248 v := b.Control 18249 if v.Op != OpAMD64FlagLT_UGT { 18250 break 18251 } 18252 yes := b.Succs[0] 18253 no := b.Succs[1] 18254 b.Kind = BlockFirst 18255 b.SetControl(nil) 18256 b.Succs[0] = no 18257 b.Succs[1] = yes 18258 b.Likely *= -1 18259 return true 18260 } 18261 // match: (GE (FlagGT_ULT) yes no) 18262 // cond: 18263 // result: (First nil yes no) 18264 for { 18265 v := b.Control 18266 if v.Op != OpAMD64FlagGT_ULT { 18267 break 18268 } 18269 yes := b.Succs[0] 18270 no := b.Succs[1] 18271 b.Kind = BlockFirst 18272 b.SetControl(nil) 18273 b.Succs[0] = yes 18274 b.Succs[1] = no 18275 return true 18276 } 18277 // match: (GE (FlagGT_UGT) yes no) 18278 // cond: 18279 // result: (First nil yes no) 18280 for { 18281 v := b.Control 18282 if v.Op != OpAMD64FlagGT_UGT { 18283 break 18284 } 18285 yes := b.Succs[0] 18286 no := b.Succs[1] 18287 b.Kind = BlockFirst 18288 b.SetControl(nil) 18289 b.Succs[0] = yes 18290 b.Succs[1] = no 18291 return true 18292 } 18293 case BlockAMD64GT: 18294 // match: (GT (InvertFlags cmp) yes no) 18295 // cond: 18296 // result: (LT cmp yes no) 18297 for { 18298 v := b.Control 18299 if v.Op != OpAMD64InvertFlags { 18300 break 18301 } 18302 cmp := v.Args[0] 18303 yes := b.Succs[0] 18304 no := b.Succs[1] 18305 b.Kind = BlockAMD64LT 18306 b.SetControl(cmp) 18307 b.Succs[0] = yes 18308 b.Succs[1] = no 18309 return true 18310 } 18311 // match: (GT (FlagEQ) yes no) 18312 // cond: 18313 // result: (First nil no yes) 18314 for { 18315 v := b.Control 18316 if v.Op != OpAMD64FlagEQ { 18317 break 18318 } 18319 yes := b.Succs[0] 18320 no := b.Succs[1] 18321 b.Kind = BlockFirst 18322 b.SetControl(nil) 18323 b.Succs[0] = no 18324 b.Succs[1] = yes 18325 b.Likely *= -1 18326 return true 18327 } 18328 // match: (GT (FlagLT_ULT) yes no) 18329 // cond: 18330 // result: (First nil no yes) 18331 for { 18332 v := b.Control 18333 if v.Op != OpAMD64FlagLT_ULT { 18334 break 18335 } 18336 yes := b.Succs[0] 18337 no := b.Succs[1] 18338 b.Kind = BlockFirst 18339 b.SetControl(nil) 18340 b.Succs[0] = no 18341 b.Succs[1] = yes 18342 b.Likely *= -1 18343 return true 18344 } 18345 // match: (GT (FlagLT_UGT) yes no) 18346 // cond: 18347 // result: (First nil no yes) 18348 for { 18349 v := b.Control 18350 if v.Op != OpAMD64FlagLT_UGT { 18351 break 18352 } 18353 yes := b.Succs[0] 18354 no := b.Succs[1] 18355 b.Kind = BlockFirst 18356 b.SetControl(nil) 18357 b.Succs[0] = no 18358 b.Succs[1] = yes 18359 b.Likely *= -1 18360 return true 18361 } 18362 // match: (GT (FlagGT_ULT) yes no) 18363 // cond: 18364 // result: (First nil yes no) 18365 for { 18366 v := b.Control 18367 if v.Op != OpAMD64FlagGT_ULT { 18368 break 18369 } 18370 yes := b.Succs[0] 18371 no := b.Succs[1] 18372 b.Kind = BlockFirst 18373 b.SetControl(nil) 18374 b.Succs[0] = yes 18375 b.Succs[1] = no 18376 return true 18377 } 18378 // match: (GT (FlagGT_UGT) yes no) 18379 // cond: 18380 // result: (First nil yes no) 18381 for { 18382 v := b.Control 18383 if v.Op != OpAMD64FlagGT_UGT { 18384 break 18385 } 18386 yes := b.Succs[0] 18387 no := b.Succs[1] 18388 b.Kind = BlockFirst 18389 b.SetControl(nil) 18390 b.Succs[0] = yes 18391 b.Succs[1] = no 18392 return true 18393 } 18394 case BlockIf: 18395 // match: (If (SETL cmp) yes no) 18396 // cond: 18397 // result: (LT cmp yes no) 18398 for { 18399 v := b.Control 18400 if v.Op != OpAMD64SETL { 18401 break 18402 } 18403 cmp := v.Args[0] 18404 yes := b.Succs[0] 18405 no := b.Succs[1] 18406 b.Kind = BlockAMD64LT 18407 b.SetControl(cmp) 18408 b.Succs[0] = yes 18409 b.Succs[1] = no 18410 return true 18411 } 18412 // match: (If (SETLE cmp) yes no) 18413 // cond: 18414 // result: (LE cmp yes no) 18415 for { 18416 v := b.Control 18417 if v.Op != OpAMD64SETLE { 18418 break 18419 } 18420 cmp := v.Args[0] 18421 yes := b.Succs[0] 18422 no := b.Succs[1] 18423 b.Kind = BlockAMD64LE 18424 b.SetControl(cmp) 18425 b.Succs[0] = yes 18426 b.Succs[1] = no 18427 return true 18428 } 18429 // match: (If (SETG cmp) yes no) 18430 // cond: 18431 // result: (GT cmp yes no) 18432 for { 18433 v := b.Control 18434 if v.Op != OpAMD64SETG { 18435 break 18436 } 18437 cmp := v.Args[0] 18438 yes := b.Succs[0] 18439 no := b.Succs[1] 18440 b.Kind = BlockAMD64GT 18441 b.SetControl(cmp) 18442 b.Succs[0] = yes 18443 b.Succs[1] = no 18444 return true 18445 } 18446 // match: (If (SETGE cmp) yes no) 18447 // cond: 18448 // result: (GE cmp yes no) 18449 for { 18450 v := b.Control 18451 if v.Op != OpAMD64SETGE { 18452 break 18453 } 18454 cmp := v.Args[0] 18455 yes := b.Succs[0] 18456 no := b.Succs[1] 18457 b.Kind = BlockAMD64GE 18458 b.SetControl(cmp) 18459 b.Succs[0] = yes 18460 b.Succs[1] = no 18461 return true 18462 } 18463 // match: (If (SETEQ cmp) yes no) 18464 // cond: 18465 // result: (EQ cmp yes no) 18466 for { 18467 v := b.Control 18468 if v.Op != OpAMD64SETEQ { 18469 break 18470 } 18471 cmp := v.Args[0] 18472 yes := b.Succs[0] 18473 no := b.Succs[1] 18474 b.Kind = BlockAMD64EQ 18475 b.SetControl(cmp) 18476 b.Succs[0] = yes 18477 b.Succs[1] = no 18478 return true 18479 } 18480 // match: (If (SETNE cmp) yes no) 18481 // cond: 18482 // result: (NE cmp yes no) 18483 for { 18484 v := b.Control 18485 if v.Op != OpAMD64SETNE { 18486 break 18487 } 18488 cmp := v.Args[0] 18489 yes := b.Succs[0] 18490 no := b.Succs[1] 18491 b.Kind = BlockAMD64NE 18492 b.SetControl(cmp) 18493 b.Succs[0] = yes 18494 b.Succs[1] = no 18495 return true 18496 } 18497 // match: (If (SETB cmp) yes no) 18498 // cond: 18499 // result: (ULT cmp yes no) 18500 for { 18501 v := b.Control 18502 if v.Op != OpAMD64SETB { 18503 break 18504 } 18505 cmp := v.Args[0] 18506 yes := b.Succs[0] 18507 no := b.Succs[1] 18508 b.Kind = BlockAMD64ULT 18509 b.SetControl(cmp) 18510 b.Succs[0] = yes 18511 b.Succs[1] = no 18512 return true 18513 } 18514 // match: (If (SETBE cmp) yes no) 18515 // cond: 18516 // result: (ULE cmp yes no) 18517 for { 18518 v := b.Control 18519 if v.Op != OpAMD64SETBE { 18520 break 18521 } 18522 cmp := v.Args[0] 18523 yes := b.Succs[0] 18524 no := b.Succs[1] 18525 b.Kind = BlockAMD64ULE 18526 b.SetControl(cmp) 18527 b.Succs[0] = yes 18528 b.Succs[1] = no 18529 return true 18530 } 18531 // match: (If (SETA cmp) yes no) 18532 // cond: 18533 // result: (UGT cmp yes no) 18534 for { 18535 v := b.Control 18536 if v.Op != OpAMD64SETA { 18537 break 18538 } 18539 cmp := v.Args[0] 18540 yes := b.Succs[0] 18541 no := b.Succs[1] 18542 b.Kind = BlockAMD64UGT 18543 b.SetControl(cmp) 18544 b.Succs[0] = yes 18545 b.Succs[1] = no 18546 return true 18547 } 18548 // match: (If (SETAE cmp) yes no) 18549 // cond: 18550 // result: (UGE cmp yes no) 18551 for { 18552 v := b.Control 18553 if v.Op != OpAMD64SETAE { 18554 break 18555 } 18556 cmp := v.Args[0] 18557 yes := b.Succs[0] 18558 no := b.Succs[1] 18559 b.Kind = BlockAMD64UGE 18560 b.SetControl(cmp) 18561 b.Succs[0] = yes 18562 b.Succs[1] = no 18563 return true 18564 } 18565 // match: (If (SETGF cmp) yes no) 18566 // cond: 18567 // result: (UGT cmp yes no) 18568 for { 18569 v := b.Control 18570 if v.Op != OpAMD64SETGF { 18571 break 18572 } 18573 cmp := v.Args[0] 18574 yes := b.Succs[0] 18575 no := b.Succs[1] 18576 b.Kind = BlockAMD64UGT 18577 b.SetControl(cmp) 18578 b.Succs[0] = yes 18579 b.Succs[1] = no 18580 return true 18581 } 18582 // match: (If (SETGEF cmp) yes no) 18583 // cond: 18584 // result: (UGE cmp yes no) 18585 for { 18586 v := b.Control 18587 if v.Op != OpAMD64SETGEF { 18588 break 18589 } 18590 cmp := v.Args[0] 18591 yes := b.Succs[0] 18592 no := b.Succs[1] 18593 b.Kind = BlockAMD64UGE 18594 b.SetControl(cmp) 18595 b.Succs[0] = yes 18596 b.Succs[1] = no 18597 return true 18598 } 18599 // match: (If (SETEQF cmp) yes no) 18600 // cond: 18601 // result: (EQF cmp yes no) 18602 for { 18603 v := b.Control 18604 if v.Op != OpAMD64SETEQF { 18605 break 18606 } 18607 cmp := v.Args[0] 18608 yes := b.Succs[0] 18609 no := b.Succs[1] 18610 b.Kind = BlockAMD64EQF 18611 b.SetControl(cmp) 18612 b.Succs[0] = yes 18613 b.Succs[1] = no 18614 return true 18615 } 18616 // match: (If (SETNEF cmp) yes no) 18617 // cond: 18618 // result: (NEF cmp yes no) 18619 for { 18620 v := b.Control 18621 if v.Op != OpAMD64SETNEF { 18622 break 18623 } 18624 cmp := v.Args[0] 18625 yes := b.Succs[0] 18626 no := b.Succs[1] 18627 b.Kind = BlockAMD64NEF 18628 b.SetControl(cmp) 18629 b.Succs[0] = yes 18630 b.Succs[1] = no 18631 return true 18632 } 18633 // match: (If cond yes no) 18634 // cond: 18635 // result: (NE (TESTB cond cond) yes no) 18636 for { 18637 v := b.Control 18638 cond := b.Control 18639 yes := b.Succs[0] 18640 no := b.Succs[1] 18641 b.Kind = BlockAMD64NE 18642 v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags) 18643 v0.AddArg(cond) 18644 v0.AddArg(cond) 18645 b.SetControl(v0) 18646 b.Succs[0] = yes 18647 b.Succs[1] = no 18648 return true 18649 } 18650 case BlockAMD64LE: 18651 // match: (LE (InvertFlags cmp) yes no) 18652 // cond: 18653 // result: (GE cmp yes no) 18654 for { 18655 v := b.Control 18656 if v.Op != OpAMD64InvertFlags { 18657 break 18658 } 18659 cmp := v.Args[0] 18660 yes := b.Succs[0] 18661 no := b.Succs[1] 18662 b.Kind = BlockAMD64GE 18663 b.SetControl(cmp) 18664 b.Succs[0] = yes 18665 b.Succs[1] = no 18666 return true 18667 } 18668 // match: (LE (FlagEQ) yes no) 18669 // cond: 18670 // result: (First nil yes no) 18671 for { 18672 v := b.Control 18673 if v.Op != OpAMD64FlagEQ { 18674 break 18675 } 18676 yes := b.Succs[0] 18677 no := b.Succs[1] 18678 b.Kind = BlockFirst 18679 b.SetControl(nil) 18680 b.Succs[0] = yes 18681 b.Succs[1] = no 18682 return true 18683 } 18684 // match: (LE (FlagLT_ULT) yes no) 18685 // cond: 18686 // result: (First nil yes no) 18687 for { 18688 v := b.Control 18689 if v.Op != OpAMD64FlagLT_ULT { 18690 break 18691 } 18692 yes := b.Succs[0] 18693 no := b.Succs[1] 18694 b.Kind = BlockFirst 18695 b.SetControl(nil) 18696 b.Succs[0] = yes 18697 b.Succs[1] = no 18698 return true 18699 } 18700 // match: (LE (FlagLT_UGT) yes no) 18701 // cond: 18702 // result: (First nil yes no) 18703 for { 18704 v := b.Control 18705 if v.Op != OpAMD64FlagLT_UGT { 18706 break 18707 } 18708 yes := b.Succs[0] 18709 no := b.Succs[1] 18710 b.Kind = BlockFirst 18711 b.SetControl(nil) 18712 b.Succs[0] = yes 18713 b.Succs[1] = no 18714 return true 18715 } 18716 // match: (LE (FlagGT_ULT) yes no) 18717 // cond: 18718 // result: (First nil no yes) 18719 for { 18720 v := b.Control 18721 if v.Op != OpAMD64FlagGT_ULT { 18722 break 18723 } 18724 yes := b.Succs[0] 18725 no := b.Succs[1] 18726 b.Kind = BlockFirst 18727 b.SetControl(nil) 18728 b.Succs[0] = no 18729 b.Succs[1] = yes 18730 b.Likely *= -1 18731 return true 18732 } 18733 // match: (LE (FlagGT_UGT) yes no) 18734 // cond: 18735 // result: (First nil no yes) 18736 for { 18737 v := b.Control 18738 if v.Op != OpAMD64FlagGT_UGT { 18739 break 18740 } 18741 yes := b.Succs[0] 18742 no := b.Succs[1] 18743 b.Kind = BlockFirst 18744 b.SetControl(nil) 18745 b.Succs[0] = no 18746 b.Succs[1] = yes 18747 b.Likely *= -1 18748 return true 18749 } 18750 case BlockAMD64LT: 18751 // match: (LT (InvertFlags cmp) yes no) 18752 // cond: 18753 // result: (GT cmp yes no) 18754 for { 18755 v := b.Control 18756 if v.Op != OpAMD64InvertFlags { 18757 break 18758 } 18759 cmp := v.Args[0] 18760 yes := b.Succs[0] 18761 no := b.Succs[1] 18762 b.Kind = BlockAMD64GT 18763 b.SetControl(cmp) 18764 b.Succs[0] = yes 18765 b.Succs[1] = no 18766 return true 18767 } 18768 // match: (LT (FlagEQ) yes no) 18769 // cond: 18770 // result: (First nil no yes) 18771 for { 18772 v := b.Control 18773 if v.Op != OpAMD64FlagEQ { 18774 break 18775 } 18776 yes := b.Succs[0] 18777 no := b.Succs[1] 18778 b.Kind = BlockFirst 18779 b.SetControl(nil) 18780 b.Succs[0] = no 18781 b.Succs[1] = yes 18782 b.Likely *= -1 18783 return true 18784 } 18785 // match: (LT (FlagLT_ULT) yes no) 18786 // cond: 18787 // result: (First nil yes no) 18788 for { 18789 v := b.Control 18790 if v.Op != OpAMD64FlagLT_ULT { 18791 break 18792 } 18793 yes := b.Succs[0] 18794 no := b.Succs[1] 18795 b.Kind = BlockFirst 18796 b.SetControl(nil) 18797 b.Succs[0] = yes 18798 b.Succs[1] = no 18799 return true 18800 } 18801 // match: (LT (FlagLT_UGT) yes no) 18802 // cond: 18803 // result: (First nil yes no) 18804 for { 18805 v := b.Control 18806 if v.Op != OpAMD64FlagLT_UGT { 18807 break 18808 } 18809 yes := b.Succs[0] 18810 no := b.Succs[1] 18811 b.Kind = BlockFirst 18812 b.SetControl(nil) 18813 b.Succs[0] = yes 18814 b.Succs[1] = no 18815 return true 18816 } 18817 // match: (LT (FlagGT_ULT) yes no) 18818 // cond: 18819 // result: (First nil no yes) 18820 for { 18821 v := b.Control 18822 if v.Op != OpAMD64FlagGT_ULT { 18823 break 18824 } 18825 yes := b.Succs[0] 18826 no := b.Succs[1] 18827 b.Kind = BlockFirst 18828 b.SetControl(nil) 18829 b.Succs[0] = no 18830 b.Succs[1] = yes 18831 b.Likely *= -1 18832 return true 18833 } 18834 // match: (LT (FlagGT_UGT) yes no) 18835 // cond: 18836 // result: (First nil no yes) 18837 for { 18838 v := b.Control 18839 if v.Op != OpAMD64FlagGT_UGT { 18840 break 18841 } 18842 yes := b.Succs[0] 18843 no := b.Succs[1] 18844 b.Kind = BlockFirst 18845 b.SetControl(nil) 18846 b.Succs[0] = no 18847 b.Succs[1] = yes 18848 b.Likely *= -1 18849 return true 18850 } 18851 case BlockAMD64NE: 18852 // match: (NE (TESTB (SETL cmp)) yes no) 18853 // cond: 18854 // result: (LT cmp yes no) 18855 for { 18856 v := b.Control 18857 if v.Op != OpAMD64TESTB { 18858 break 18859 } 18860 v_0 := v.Args[0] 18861 if v_0.Op != OpAMD64SETL { 18862 break 18863 } 18864 cmp := v_0.Args[0] 18865 yes := b.Succs[0] 18866 no := b.Succs[1] 18867 b.Kind = BlockAMD64LT 18868 b.SetControl(cmp) 18869 b.Succs[0] = yes 18870 b.Succs[1] = no 18871 return true 18872 } 18873 // match: (NE (TESTB (SETLE cmp)) yes no) 18874 // cond: 18875 // result: (LE cmp yes no) 18876 for { 18877 v := b.Control 18878 if v.Op != OpAMD64TESTB { 18879 break 18880 } 18881 v_0 := v.Args[0] 18882 if v_0.Op != OpAMD64SETLE { 18883 break 18884 } 18885 cmp := v_0.Args[0] 18886 yes := b.Succs[0] 18887 no := b.Succs[1] 18888 b.Kind = BlockAMD64LE 18889 b.SetControl(cmp) 18890 b.Succs[0] = yes 18891 b.Succs[1] = no 18892 return true 18893 } 18894 // match: (NE (TESTB (SETG cmp)) yes no) 18895 // cond: 18896 // result: (GT cmp yes no) 18897 for { 18898 v := b.Control 18899 if v.Op != OpAMD64TESTB { 18900 break 18901 } 18902 v_0 := v.Args[0] 18903 if v_0.Op != OpAMD64SETG { 18904 break 18905 } 18906 cmp := v_0.Args[0] 18907 yes := b.Succs[0] 18908 no := b.Succs[1] 18909 b.Kind = BlockAMD64GT 18910 b.SetControl(cmp) 18911 b.Succs[0] = yes 18912 b.Succs[1] = no 18913 return true 18914 } 18915 // match: (NE (TESTB (SETGE cmp)) yes no) 18916 // cond: 18917 // result: (GE cmp yes no) 18918 for { 18919 v := b.Control 18920 if v.Op != OpAMD64TESTB { 18921 break 18922 } 18923 v_0 := v.Args[0] 18924 if v_0.Op != OpAMD64SETGE { 18925 break 18926 } 18927 cmp := v_0.Args[0] 18928 yes := b.Succs[0] 18929 no := b.Succs[1] 18930 b.Kind = BlockAMD64GE 18931 b.SetControl(cmp) 18932 b.Succs[0] = yes 18933 b.Succs[1] = no 18934 return true 18935 } 18936 // match: (NE (TESTB (SETEQ cmp)) yes no) 18937 // cond: 18938 // result: (EQ cmp yes no) 18939 for { 18940 v := b.Control 18941 if v.Op != OpAMD64TESTB { 18942 break 18943 } 18944 v_0 := v.Args[0] 18945 if v_0.Op != OpAMD64SETEQ { 18946 break 18947 } 18948 cmp := v_0.Args[0] 18949 yes := b.Succs[0] 18950 no := b.Succs[1] 18951 b.Kind = BlockAMD64EQ 18952 b.SetControl(cmp) 18953 b.Succs[0] = yes 18954 b.Succs[1] = no 18955 return true 18956 } 18957 // match: (NE (TESTB (SETNE cmp)) yes no) 18958 // cond: 18959 // result: (NE cmp yes no) 18960 for { 18961 v := b.Control 18962 if v.Op != OpAMD64TESTB { 18963 break 18964 } 18965 v_0 := v.Args[0] 18966 if v_0.Op != OpAMD64SETNE { 18967 break 18968 } 18969 cmp := v_0.Args[0] 18970 yes := b.Succs[0] 18971 no := b.Succs[1] 18972 b.Kind = BlockAMD64NE 18973 b.SetControl(cmp) 18974 b.Succs[0] = yes 18975 b.Succs[1] = no 18976 return true 18977 } 18978 // match: (NE (TESTB (SETB cmp)) yes no) 18979 // cond: 18980 // result: (ULT cmp yes no) 18981 for { 18982 v := b.Control 18983 if v.Op != OpAMD64TESTB { 18984 break 18985 } 18986 v_0 := v.Args[0] 18987 if v_0.Op != OpAMD64SETB { 18988 break 18989 } 18990 cmp := v_0.Args[0] 18991 yes := b.Succs[0] 18992 no := b.Succs[1] 18993 b.Kind = BlockAMD64ULT 18994 b.SetControl(cmp) 18995 b.Succs[0] = yes 18996 b.Succs[1] = no 18997 return true 18998 } 18999 // match: (NE (TESTB (SETBE cmp)) yes no) 19000 // cond: 19001 // result: (ULE cmp yes no) 19002 for { 19003 v := b.Control 19004 if v.Op != OpAMD64TESTB { 19005 break 19006 } 19007 v_0 := v.Args[0] 19008 if v_0.Op != OpAMD64SETBE { 19009 break 19010 } 19011 cmp := v_0.Args[0] 19012 yes := b.Succs[0] 19013 no := b.Succs[1] 19014 b.Kind = BlockAMD64ULE 19015 b.SetControl(cmp) 19016 b.Succs[0] = yes 19017 b.Succs[1] = no 19018 return true 19019 } 19020 // match: (NE (TESTB (SETA cmp)) yes no) 19021 // cond: 19022 // result: (UGT cmp yes no) 19023 for { 19024 v := b.Control 19025 if v.Op != OpAMD64TESTB { 19026 break 19027 } 19028 v_0 := v.Args[0] 19029 if v_0.Op != OpAMD64SETA { 19030 break 19031 } 19032 cmp := v_0.Args[0] 19033 yes := b.Succs[0] 19034 no := b.Succs[1] 19035 b.Kind = BlockAMD64UGT 19036 b.SetControl(cmp) 19037 b.Succs[0] = yes 19038 b.Succs[1] = no 19039 return true 19040 } 19041 // match: (NE (TESTB (SETAE cmp)) yes no) 19042 // cond: 19043 // result: (UGE cmp yes no) 19044 for { 19045 v := b.Control 19046 if v.Op != OpAMD64TESTB { 19047 break 19048 } 19049 v_0 := v.Args[0] 19050 if v_0.Op != OpAMD64SETAE { 19051 break 19052 } 19053 cmp := v_0.Args[0] 19054 yes := b.Succs[0] 19055 no := b.Succs[1] 19056 b.Kind = BlockAMD64UGE 19057 b.SetControl(cmp) 19058 b.Succs[0] = yes 19059 b.Succs[1] = no 19060 return true 19061 } 19062 // match: (NE (TESTB (SETGF cmp)) yes no) 19063 // cond: 19064 // result: (UGT cmp yes no) 19065 for { 19066 v := b.Control 19067 if v.Op != OpAMD64TESTB { 19068 break 19069 } 19070 v_0 := v.Args[0] 19071 if v_0.Op != OpAMD64SETGF { 19072 break 19073 } 19074 cmp := v_0.Args[0] 19075 yes := b.Succs[0] 19076 no := b.Succs[1] 19077 b.Kind = BlockAMD64UGT 19078 b.SetControl(cmp) 19079 b.Succs[0] = yes 19080 b.Succs[1] = no 19081 return true 19082 } 19083 // match: (NE (TESTB (SETGEF cmp)) yes no) 19084 // cond: 19085 // result: (UGE cmp yes no) 19086 for { 19087 v := b.Control 19088 if v.Op != OpAMD64TESTB { 19089 break 19090 } 19091 v_0 := v.Args[0] 19092 if v_0.Op != OpAMD64SETGEF { 19093 break 19094 } 19095 cmp := v_0.Args[0] 19096 yes := b.Succs[0] 19097 no := b.Succs[1] 19098 b.Kind = BlockAMD64UGE 19099 b.SetControl(cmp) 19100 b.Succs[0] = yes 19101 b.Succs[1] = no 19102 return true 19103 } 19104 // match: (NE (TESTB (SETEQF cmp)) yes no) 19105 // cond: 19106 // result: (EQF cmp yes no) 19107 for { 19108 v := b.Control 19109 if v.Op != OpAMD64TESTB { 19110 break 19111 } 19112 v_0 := v.Args[0] 19113 if v_0.Op != OpAMD64SETEQF { 19114 break 19115 } 19116 cmp := v_0.Args[0] 19117 yes := b.Succs[0] 19118 no := b.Succs[1] 19119 b.Kind = BlockAMD64EQF 19120 b.SetControl(cmp) 19121 b.Succs[0] = yes 19122 b.Succs[1] = no 19123 return true 19124 } 19125 // match: (NE (TESTB (SETNEF cmp)) yes no) 19126 // cond: 19127 // result: (NEF cmp yes no) 19128 for { 19129 v := b.Control 19130 if v.Op != OpAMD64TESTB { 19131 break 19132 } 19133 v_0 := v.Args[0] 19134 if v_0.Op != OpAMD64SETNEF { 19135 break 19136 } 19137 cmp := v_0.Args[0] 19138 yes := b.Succs[0] 19139 no := b.Succs[1] 19140 b.Kind = BlockAMD64NEF 19141 b.SetControl(cmp) 19142 b.Succs[0] = yes 19143 b.Succs[1] = no 19144 return true 19145 } 19146 // match: (NE (InvertFlags cmp) yes no) 19147 // cond: 19148 // result: (NE cmp yes no) 19149 for { 19150 v := b.Control 19151 if v.Op != OpAMD64InvertFlags { 19152 break 19153 } 19154 cmp := v.Args[0] 19155 yes := b.Succs[0] 19156 no := b.Succs[1] 19157 b.Kind = BlockAMD64NE 19158 b.SetControl(cmp) 19159 b.Succs[0] = yes 19160 b.Succs[1] = no 19161 return true 19162 } 19163 // match: (NE (FlagEQ) yes no) 19164 // cond: 19165 // result: (First nil no yes) 19166 for { 19167 v := b.Control 19168 if v.Op != OpAMD64FlagEQ { 19169 break 19170 } 19171 yes := b.Succs[0] 19172 no := b.Succs[1] 19173 b.Kind = BlockFirst 19174 b.SetControl(nil) 19175 b.Succs[0] = no 19176 b.Succs[1] = yes 19177 b.Likely *= -1 19178 return true 19179 } 19180 // match: (NE (FlagLT_ULT) yes no) 19181 // cond: 19182 // result: (First nil yes no) 19183 for { 19184 v := b.Control 19185 if v.Op != OpAMD64FlagLT_ULT { 19186 break 19187 } 19188 yes := b.Succs[0] 19189 no := b.Succs[1] 19190 b.Kind = BlockFirst 19191 b.SetControl(nil) 19192 b.Succs[0] = yes 19193 b.Succs[1] = no 19194 return true 19195 } 19196 // match: (NE (FlagLT_UGT) yes no) 19197 // cond: 19198 // result: (First nil yes no) 19199 for { 19200 v := b.Control 19201 if v.Op != OpAMD64FlagLT_UGT { 19202 break 19203 } 19204 yes := b.Succs[0] 19205 no := b.Succs[1] 19206 b.Kind = BlockFirst 19207 b.SetControl(nil) 19208 b.Succs[0] = yes 19209 b.Succs[1] = no 19210 return true 19211 } 19212 // match: (NE (FlagGT_ULT) yes no) 19213 // cond: 19214 // result: (First nil yes no) 19215 for { 19216 v := b.Control 19217 if v.Op != OpAMD64FlagGT_ULT { 19218 break 19219 } 19220 yes := b.Succs[0] 19221 no := b.Succs[1] 19222 b.Kind = BlockFirst 19223 b.SetControl(nil) 19224 b.Succs[0] = yes 19225 b.Succs[1] = no 19226 return true 19227 } 19228 // match: (NE (FlagGT_UGT) yes no) 19229 // cond: 19230 // result: (First nil yes no) 19231 for { 19232 v := b.Control 19233 if v.Op != OpAMD64FlagGT_UGT { 19234 break 19235 } 19236 yes := b.Succs[0] 19237 no := b.Succs[1] 19238 b.Kind = BlockFirst 19239 b.SetControl(nil) 19240 b.Succs[0] = yes 19241 b.Succs[1] = no 19242 return true 19243 } 19244 case BlockAMD64UGE: 19245 // match: (UGE (InvertFlags cmp) yes no) 19246 // cond: 19247 // result: (ULE cmp yes no) 19248 for { 19249 v := b.Control 19250 if v.Op != OpAMD64InvertFlags { 19251 break 19252 } 19253 cmp := v.Args[0] 19254 yes := b.Succs[0] 19255 no := b.Succs[1] 19256 b.Kind = BlockAMD64ULE 19257 b.SetControl(cmp) 19258 b.Succs[0] = yes 19259 b.Succs[1] = no 19260 return true 19261 } 19262 // match: (UGE (FlagEQ) yes no) 19263 // cond: 19264 // result: (First nil yes no) 19265 for { 19266 v := b.Control 19267 if v.Op != OpAMD64FlagEQ { 19268 break 19269 } 19270 yes := b.Succs[0] 19271 no := b.Succs[1] 19272 b.Kind = BlockFirst 19273 b.SetControl(nil) 19274 b.Succs[0] = yes 19275 b.Succs[1] = no 19276 return true 19277 } 19278 // match: (UGE (FlagLT_ULT) yes no) 19279 // cond: 19280 // result: (First nil no yes) 19281 for { 19282 v := b.Control 19283 if v.Op != OpAMD64FlagLT_ULT { 19284 break 19285 } 19286 yes := b.Succs[0] 19287 no := b.Succs[1] 19288 b.Kind = BlockFirst 19289 b.SetControl(nil) 19290 b.Succs[0] = no 19291 b.Succs[1] = yes 19292 b.Likely *= -1 19293 return true 19294 } 19295 // match: (UGE (FlagLT_UGT) yes no) 19296 // cond: 19297 // result: (First nil yes no) 19298 for { 19299 v := b.Control 19300 if v.Op != OpAMD64FlagLT_UGT { 19301 break 19302 } 19303 yes := b.Succs[0] 19304 no := b.Succs[1] 19305 b.Kind = BlockFirst 19306 b.SetControl(nil) 19307 b.Succs[0] = yes 19308 b.Succs[1] = no 19309 return true 19310 } 19311 // match: (UGE (FlagGT_ULT) yes no) 19312 // cond: 19313 // result: (First nil no yes) 19314 for { 19315 v := b.Control 19316 if v.Op != OpAMD64FlagGT_ULT { 19317 break 19318 } 19319 yes := b.Succs[0] 19320 no := b.Succs[1] 19321 b.Kind = BlockFirst 19322 b.SetControl(nil) 19323 b.Succs[0] = no 19324 b.Succs[1] = yes 19325 b.Likely *= -1 19326 return true 19327 } 19328 // match: (UGE (FlagGT_UGT) yes no) 19329 // cond: 19330 // result: (First nil yes no) 19331 for { 19332 v := b.Control 19333 if v.Op != OpAMD64FlagGT_UGT { 19334 break 19335 } 19336 yes := b.Succs[0] 19337 no := b.Succs[1] 19338 b.Kind = BlockFirst 19339 b.SetControl(nil) 19340 b.Succs[0] = yes 19341 b.Succs[1] = no 19342 return true 19343 } 19344 case BlockAMD64UGT: 19345 // match: (UGT (InvertFlags cmp) yes no) 19346 // cond: 19347 // result: (ULT cmp yes no) 19348 for { 19349 v := b.Control 19350 if v.Op != OpAMD64InvertFlags { 19351 break 19352 } 19353 cmp := v.Args[0] 19354 yes := b.Succs[0] 19355 no := b.Succs[1] 19356 b.Kind = BlockAMD64ULT 19357 b.SetControl(cmp) 19358 b.Succs[0] = yes 19359 b.Succs[1] = no 19360 return true 19361 } 19362 // match: (UGT (FlagEQ) yes no) 19363 // cond: 19364 // result: (First nil no yes) 19365 for { 19366 v := b.Control 19367 if v.Op != OpAMD64FlagEQ { 19368 break 19369 } 19370 yes := b.Succs[0] 19371 no := b.Succs[1] 19372 b.Kind = BlockFirst 19373 b.SetControl(nil) 19374 b.Succs[0] = no 19375 b.Succs[1] = yes 19376 b.Likely *= -1 19377 return true 19378 } 19379 // match: (UGT (FlagLT_ULT) yes no) 19380 // cond: 19381 // result: (First nil no yes) 19382 for { 19383 v := b.Control 19384 if v.Op != OpAMD64FlagLT_ULT { 19385 break 19386 } 19387 yes := b.Succs[0] 19388 no := b.Succs[1] 19389 b.Kind = BlockFirst 19390 b.SetControl(nil) 19391 b.Succs[0] = no 19392 b.Succs[1] = yes 19393 b.Likely *= -1 19394 return true 19395 } 19396 // match: (UGT (FlagLT_UGT) yes no) 19397 // cond: 19398 // result: (First nil yes no) 19399 for { 19400 v := b.Control 19401 if v.Op != OpAMD64FlagLT_UGT { 19402 break 19403 } 19404 yes := b.Succs[0] 19405 no := b.Succs[1] 19406 b.Kind = BlockFirst 19407 b.SetControl(nil) 19408 b.Succs[0] = yes 19409 b.Succs[1] = no 19410 return true 19411 } 19412 // match: (UGT (FlagGT_ULT) yes no) 19413 // cond: 19414 // result: (First nil no yes) 19415 for { 19416 v := b.Control 19417 if v.Op != OpAMD64FlagGT_ULT { 19418 break 19419 } 19420 yes := b.Succs[0] 19421 no := b.Succs[1] 19422 b.Kind = BlockFirst 19423 b.SetControl(nil) 19424 b.Succs[0] = no 19425 b.Succs[1] = yes 19426 b.Likely *= -1 19427 return true 19428 } 19429 // match: (UGT (FlagGT_UGT) yes no) 19430 // cond: 19431 // result: (First nil yes no) 19432 for { 19433 v := b.Control 19434 if v.Op != OpAMD64FlagGT_UGT { 19435 break 19436 } 19437 yes := b.Succs[0] 19438 no := b.Succs[1] 19439 b.Kind = BlockFirst 19440 b.SetControl(nil) 19441 b.Succs[0] = yes 19442 b.Succs[1] = no 19443 return true 19444 } 19445 case BlockAMD64ULE: 19446 // match: (ULE (InvertFlags cmp) yes no) 19447 // cond: 19448 // result: (UGE cmp yes no) 19449 for { 19450 v := b.Control 19451 if v.Op != OpAMD64InvertFlags { 19452 break 19453 } 19454 cmp := v.Args[0] 19455 yes := b.Succs[0] 19456 no := b.Succs[1] 19457 b.Kind = BlockAMD64UGE 19458 b.SetControl(cmp) 19459 b.Succs[0] = yes 19460 b.Succs[1] = no 19461 return true 19462 } 19463 // match: (ULE (FlagEQ) yes no) 19464 // cond: 19465 // result: (First nil yes no) 19466 for { 19467 v := b.Control 19468 if v.Op != OpAMD64FlagEQ { 19469 break 19470 } 19471 yes := b.Succs[0] 19472 no := b.Succs[1] 19473 b.Kind = BlockFirst 19474 b.SetControl(nil) 19475 b.Succs[0] = yes 19476 b.Succs[1] = no 19477 return true 19478 } 19479 // match: (ULE (FlagLT_ULT) yes no) 19480 // cond: 19481 // result: (First nil yes no) 19482 for { 19483 v := b.Control 19484 if v.Op != OpAMD64FlagLT_ULT { 19485 break 19486 } 19487 yes := b.Succs[0] 19488 no := b.Succs[1] 19489 b.Kind = BlockFirst 19490 b.SetControl(nil) 19491 b.Succs[0] = yes 19492 b.Succs[1] = no 19493 return true 19494 } 19495 // match: (ULE (FlagLT_UGT) yes no) 19496 // cond: 19497 // result: (First nil no yes) 19498 for { 19499 v := b.Control 19500 if v.Op != OpAMD64FlagLT_UGT { 19501 break 19502 } 19503 yes := b.Succs[0] 19504 no := b.Succs[1] 19505 b.Kind = BlockFirst 19506 b.SetControl(nil) 19507 b.Succs[0] = no 19508 b.Succs[1] = yes 19509 b.Likely *= -1 19510 return true 19511 } 19512 // match: (ULE (FlagGT_ULT) yes no) 19513 // cond: 19514 // result: (First nil yes no) 19515 for { 19516 v := b.Control 19517 if v.Op != OpAMD64FlagGT_ULT { 19518 break 19519 } 19520 yes := b.Succs[0] 19521 no := b.Succs[1] 19522 b.Kind = BlockFirst 19523 b.SetControl(nil) 19524 b.Succs[0] = yes 19525 b.Succs[1] = no 19526 return true 19527 } 19528 // match: (ULE (FlagGT_UGT) yes no) 19529 // cond: 19530 // result: (First nil no yes) 19531 for { 19532 v := b.Control 19533 if v.Op != OpAMD64FlagGT_UGT { 19534 break 19535 } 19536 yes := b.Succs[0] 19537 no := b.Succs[1] 19538 b.Kind = BlockFirst 19539 b.SetControl(nil) 19540 b.Succs[0] = no 19541 b.Succs[1] = yes 19542 b.Likely *= -1 19543 return true 19544 } 19545 case BlockAMD64ULT: 19546 // match: (ULT (InvertFlags cmp) yes no) 19547 // cond: 19548 // result: (UGT cmp yes no) 19549 for { 19550 v := b.Control 19551 if v.Op != OpAMD64InvertFlags { 19552 break 19553 } 19554 cmp := v.Args[0] 19555 yes := b.Succs[0] 19556 no := b.Succs[1] 19557 b.Kind = BlockAMD64UGT 19558 b.SetControl(cmp) 19559 b.Succs[0] = yes 19560 b.Succs[1] = no 19561 return true 19562 } 19563 // match: (ULT (FlagEQ) yes no) 19564 // cond: 19565 // result: (First nil no yes) 19566 for { 19567 v := b.Control 19568 if v.Op != OpAMD64FlagEQ { 19569 break 19570 } 19571 yes := b.Succs[0] 19572 no := b.Succs[1] 19573 b.Kind = BlockFirst 19574 b.SetControl(nil) 19575 b.Succs[0] = no 19576 b.Succs[1] = yes 19577 b.Likely *= -1 19578 return true 19579 } 19580 // match: (ULT (FlagLT_ULT) yes no) 19581 // cond: 19582 // result: (First nil yes no) 19583 for { 19584 v := b.Control 19585 if v.Op != OpAMD64FlagLT_ULT { 19586 break 19587 } 19588 yes := b.Succs[0] 19589 no := b.Succs[1] 19590 b.Kind = BlockFirst 19591 b.SetControl(nil) 19592 b.Succs[0] = yes 19593 b.Succs[1] = no 19594 return true 19595 } 19596 // match: (ULT (FlagLT_UGT) yes no) 19597 // cond: 19598 // result: (First nil no yes) 19599 for { 19600 v := b.Control 19601 if v.Op != OpAMD64FlagLT_UGT { 19602 break 19603 } 19604 yes := b.Succs[0] 19605 no := b.Succs[1] 19606 b.Kind = BlockFirst 19607 b.SetControl(nil) 19608 b.Succs[0] = no 19609 b.Succs[1] = yes 19610 b.Likely *= -1 19611 return true 19612 } 19613 // match: (ULT (FlagGT_ULT) yes no) 19614 // cond: 19615 // result: (First nil yes no) 19616 for { 19617 v := b.Control 19618 if v.Op != OpAMD64FlagGT_ULT { 19619 break 19620 } 19621 yes := b.Succs[0] 19622 no := b.Succs[1] 19623 b.Kind = BlockFirst 19624 b.SetControl(nil) 19625 b.Succs[0] = yes 19626 b.Succs[1] = no 19627 return true 19628 } 19629 // match: (ULT (FlagGT_UGT) yes no) 19630 // cond: 19631 // result: (First nil no yes) 19632 for { 19633 v := b.Control 19634 if v.Op != OpAMD64FlagGT_UGT { 19635 break 19636 } 19637 yes := b.Succs[0] 19638 no := b.Succs[1] 19639 b.Kind = BlockFirst 19640 b.SetControl(nil) 19641 b.Succs[0] = no 19642 b.Succs[1] = yes 19643 b.Likely *= -1 19644 return true 19645 } 19646 } 19647 return false 19648 }