// github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"

var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP     // in case not otherwise used
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck(v)
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall(v)
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8(v)
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64(v)
	}
	return false
}
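// The dispatcher above is invoked by the compiler's generic rewrite driver,
// which keeps reapplying rules until no Value changes. A minimal sketch of
// that fixed-point loop (illustrative only, not the driver's actual code):
//
//	for changed := true; changed; {
//		changed = false
//		for _, b := range f.Blocks {
//			for _, v := range b.Values {
//				if rewriteValueAMD64(v) {
//					changed = true
//				}
//			}
//		}
//	}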
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
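// The ROLLconst/ROLWconst/ROLBconst rules above rely on the identity
// (x << c) + (x >>u (w-c)) == rotate-left(x, c) for 0 < c < w, where w is
// the operand width: the two shifted fields occupy disjoint bit positions,
// so the ADD never carries and behaves exactly like an OR. Worked example
// (hypothetical value) for w=32, c=8, x=0x11223344:
//
//	x<<8  == 0x22334400
//	x>>24 == 0x00000011
//	sum   == 0x22334411 == bits.RotateLeft32(x, 8)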
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
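// Note on the int64(int32(c+d)) folds above: AuxInt always holds an int64,
// so 32-bit constant arithmetic must be re-truncated and sign-extended to
// keep the canonical form. For example, folding two ADDLconst with
// c = 0x7fffffff and d = 1 yields int64(int32(0x80000000)) == -1<<31,
// matching the wraparound of the 32-bit ADDL being replaced.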
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
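	// The LEAQ2/LEAQ4/LEAQ8 rules above exploit x86 scaled-index addressing:
	// LEAQk base index computes base + k*index (plus any displacement) in a
	// single instruction, so an add of a shifted value needs no separate
	// shift. For example, (ADDQ x (SHLQconst [3] y)) computes x + 8*y, which
	// is exactly (LEAQ8 x y).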
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
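// The ADDQmem rules above (and their ADDLmem/ADDSDmem/ADDSSmem counterparts
// elsewhere in this file) fold a load directly into the ALU instruction,
// turning "MOVQ off(ptr), tmp; ADDQ tmp, x" into a single "ADDQ off(ptr), x".
// canMergeLoad checks that the fold is safe, in particular that the load has
// no other uses and can be scheduled together with the add, and clobber
// marks the now-dead load so dead-code elimination removes it.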
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
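// Commutative patterns are expanded mechanically: each rule such as
// (ADDSD x l:(MOVSDload ...)) appears twice, once per argument order,
// because this generated matcher compares operands purely syntactically.
// The same doubling is visible in ADDL, ADDQ, ANDL, and ANDQ.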
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
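	// The two rules above turn all-ones masks of the low byte or word into
	// plain zero-extending moves: e.g. for a hypothetical x = 0x12345678,
	// x & 0xFF == 0x78, which is exactly MOVBQZX of the low byte, and the
	// MOVZX form needs no immediate operand.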
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
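// BSF leaves its destination undefined when the source is zero, so the BSFQ
// rules above keep a sentinel bit ORed into the source: a count-trailing-zeros
// of an 8- or 16-bit value is computed as BSFQ(x | 1<<8) or BSFQ(x | 1<<16),
// which yields 8 (resp. 16) when x is zero and lets the inner MOVBQZX/MOVWQZX
// be dropped, since the sentinel already clears the relevance of the upper
// bits. The CMOVQEQ rule uses the same fact: with a nonzero ORQconst mixed
// in, BSFQ's source can never be zero, so its zero flag never fires and the
// conditional move reduces to its first operand.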
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
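// The CMP*const rules below fold comparisons that are decidable at compile
// time into concrete flag values (FlagEQ, FlagLT_ULT, FlagGT_UGT, ...),
// which record the signed and unsigned orderings together so later branch
// and SETcc rewrites can consult whichever ordering they need. Comparisons
// against zero are canonicalized to TEST forms, e.g. (CMPBconst x [0])
// becomes (TESTB x x).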
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
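// The lock cmpxchg rewrites only fold addressing: an ADDQconst feeding the
// pointer operand is absorbed into the instruction's displacement whenever
// the combined offset still fits in 32 bits.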
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
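// LEAQ1/LEAQ2/LEAQ4/LEAQ8 model the x86 scaled addressing modes
// base + index*scale + displacement. The rules below fold constant adds
// into the displacement and small left shifts into a larger scale, e.g.
//
//	(LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
//
// so x + (y<<3) + c is computed by a single LEA with an 8-byte scale. The
// x.Op != OpSB conditions keep the static base pseudo-register out of
// slots where it can only serve as an address base.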
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
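// Sign-extensions are folded into loads: a MOVBQSX of a single-use load
// becomes a MOVBQSXload issued in the load's original block (the wider
// MOVW/MOVL/MOVQ loads qualify too, since only the low byte is consumed),
// and the old load is clobbered. Extending an ANDLconst result whose sign
// bit is already masked off is a no-op, so the mask is narrowed instead.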
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
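// MOVBQZX mirrors the sign-extension rules for the unsigned case: byte
// loads already zero-extend on amd64, so the explicit extension collapses
// into the load (including the indexed MOVBloadidx1 form), masks shrink
// to c&0xff, and a zero-extension of a zero-extension reduces to one.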
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
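// MOVBload forwards a just-stored value (a load from the same address as
// an immediately preceding byte store yields the stored value directly)
// and folds address arithmetic: ADDQconst/ADDLconst and LEAL/LEAQ feeding
// the pointer are absorbed into the offset and symbol, while LEAQ1 and
// ADDQ addresses select the indexed MOVBloadidx1 form.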
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
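// For indexed byte loads the pointer and index operands commute, so each
// ADDQconst fold below appears in both operand orders; either way the
// constant migrates into the displacement.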
func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
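// MOVBstore drops redundant extensions of the stored value, folds address
// arithmetic like the load rules, and coalesces adjacent byte stores of
// one value's shifted pieces into a single wider store. Bytes written
// most-significant-first become a byte-swapped wide store; for example,
// source shaped like
//
//	p[0] = byte(w >> 24)
//	p[1] = byte(w >> 16)
//	p[2] = byte(w >> 8)
//	p[3] = byte(w)
//
// collapses to a MOVLstore of (BSWAPL w). The 16-bit case uses ROLWconst
// [8] and the 64-bit case BSWAPQ, while least-significant-first sequences
// merge into a plain wider store with no swap.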
:= v_0.AuxInt 4564 idx := v_0.Args[0] 4565 ptr := v.Args[1] 4566 mem := v.Args[2] 4567 v.reset(OpAMD64MOVBloadidx1) 4568 v.AuxInt = c + d 4569 v.Aux = sym 4570 v.AddArg(ptr) 4571 v.AddArg(idx) 4572 v.AddArg(mem) 4573 return true 4574 } 4575 return false 4576 } 4577 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { 4578 b := v.Block 4579 _ = b 4580 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 4581 // cond: 4582 // result: (MOVBstore [off] {sym} ptr x mem) 4583 for { 4584 off := v.AuxInt 4585 sym := v.Aux 4586 ptr := v.Args[0] 4587 v_1 := v.Args[1] 4588 if v_1.Op != OpAMD64MOVBQSX { 4589 break 4590 } 4591 x := v_1.Args[0] 4592 mem := v.Args[2] 4593 v.reset(OpAMD64MOVBstore) 4594 v.AuxInt = off 4595 v.Aux = sym 4596 v.AddArg(ptr) 4597 v.AddArg(x) 4598 v.AddArg(mem) 4599 return true 4600 } 4601 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 4602 // cond: 4603 // result: (MOVBstore [off] {sym} ptr x mem) 4604 for { 4605 off := v.AuxInt 4606 sym := v.Aux 4607 ptr := v.Args[0] 4608 v_1 := v.Args[1] 4609 if v_1.Op != OpAMD64MOVBQZX { 4610 break 4611 } 4612 x := v_1.Args[0] 4613 mem := v.Args[2] 4614 v.reset(OpAMD64MOVBstore) 4615 v.AuxInt = off 4616 v.Aux = sym 4617 v.AddArg(ptr) 4618 v.AddArg(x) 4619 v.AddArg(mem) 4620 return true 4621 } 4622 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 4623 // cond: is32Bit(off1+off2) 4624 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4625 for { 4626 off1 := v.AuxInt 4627 sym := v.Aux 4628 v_0 := v.Args[0] 4629 if v_0.Op != OpAMD64ADDQconst { 4630 break 4631 } 4632 off2 := v_0.AuxInt 4633 ptr := v_0.Args[0] 4634 val := v.Args[1] 4635 mem := v.Args[2] 4636 if !(is32Bit(off1 + off2)) { 4637 break 4638 } 4639 v.reset(OpAMD64MOVBstore) 4640 v.AuxInt = off1 + off2 4641 v.Aux = sym 4642 v.AddArg(ptr) 4643 v.AddArg(val) 4644 v.AddArg(mem) 4645 return true 4646 } 4647 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 4648 // cond: validOff(off) 4649 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 4650 for { 4651 off := v.AuxInt 4652 sym := v.Aux 4653 ptr := v.Args[0] 4654 v_1 := v.Args[1] 4655 if v_1.Op != OpAMD64MOVLconst { 4656 break 4657 } 4658 c := v_1.AuxInt 4659 mem := v.Args[2] 4660 if !(validOff(off)) { 4661 break 4662 } 4663 v.reset(OpAMD64MOVBstoreconst) 4664 v.AuxInt = makeValAndOff(int64(int8(c)), off) 4665 v.Aux = sym 4666 v.AddArg(ptr) 4667 v.AddArg(mem) 4668 return true 4669 } 4670 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4671 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4672 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4673 for { 4674 off1 := v.AuxInt 4675 sym1 := v.Aux 4676 v_0 := v.Args[0] 4677 if v_0.Op != OpAMD64LEAQ { 4678 break 4679 } 4680 off2 := v_0.AuxInt 4681 sym2 := v_0.Aux 4682 base := v_0.Args[0] 4683 val := v.Args[1] 4684 mem := v.Args[2] 4685 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4686 break 4687 } 4688 v.reset(OpAMD64MOVBstore) 4689 v.AuxInt = off1 + off2 4690 v.Aux = mergeSym(sym1, sym2) 4691 v.AddArg(base) 4692 v.AddArg(val) 4693 v.AddArg(mem) 4694 return true 4695 } 4696 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 4697 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4698 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 4699 for { 4700 off1 := v.AuxInt 4701 sym1 := v.Aux 4702 v_0 := v.Args[0] 4703 if v_0.Op != OpAMD64LEAQ1 { 4704 break 4705 } 4706 off2 := v_0.AuxInt 4707 sym2 := v_0.Aux 4708 ptr := 
v_0.Args[0] 4709 idx := v_0.Args[1] 4710 val := v.Args[1] 4711 mem := v.Args[2] 4712 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4713 break 4714 } 4715 v.reset(OpAMD64MOVBstoreidx1) 4716 v.AuxInt = off1 + off2 4717 v.Aux = mergeSym(sym1, sym2) 4718 v.AddArg(ptr) 4719 v.AddArg(idx) 4720 v.AddArg(val) 4721 v.AddArg(mem) 4722 return true 4723 } 4724 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 4725 // cond: ptr.Op != OpSB 4726 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 4727 for { 4728 off := v.AuxInt 4729 sym := v.Aux 4730 v_0 := v.Args[0] 4731 if v_0.Op != OpAMD64ADDQ { 4732 break 4733 } 4734 ptr := v_0.Args[0] 4735 idx := v_0.Args[1] 4736 val := v.Args[1] 4737 mem := v.Args[2] 4738 if !(ptr.Op != OpSB) { 4739 break 4740 } 4741 v.reset(OpAMD64MOVBstoreidx1) 4742 v.AuxInt = off 4743 v.Aux = sym 4744 v.AddArg(ptr) 4745 v.AddArg(idx) 4746 v.AddArg(val) 4747 v.AddArg(mem) 4748 return true 4749 } 4750 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 4751 // cond: x0.Uses == 1 && clobber(x0) 4752 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 4753 for { 4754 i := v.AuxInt 4755 s := v.Aux 4756 p := v.Args[0] 4757 w := v.Args[1] 4758 x0 := v.Args[2] 4759 if x0.Op != OpAMD64MOVBstore { 4760 break 4761 } 4762 if x0.AuxInt != i-1 { 4763 break 4764 } 4765 if x0.Aux != s { 4766 break 4767 } 4768 if p != x0.Args[0] { 4769 break 4770 } 4771 x0_1 := x0.Args[1] 4772 if x0_1.Op != OpAMD64SHRWconst { 4773 break 4774 } 4775 if x0_1.AuxInt != 8 { 4776 break 4777 } 4778 if w != x0_1.Args[0] { 4779 break 4780 } 4781 mem := x0.Args[2] 4782 if !(x0.Uses == 1 && clobber(x0)) { 4783 break 4784 } 4785 v.reset(OpAMD64MOVWstore) 4786 v.AuxInt = i - 1 4787 v.Aux = s 4788 v.AddArg(p) 4789 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 4790 v0.AuxInt = 8 4791 v0.AddArg(w) 4792 v.AddArg(v0) 4793 v.AddArg(mem) 4794 return true 4795 } 4796 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 4797 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 4798 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 4799 for { 4800 i := v.AuxInt 4801 s := v.Aux 4802 p := v.Args[0] 4803 w := v.Args[1] 4804 x2 := v.Args[2] 4805 if x2.Op != OpAMD64MOVBstore { 4806 break 4807 } 4808 if x2.AuxInt != i-1 { 4809 break 4810 } 4811 if x2.Aux != s { 4812 break 4813 } 4814 if p != x2.Args[0] { 4815 break 4816 } 4817 x2_1 := x2.Args[1] 4818 if x2_1.Op != OpAMD64SHRLconst { 4819 break 4820 } 4821 if x2_1.AuxInt != 8 { 4822 break 4823 } 4824 if w != x2_1.Args[0] { 4825 break 4826 } 4827 x1 := x2.Args[2] 4828 if x1.Op != OpAMD64MOVBstore { 4829 break 4830 } 4831 if x1.AuxInt != i-2 { 4832 break 4833 } 4834 if x1.Aux != s { 4835 break 4836 } 4837 if p != x1.Args[0] { 4838 break 4839 } 4840 x1_1 := x1.Args[1] 4841 if x1_1.Op != OpAMD64SHRLconst { 4842 break 4843 } 4844 if x1_1.AuxInt != 16 { 4845 break 4846 } 4847 if w != x1_1.Args[0] { 4848 break 4849 } 4850 x0 := x1.Args[2] 4851 if x0.Op != OpAMD64MOVBstore { 4852 break 4853 } 4854 if x0.AuxInt != i-3 { 4855 break 4856 } 4857 if x0.Aux != s { 4858 break 4859 } 4860 if p != x0.Args[0] { 4861 break 4862 } 4863 x0_1 := x0.Args[1] 4864 if x0_1.Op != OpAMD64SHRLconst { 4865 break 4866 } 4867 if x0_1.AuxInt != 24 { 4868 break 4869 } 4870 if w != x0_1.Args[0] { 4871 break 4872 } 4873 mem := x0.Args[2] 4874 if !(x0.Uses == 1 && x1.Uses == 1 && 
x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 4875 break 4876 } 4877 v.reset(OpAMD64MOVLstore) 4878 v.AuxInt = i - 3 4879 v.Aux = s 4880 v.AddArg(p) 4881 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 4882 v0.AddArg(w) 4883 v.AddArg(v0) 4884 v.AddArg(mem) 4885 return true 4886 } 4887 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 4888 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 4889 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 4890 for { 4891 i := v.AuxInt 4892 s := v.Aux 4893 p := v.Args[0] 4894 w := v.Args[1] 4895 x6 := v.Args[2] 4896 if x6.Op != OpAMD64MOVBstore { 4897 break 4898 } 4899 if x6.AuxInt != i-1 { 4900 break 4901 } 4902 if x6.Aux != s { 4903 break 4904 } 4905 if p != x6.Args[0] { 4906 break 4907 } 4908 x6_1 := x6.Args[1] 4909 if x6_1.Op != OpAMD64SHRQconst { 4910 break 4911 } 4912 if x6_1.AuxInt != 8 { 4913 break 4914 } 4915 if w != x6_1.Args[0] { 4916 break 4917 } 4918 x5 := x6.Args[2] 4919 if x5.Op != OpAMD64MOVBstore { 4920 break 4921 } 4922 if x5.AuxInt != i-2 { 4923 break 4924 } 4925 if x5.Aux != s { 4926 break 4927 } 4928 if p != x5.Args[0] { 4929 break 4930 } 4931 x5_1 := x5.Args[1] 4932 if x5_1.Op != OpAMD64SHRQconst { 4933 break 4934 } 4935 if x5_1.AuxInt != 16 { 4936 break 4937 } 4938 if w != x5_1.Args[0] { 4939 break 4940 } 4941 x4 := x5.Args[2] 4942 if x4.Op != OpAMD64MOVBstore { 4943 break 4944 } 4945 if x4.AuxInt != i-3 { 4946 break 4947 } 4948 if x4.Aux != s { 4949 break 4950 } 4951 if p != x4.Args[0] { 4952 break 4953 } 4954 x4_1 := x4.Args[1] 4955 if x4_1.Op != OpAMD64SHRQconst { 4956 break 4957 } 4958 if x4_1.AuxInt != 24 { 4959 break 4960 } 4961 if w != x4_1.Args[0] { 4962 break 4963 } 4964 x3 := x4.Args[2] 4965 if x3.Op != OpAMD64MOVBstore { 4966 break 4967 } 4968 if x3.AuxInt != i-4 { 4969 break 4970 } 4971 if x3.Aux != s { 4972 break 4973 } 4974 if p != x3.Args[0] { 4975 break 4976 } 4977 x3_1 := x3.Args[1] 4978 if x3_1.Op != OpAMD64SHRQconst { 4979 break 4980 } 4981 if x3_1.AuxInt != 32 { 4982 break 4983 } 4984 if w != x3_1.Args[0] { 4985 break 4986 } 4987 x2 := x3.Args[2] 4988 if x2.Op != OpAMD64MOVBstore { 4989 break 4990 } 4991 if x2.AuxInt != i-5 { 4992 break 4993 } 4994 if x2.Aux != s { 4995 break 4996 } 4997 if p != x2.Args[0] { 4998 break 4999 } 5000 x2_1 := x2.Args[1] 5001 if x2_1.Op != OpAMD64SHRQconst { 5002 break 5003 } 5004 if x2_1.AuxInt != 40 { 5005 break 5006 } 5007 if w != x2_1.Args[0] { 5008 break 5009 } 5010 x1 := x2.Args[2] 5011 if x1.Op != OpAMD64MOVBstore { 5012 break 5013 } 5014 if x1.AuxInt != i-6 { 5015 break 5016 } 5017 if x1.Aux != s { 5018 break 5019 } 5020 if p != x1.Args[0] { 5021 break 5022 } 5023 x1_1 := x1.Args[1] 5024 if x1_1.Op != OpAMD64SHRQconst { 5025 break 5026 } 5027 if x1_1.AuxInt != 48 { 5028 break 5029 } 5030 if w != x1_1.Args[0] { 5031 break 5032 } 5033 x0 := x1.Args[2] 5034 if x0.Op != OpAMD64MOVBstore { 5035 break 5036 } 5037 if x0.AuxInt != i-7 { 5038 break 5039 } 5040 if x0.Aux != s { 5041 break 5042 } 5043 if p != x0.Args[0] { 5044 break 5045 } 5046 x0_1 := x0.Args[1] 5047 if x0_1.Op != 
OpAMD64SHRQconst { 5048 break 5049 } 5050 if x0_1.AuxInt != 56 { 5051 break 5052 } 5053 if w != x0_1.Args[0] { 5054 break 5055 } 5056 mem := x0.Args[2] 5057 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 5058 break 5059 } 5060 v.reset(OpAMD64MOVQstore) 5061 v.AuxInt = i - 7 5062 v.Aux = s 5063 v.AddArg(p) 5064 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 5065 v0.AddArg(w) 5066 v.AddArg(v0) 5067 v.AddArg(mem) 5068 return true 5069 } 5070 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 5071 // cond: x.Uses == 1 && clobber(x) 5072 // result: (MOVWstore [i-1] {s} p w mem) 5073 for { 5074 i := v.AuxInt 5075 s := v.Aux 5076 p := v.Args[0] 5077 v_1 := v.Args[1] 5078 if v_1.Op != OpAMD64SHRQconst { 5079 break 5080 } 5081 if v_1.AuxInt != 8 { 5082 break 5083 } 5084 w := v_1.Args[0] 5085 x := v.Args[2] 5086 if x.Op != OpAMD64MOVBstore { 5087 break 5088 } 5089 if x.AuxInt != i-1 { 5090 break 5091 } 5092 if x.Aux != s { 5093 break 5094 } 5095 if p != x.Args[0] { 5096 break 5097 } 5098 if w != x.Args[1] { 5099 break 5100 } 5101 mem := x.Args[2] 5102 if !(x.Uses == 1 && clobber(x)) { 5103 break 5104 } 5105 v.reset(OpAMD64MOVWstore) 5106 v.AuxInt = i - 1 5107 v.Aux = s 5108 v.AddArg(p) 5109 v.AddArg(w) 5110 v.AddArg(mem) 5111 return true 5112 } 5113 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 5114 // cond: x.Uses == 1 && clobber(x) 5115 // result: (MOVWstore [i-1] {s} p w0 mem) 5116 for { 5117 i := v.AuxInt 5118 s := v.Aux 5119 p := v.Args[0] 5120 v_1 := v.Args[1] 5121 if v_1.Op != OpAMD64SHRQconst { 5122 break 5123 } 5124 j := v_1.AuxInt 5125 w := v_1.Args[0] 5126 x := v.Args[2] 5127 if x.Op != OpAMD64MOVBstore { 5128 break 5129 } 5130 if x.AuxInt != i-1 { 5131 break 5132 } 5133 if x.Aux != s { 5134 break 5135 } 5136 if p != x.Args[0] { 5137 break 5138 } 5139 w0 := x.Args[1] 5140 if w0.Op != OpAMD64SHRQconst { 5141 break 5142 } 5143 if w0.AuxInt != j-8 { 5144 break 5145 } 5146 if w != w0.Args[0] { 5147 break 5148 } 5149 mem := x.Args[2] 5150 if !(x.Uses == 1 && clobber(x)) { 5151 break 5152 } 5153 v.reset(OpAMD64MOVWstore) 5154 v.AuxInt = i - 1 5155 v.Aux = s 5156 v.AddArg(p) 5157 v.AddArg(w0) 5158 v.AddArg(mem) 5159 return true 5160 } 5161 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5162 // cond: canMergeSym(sym1, sym2) 5163 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5164 for { 5165 off1 := v.AuxInt 5166 sym1 := v.Aux 5167 v_0 := v.Args[0] 5168 if v_0.Op != OpAMD64LEAL { 5169 break 5170 } 5171 off2 := v_0.AuxInt 5172 sym2 := v_0.Aux 5173 base := v_0.Args[0] 5174 val := v.Args[1] 5175 mem := v.Args[2] 5176 if !(canMergeSym(sym1, sym2)) { 5177 break 5178 } 5179 v.reset(OpAMD64MOVBstore) 5180 v.AuxInt = off1 + off2 5181 v.Aux = mergeSym(sym1, sym2) 5182 v.AddArg(base) 5183 v.AddArg(val) 5184 v.AddArg(mem) 5185 return true 5186 } 5187 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5188 // cond: is32Bit(off1+off2) 5189 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 5190 for { 5191 off1 := v.AuxInt 5192 sym := v.Aux 5193 v_0 := v.Args[0] 5194 if v_0.Op != OpAMD64ADDLconst { 5195 break 5196 } 5197 off2 := v_0.AuxInt 5198 ptr := v_0.Args[0] 5199 val := v.Args[1] 5200 mem := v.Args[2] 5201 if !(is32Bit(off1 + off2)) { 5202 break 5203 } 5204 v.reset(OpAMD64MOVBstore) 
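// Rebuild the byte store with the folded ADDLconst offset; the original ptr, val, and mem arguments are reattached below.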
5205 v.AuxInt = off1 + off2 5206 v.Aux = sym 5207 v.AddArg(ptr) 5208 v.AddArg(val) 5209 v.AddArg(mem) 5210 return true 5211 } 5212 return false 5213 } 5214 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { 5215 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5216 // cond: ValAndOff(sc).canAdd(off) 5217 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5218 for { 5219 sc := v.AuxInt 5220 s := v.Aux 5221 v_0 := v.Args[0] 5222 if v_0.Op != OpAMD64ADDQconst { 5223 break 5224 } 5225 off := v_0.AuxInt 5226 ptr := v_0.Args[0] 5227 mem := v.Args[1] 5228 if !(ValAndOff(sc).canAdd(off)) { 5229 break 5230 } 5231 v.reset(OpAMD64MOVBstoreconst) 5232 v.AuxInt = ValAndOff(sc).add(off) 5233 v.Aux = s 5234 v.AddArg(ptr) 5235 v.AddArg(mem) 5236 return true 5237 } 5238 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5239 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5240 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5241 for { 5242 sc := v.AuxInt 5243 sym1 := v.Aux 5244 v_0 := v.Args[0] 5245 if v_0.Op != OpAMD64LEAQ { 5246 break 5247 } 5248 off := v_0.AuxInt 5249 sym2 := v_0.Aux 5250 ptr := v_0.Args[0] 5251 mem := v.Args[1] 5252 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5253 break 5254 } 5255 v.reset(OpAMD64MOVBstoreconst) 5256 v.AuxInt = ValAndOff(sc).add(off) 5257 v.Aux = mergeSym(sym1, sym2) 5258 v.AddArg(ptr) 5259 v.AddArg(mem) 5260 return true 5261 } 5262 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5263 // cond: canMergeSym(sym1, sym2) 5264 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5265 for { 5266 x := v.AuxInt 5267 sym1 := v.Aux 5268 v_0 := v.Args[0] 5269 if v_0.Op != OpAMD64LEAQ1 { 5270 break 5271 } 5272 off := v_0.AuxInt 5273 sym2 := v_0.Aux 5274 ptr := v_0.Args[0] 5275 idx := v_0.Args[1] 5276 mem := v.Args[1] 5277 if !(canMergeSym(sym1, sym2)) { 5278 break 5279 } 5280 v.reset(OpAMD64MOVBstoreconstidx1) 5281 v.AuxInt = ValAndOff(x).add(off) 5282 v.Aux = mergeSym(sym1, sym2) 5283 v.AddArg(ptr) 5284 v.AddArg(idx) 5285 v.AddArg(mem) 5286 return true 5287 } 5288 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 5289 // cond: 5290 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 5291 for { 5292 x := v.AuxInt 5293 sym := v.Aux 5294 v_0 := v.Args[0] 5295 if v_0.Op != OpAMD64ADDQ { 5296 break 5297 } 5298 ptr := v_0.Args[0] 5299 idx := v_0.Args[1] 5300 mem := v.Args[1] 5301 v.reset(OpAMD64MOVBstoreconstidx1) 5302 v.AuxInt = x 5303 v.Aux = sym 5304 v.AddArg(ptr) 5305 v.AddArg(idx) 5306 v.AddArg(mem) 5307 return true 5308 } 5309 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 5310 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 5311 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 5312 for { 5313 c := v.AuxInt 5314 s := v.Aux 5315 p := v.Args[0] 5316 x := v.Args[1] 5317 if x.Op != OpAMD64MOVBstoreconst { 5318 break 5319 } 5320 a := x.AuxInt 5321 if x.Aux != s { 5322 break 5323 } 5324 if p != x.Args[0] { 5325 break 5326 } 5327 mem := x.Args[1] 5328 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 5329 break 5330 } 5331 v.reset(OpAMD64MOVWstoreconst) 5332 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 5333 v.Aux = s 5334 v.AddArg(p) 5335 v.AddArg(mem) 5336 return true 5337 } 5338 // match: 
(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 5339 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5340 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5341 for { 5342 sc := v.AuxInt 5343 sym1 := v.Aux 5344 v_0 := v.Args[0] 5345 if v_0.Op != OpAMD64LEAL { 5346 break 5347 } 5348 off := v_0.AuxInt 5349 sym2 := v_0.Aux 5350 ptr := v_0.Args[0] 5351 mem := v.Args[1] 5352 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5353 break 5354 } 5355 v.reset(OpAMD64MOVBstoreconst) 5356 v.AuxInt = ValAndOff(sc).add(off) 5357 v.Aux = mergeSym(sym1, sym2) 5358 v.AddArg(ptr) 5359 v.AddArg(mem) 5360 return true 5361 } 5362 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 5363 // cond: ValAndOff(sc).canAdd(off) 5364 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5365 for { 5366 sc := v.AuxInt 5367 s := v.Aux 5368 v_0 := v.Args[0] 5369 if v_0.Op != OpAMD64ADDLconst { 5370 break 5371 } 5372 off := v_0.AuxInt 5373 ptr := v_0.Args[0] 5374 mem := v.Args[1] 5375 if !(ValAndOff(sc).canAdd(off)) { 5376 break 5377 } 5378 v.reset(OpAMD64MOVBstoreconst) 5379 v.AuxInt = ValAndOff(sc).add(off) 5380 v.Aux = s 5381 v.AddArg(ptr) 5382 v.AddArg(mem) 5383 return true 5384 } 5385 return false 5386 } 5387 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool { 5388 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 5389 // cond: 5390 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5391 for { 5392 x := v.AuxInt 5393 sym := v.Aux 5394 v_0 := v.Args[0] 5395 if v_0.Op != OpAMD64ADDQconst { 5396 break 5397 } 5398 c := v_0.AuxInt 5399 ptr := v_0.Args[0] 5400 idx := v.Args[1] 5401 mem := v.Args[2] 5402 v.reset(OpAMD64MOVBstoreconstidx1) 5403 v.AuxInt = ValAndOff(x).add(c) 5404 v.Aux = sym 5405 v.AddArg(ptr) 5406 v.AddArg(idx) 5407 v.AddArg(mem) 5408 return true 5409 } 5410 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 5411 // cond: 5412 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5413 for { 5414 x := v.AuxInt 5415 sym := v.Aux 5416 ptr := v.Args[0] 5417 v_1 := v.Args[1] 5418 if v_1.Op != OpAMD64ADDQconst { 5419 break 5420 } 5421 c := v_1.AuxInt 5422 idx := v_1.Args[0] 5423 mem := v.Args[2] 5424 v.reset(OpAMD64MOVBstoreconstidx1) 5425 v.AuxInt = ValAndOff(x).add(c) 5426 v.Aux = sym 5427 v.AddArg(ptr) 5428 v.AddArg(idx) 5429 v.AddArg(mem) 5430 return true 5431 } 5432 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 5433 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 5434 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 5435 for { 5436 c := v.AuxInt 5437 s := v.Aux 5438 p := v.Args[0] 5439 i := v.Args[1] 5440 x := v.Args[2] 5441 if x.Op != OpAMD64MOVBstoreconstidx1 { 5442 break 5443 } 5444 a := x.AuxInt 5445 if x.Aux != s { 5446 break 5447 } 5448 if p != x.Args[0] { 5449 break 5450 } 5451 if i != x.Args[1] { 5452 break 5453 } 5454 mem := x.Args[2] 5455 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 5456 break 5457 } 5458 v.reset(OpAMD64MOVWstoreconstidx1) 5459 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 5460 v.Aux = s 5461 v.AddArg(p) 5462 v.AddArg(i) 5463 v.AddArg(mem) 5464 return true 5465 } 5466 return false 5467 } 5468 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { 5469 b := v.Block 5470 _ = 
b 5471 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 5472 // cond: 5473 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 5474 for { 5475 c := v.AuxInt 5476 sym := v.Aux 5477 v_0 := v.Args[0] 5478 if v_0.Op != OpAMD64ADDQconst { 5479 break 5480 } 5481 d := v_0.AuxInt 5482 ptr := v_0.Args[0] 5483 idx := v.Args[1] 5484 val := v.Args[2] 5485 mem := v.Args[3] 5486 v.reset(OpAMD64MOVBstoreidx1) 5487 v.AuxInt = c + d 5488 v.Aux = sym 5489 v.AddArg(ptr) 5490 v.AddArg(idx) 5491 v.AddArg(val) 5492 v.AddArg(mem) 5493 return true 5494 } 5495 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 5496 // cond: 5497 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 5498 for { 5499 c := v.AuxInt 5500 sym := v.Aux 5501 ptr := v.Args[0] 5502 v_1 := v.Args[1] 5503 if v_1.Op != OpAMD64ADDQconst { 5504 break 5505 } 5506 d := v_1.AuxInt 5507 idx := v_1.Args[0] 5508 val := v.Args[2] 5509 mem := v.Args[3] 5510 v.reset(OpAMD64MOVBstoreidx1) 5511 v.AuxInt = c + d 5512 v.Aux = sym 5513 v.AddArg(ptr) 5514 v.AddArg(idx) 5515 v.AddArg(val) 5516 v.AddArg(mem) 5517 return true 5518 } 5519 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 5520 // cond: x0.Uses == 1 && clobber(x0) 5521 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 5522 for { 5523 i := v.AuxInt 5524 s := v.Aux 5525 p := v.Args[0] 5526 idx := v.Args[1] 5527 w := v.Args[2] 5528 x0 := v.Args[3] 5529 if x0.Op != OpAMD64MOVBstoreidx1 { 5530 break 5531 } 5532 if x0.AuxInt != i-1 { 5533 break 5534 } 5535 if x0.Aux != s { 5536 break 5537 } 5538 if p != x0.Args[0] { 5539 break 5540 } 5541 if idx != x0.Args[1] { 5542 break 5543 } 5544 x0_2 := x0.Args[2] 5545 if x0_2.Op != OpAMD64SHRWconst { 5546 break 5547 } 5548 if x0_2.AuxInt != 8 { 5549 break 5550 } 5551 if w != x0_2.Args[0] { 5552 break 5553 } 5554 mem := x0.Args[3] 5555 if !(x0.Uses == 1 && clobber(x0)) { 5556 break 5557 } 5558 v.reset(OpAMD64MOVWstoreidx1) 5559 v.AuxInt = i - 1 5560 v.Aux = s 5561 v.AddArg(p) 5562 v.AddArg(idx) 5563 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 5564 v0.AuxInt = 8 5565 v0.AddArg(w) 5566 v.AddArg(v0) 5567 v.AddArg(mem) 5568 return true 5569 } 5570 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 5571 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 5572 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 5573 for { 5574 i := v.AuxInt 5575 s := v.Aux 5576 p := v.Args[0] 5577 idx := v.Args[1] 5578 w := v.Args[2] 5579 x2 := v.Args[3] 5580 if x2.Op != OpAMD64MOVBstoreidx1 { 5581 break 5582 } 5583 if x2.AuxInt != i-1 { 5584 break 5585 } 5586 if x2.Aux != s { 5587 break 5588 } 5589 if p != x2.Args[0] { 5590 break 5591 } 5592 if idx != x2.Args[1] { 5593 break 5594 } 5595 x2_2 := x2.Args[2] 5596 if x2_2.Op != OpAMD64SHRLconst { 5597 break 5598 } 5599 if x2_2.AuxInt != 8 { 5600 break 5601 } 5602 if w != x2_2.Args[0] { 5603 break 5604 } 5605 x1 := x2.Args[3] 5606 if x1.Op != OpAMD64MOVBstoreidx1 { 5607 break 5608 } 5609 if x1.AuxInt != i-2 { 5610 break 5611 } 5612 if x1.Aux != s { 5613 break 5614 } 5615 if p != x1.Args[0] { 5616 break 5617 } 5618 if idx != x1.Args[1] { 5619 break 5620 } 5621 x1_2 := x1.Args[2] 5622 if x1_2.Op != OpAMD64SHRLconst { 5623 break 5624 } 5625 if x1_2.AuxInt != 16 { 5626 break 5627 } 5628 if w != x1_2.Args[0] { 5629 
break 5630 } 5631 x0 := x1.Args[3] 5632 if x0.Op != OpAMD64MOVBstoreidx1 { 5633 break 5634 } 5635 if x0.AuxInt != i-3 { 5636 break 5637 } 5638 if x0.Aux != s { 5639 break 5640 } 5641 if p != x0.Args[0] { 5642 break 5643 } 5644 if idx != x0.Args[1] { 5645 break 5646 } 5647 x0_2 := x0.Args[2] 5648 if x0_2.Op != OpAMD64SHRLconst { 5649 break 5650 } 5651 if x0_2.AuxInt != 24 { 5652 break 5653 } 5654 if w != x0_2.Args[0] { 5655 break 5656 } 5657 mem := x0.Args[3] 5658 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5659 break 5660 } 5661 v.reset(OpAMD64MOVLstoreidx1) 5662 v.AuxInt = i - 3 5663 v.Aux = s 5664 v.AddArg(p) 5665 v.AddArg(idx) 5666 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5667 v0.AddArg(w) 5668 v.AddArg(v0) 5669 v.AddArg(mem) 5670 return true 5671 } 5672 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 5673 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 5674 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 5675 for { 5676 i := v.AuxInt 5677 s := v.Aux 5678 p := v.Args[0] 5679 idx := v.Args[1] 5680 w := v.Args[2] 5681 x6 := v.Args[3] 5682 if x6.Op != OpAMD64MOVBstoreidx1 { 5683 break 5684 } 5685 if x6.AuxInt != i-1 { 5686 break 5687 } 5688 if x6.Aux != s { 5689 break 5690 } 5691 if p != x6.Args[0] { 5692 break 5693 } 5694 if idx != x6.Args[1] { 5695 break 5696 } 5697 x6_2 := x6.Args[2] 5698 if x6_2.Op != OpAMD64SHRQconst { 5699 break 5700 } 5701 if x6_2.AuxInt != 8 { 5702 break 5703 } 5704 if w != x6_2.Args[0] { 5705 break 5706 } 5707 x5 := x6.Args[3] 5708 if x5.Op != OpAMD64MOVBstoreidx1 { 5709 break 5710 } 5711 if x5.AuxInt != i-2 { 5712 break 5713 } 5714 if x5.Aux != s { 5715 break 5716 } 5717 if p != x5.Args[0] { 5718 break 5719 } 5720 if idx != x5.Args[1] { 5721 break 5722 } 5723 x5_2 := x5.Args[2] 5724 if x5_2.Op != OpAMD64SHRQconst { 5725 break 5726 } 5727 if x5_2.AuxInt != 16 { 5728 break 5729 } 5730 if w != x5_2.Args[0] { 5731 break 5732 } 5733 x4 := x5.Args[3] 5734 if x4.Op != OpAMD64MOVBstoreidx1 { 5735 break 5736 } 5737 if x4.AuxInt != i-3 { 5738 break 5739 } 5740 if x4.Aux != s { 5741 break 5742 } 5743 if p != x4.Args[0] { 5744 break 5745 } 5746 if idx != x4.Args[1] { 5747 break 5748 } 5749 x4_2 := x4.Args[2] 5750 if x4_2.Op != OpAMD64SHRQconst { 5751 break 5752 } 5753 if x4_2.AuxInt != 24 { 5754 break 5755 } 5756 if w != x4_2.Args[0] { 5757 break 5758 } 5759 x3 := x4.Args[3] 5760 if x3.Op != OpAMD64MOVBstoreidx1 { 5761 break 5762 } 5763 if x3.AuxInt != i-4 { 5764 break 5765 } 5766 if x3.Aux != s { 5767 break 5768 } 5769 if p != x3.Args[0] { 5770 break 5771 } 5772 if idx != x3.Args[1] { 5773 break 5774 } 5775 x3_2 := x3.Args[2] 5776 if x3_2.Op != OpAMD64SHRQconst { 5777 break 5778 } 5779 if x3_2.AuxInt != 32 { 5780 break 5781 } 5782 if w != x3_2.Args[0] { 5783 break 5784 } 5785 x2 := x3.Args[3] 5786 if x2.Op != OpAMD64MOVBstoreidx1 { 5787 break 5788 } 5789 if x2.AuxInt != i-5 { 5790 break 5791 } 5792 if x2.Aux != s { 5793 break 5794 } 5795 if p != x2.Args[0] 
{ 5796 break 5797 } 5798 if idx != x2.Args[1] { 5799 break 5800 } 5801 x2_2 := x2.Args[2] 5802 if x2_2.Op != OpAMD64SHRQconst { 5803 break 5804 } 5805 if x2_2.AuxInt != 40 { 5806 break 5807 } 5808 if w != x2_2.Args[0] { 5809 break 5810 } 5811 x1 := x2.Args[3] 5812 if x1.Op != OpAMD64MOVBstoreidx1 { 5813 break 5814 } 5815 if x1.AuxInt != i-6 { 5816 break 5817 } 5818 if x1.Aux != s { 5819 break 5820 } 5821 if p != x1.Args[0] { 5822 break 5823 } 5824 if idx != x1.Args[1] { 5825 break 5826 } 5827 x1_2 := x1.Args[2] 5828 if x1_2.Op != OpAMD64SHRQconst { 5829 break 5830 } 5831 if x1_2.AuxInt != 48 { 5832 break 5833 } 5834 if w != x1_2.Args[0] { 5835 break 5836 } 5837 x0 := x1.Args[3] 5838 if x0.Op != OpAMD64MOVBstoreidx1 { 5839 break 5840 } 5841 if x0.AuxInt != i-7 { 5842 break 5843 } 5844 if x0.Aux != s { 5845 break 5846 } 5847 if p != x0.Args[0] { 5848 break 5849 } 5850 if idx != x0.Args[1] { 5851 break 5852 } 5853 x0_2 := x0.Args[2] 5854 if x0_2.Op != OpAMD64SHRQconst { 5855 break 5856 } 5857 if x0_2.AuxInt != 56 { 5858 break 5859 } 5860 if w != x0_2.Args[0] { 5861 break 5862 } 5863 mem := x0.Args[3] 5864 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 5865 break 5866 } 5867 v.reset(OpAMD64MOVQstoreidx1) 5868 v.AuxInt = i - 7 5869 v.Aux = s 5870 v.AddArg(p) 5871 v.AddArg(idx) 5872 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 5873 v0.AddArg(w) 5874 v.AddArg(v0) 5875 v.AddArg(mem) 5876 return true 5877 } 5878 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 5879 // cond: x.Uses == 1 && clobber(x) 5880 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 5881 for { 5882 i := v.AuxInt 5883 s := v.Aux 5884 p := v.Args[0] 5885 idx := v.Args[1] 5886 v_2 := v.Args[2] 5887 if v_2.Op != OpAMD64SHRQconst { 5888 break 5889 } 5890 if v_2.AuxInt != 8 { 5891 break 5892 } 5893 w := v_2.Args[0] 5894 x := v.Args[3] 5895 if x.Op != OpAMD64MOVBstoreidx1 { 5896 break 5897 } 5898 if x.AuxInt != i-1 { 5899 break 5900 } 5901 if x.Aux != s { 5902 break 5903 } 5904 if p != x.Args[0] { 5905 break 5906 } 5907 if idx != x.Args[1] { 5908 break 5909 } 5910 if w != x.Args[2] { 5911 break 5912 } 5913 mem := x.Args[3] 5914 if !(x.Uses == 1 && clobber(x)) { 5915 break 5916 } 5917 v.reset(OpAMD64MOVWstoreidx1) 5918 v.AuxInt = i - 1 5919 v.Aux = s 5920 v.AddArg(p) 5921 v.AddArg(idx) 5922 v.AddArg(w) 5923 v.AddArg(mem) 5924 return true 5925 } 5926 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 5927 // cond: x.Uses == 1 && clobber(x) 5928 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 5929 for { 5930 i := v.AuxInt 5931 s := v.Aux 5932 p := v.Args[0] 5933 idx := v.Args[1] 5934 v_2 := v.Args[2] 5935 if v_2.Op != OpAMD64SHRQconst { 5936 break 5937 } 5938 j := v_2.AuxInt 5939 w := v_2.Args[0] 5940 x := v.Args[3] 5941 if x.Op != OpAMD64MOVBstoreidx1 { 5942 break 5943 } 5944 if x.AuxInt != i-1 { 5945 break 5946 } 5947 if x.Aux != s { 5948 break 5949 } 5950 if p != x.Args[0] { 5951 break 5952 } 5953 if idx != x.Args[1] { 5954 break 5955 } 5956 w0 := x.Args[2] 5957 if w0.Op != OpAMD64SHRQconst { 5958 break 5959 } 5960 if w0.AuxInt != j-8 { 5961 break 5962 } 5963 if w != w0.Args[0] { 5964 break 5965 } 5966 mem := x.Args[3] 5967 if !(x.Uses == 1 && clobber(x)) { 5968 break 5969 } 5970 v.reset(OpAMD64MOVWstoreidx1) 5971 v.AuxInt = i - 1 5972 v.Aux = 
s 5973 v.AddArg(p) 5974 v.AddArg(idx) 5975 v.AddArg(w0) 5976 v.AddArg(mem) 5977 return true 5978 } 5979 return false 5980 } 5981 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { 5982 b := v.Block 5983 _ = b 5984 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 5985 // cond: x.Uses == 1 && clobber(x) 5986 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 5987 for { 5988 x := v.Args[0] 5989 if x.Op != OpAMD64MOVLload { 5990 break 5991 } 5992 off := x.AuxInt 5993 sym := x.Aux 5994 ptr := x.Args[0] 5995 mem := x.Args[1] 5996 if !(x.Uses == 1 && clobber(x)) { 5997 break 5998 } 5999 b = x.Block 6000 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 6001 v.reset(OpCopy) 6002 v.AddArg(v0) 6003 v0.AuxInt = off 6004 v0.Aux = sym 6005 v0.AddArg(ptr) 6006 v0.AddArg(mem) 6007 return true 6008 } 6009 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 6010 // cond: x.Uses == 1 && clobber(x) 6011 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 6012 for { 6013 x := v.Args[0] 6014 if x.Op != OpAMD64MOVQload { 6015 break 6016 } 6017 off := x.AuxInt 6018 sym := x.Aux 6019 ptr := x.Args[0] 6020 mem := x.Args[1] 6021 if !(x.Uses == 1 && clobber(x)) { 6022 break 6023 } 6024 b = x.Block 6025 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 6026 v.reset(OpCopy) 6027 v.AddArg(v0) 6028 v0.AuxInt = off 6029 v0.Aux = sym 6030 v0.AddArg(ptr) 6031 v0.AddArg(mem) 6032 return true 6033 } 6034 // match: (MOVLQSX (ANDLconst [c] x)) 6035 // cond: c & 0x80000000 == 0 6036 // result: (ANDLconst [c & 0x7fffffff] x) 6037 for { 6038 v_0 := v.Args[0] 6039 if v_0.Op != OpAMD64ANDLconst { 6040 break 6041 } 6042 c := v_0.AuxInt 6043 x := v_0.Args[0] 6044 if !(c&0x80000000 == 0) { 6045 break 6046 } 6047 v.reset(OpAMD64ANDLconst) 6048 v.AuxInt = c & 0x7fffffff 6049 v.AddArg(x) 6050 return true 6051 } 6052 // match: (MOVLQSX x:(MOVLQSX _)) 6053 // cond: 6054 // result: x 6055 for { 6056 x := v.Args[0] 6057 if x.Op != OpAMD64MOVLQSX { 6058 break 6059 } 6060 v.reset(OpCopy) 6061 v.Type = x.Type 6062 v.AddArg(x) 6063 return true 6064 } 6065 // match: (MOVLQSX x:(MOVWQSX _)) 6066 // cond: 6067 // result: x 6068 for { 6069 x := v.Args[0] 6070 if x.Op != OpAMD64MOVWQSX { 6071 break 6072 } 6073 v.reset(OpCopy) 6074 v.Type = x.Type 6075 v.AddArg(x) 6076 return true 6077 } 6078 // match: (MOVLQSX x:(MOVBQSX _)) 6079 // cond: 6080 // result: x 6081 for { 6082 x := v.Args[0] 6083 if x.Op != OpAMD64MOVBQSX { 6084 break 6085 } 6086 v.reset(OpCopy) 6087 v.Type = x.Type 6088 v.AddArg(x) 6089 return true 6090 } 6091 return false 6092 } 6093 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { 6094 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6095 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6096 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6097 for { 6098 off1 := v.AuxInt 6099 sym1 := v.Aux 6100 v_0 := v.Args[0] 6101 if v_0.Op != OpAMD64LEAQ { 6102 break 6103 } 6104 off2 := v_0.AuxInt 6105 sym2 := v_0.Aux 6106 base := v_0.Args[0] 6107 mem := v.Args[1] 6108 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6109 break 6110 } 6111 v.reset(OpAMD64MOVLQSXload) 6112 v.AuxInt = off1 + off2 6113 v.Aux = mergeSym(sym1, sym2) 6114 v.AddArg(base) 6115 v.AddArg(mem) 6116 return true 6117 } 6118 return false 6119 } 6120 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { 6121 b := v.Block 6122 _ = b 6123 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 6124 // cond: x.Uses == 1 && clobber(x) 6125 // result: @x.Block (MOVLload <v.Type> [off] {sym} 
ptr mem) 6126 for { 6127 x := v.Args[0] 6128 if x.Op != OpAMD64MOVLload { 6129 break 6130 } 6131 off := x.AuxInt 6132 sym := x.Aux 6133 ptr := x.Args[0] 6134 mem := x.Args[1] 6135 if !(x.Uses == 1 && clobber(x)) { 6136 break 6137 } 6138 b = x.Block 6139 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 6140 v.reset(OpCopy) 6141 v.AddArg(v0) 6142 v0.AuxInt = off 6143 v0.Aux = sym 6144 v0.AddArg(ptr) 6145 v0.AddArg(mem) 6146 return true 6147 } 6148 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 6149 // cond: x.Uses == 1 && clobber(x) 6150 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 6151 for { 6152 x := v.Args[0] 6153 if x.Op != OpAMD64MOVQload { 6154 break 6155 } 6156 off := x.AuxInt 6157 sym := x.Aux 6158 ptr := x.Args[0] 6159 mem := x.Args[1] 6160 if !(x.Uses == 1 && clobber(x)) { 6161 break 6162 } 6163 b = x.Block 6164 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 6165 v.reset(OpCopy) 6166 v.AddArg(v0) 6167 v0.AuxInt = off 6168 v0.Aux = sym 6169 v0.AddArg(ptr) 6170 v0.AddArg(mem) 6171 return true 6172 } 6173 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 6174 // cond: x.Uses == 1 && clobber(x) 6175 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 6176 for { 6177 x := v.Args[0] 6178 if x.Op != OpAMD64MOVLloadidx1 { 6179 break 6180 } 6181 off := x.AuxInt 6182 sym := x.Aux 6183 ptr := x.Args[0] 6184 idx := x.Args[1] 6185 mem := x.Args[2] 6186 if !(x.Uses == 1 && clobber(x)) { 6187 break 6188 } 6189 b = x.Block 6190 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 6191 v.reset(OpCopy) 6192 v.AddArg(v0) 6193 v0.AuxInt = off 6194 v0.Aux = sym 6195 v0.AddArg(ptr) 6196 v0.AddArg(idx) 6197 v0.AddArg(mem) 6198 return true 6199 } 6200 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 6201 // cond: x.Uses == 1 && clobber(x) 6202 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 6203 for { 6204 x := v.Args[0] 6205 if x.Op != OpAMD64MOVLloadidx4 { 6206 break 6207 } 6208 off := x.AuxInt 6209 sym := x.Aux 6210 ptr := x.Args[0] 6211 idx := x.Args[1] 6212 mem := x.Args[2] 6213 if !(x.Uses == 1 && clobber(x)) { 6214 break 6215 } 6216 b = x.Block 6217 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 6218 v.reset(OpCopy) 6219 v.AddArg(v0) 6220 v0.AuxInt = off 6221 v0.Aux = sym 6222 v0.AddArg(ptr) 6223 v0.AddArg(idx) 6224 v0.AddArg(mem) 6225 return true 6226 } 6227 // match: (MOVLQZX (ANDLconst [c] x)) 6228 // cond: 6229 // result: (ANDLconst [c] x) 6230 for { 6231 v_0 := v.Args[0] 6232 if v_0.Op != OpAMD64ANDLconst { 6233 break 6234 } 6235 c := v_0.AuxInt 6236 x := v_0.Args[0] 6237 v.reset(OpAMD64ANDLconst) 6238 v.AuxInt = c 6239 v.AddArg(x) 6240 return true 6241 } 6242 // match: (MOVLQZX x:(MOVLQZX _)) 6243 // cond: 6244 // result: x 6245 for { 6246 x := v.Args[0] 6247 if x.Op != OpAMD64MOVLQZX { 6248 break 6249 } 6250 v.reset(OpCopy) 6251 v.Type = x.Type 6252 v.AddArg(x) 6253 return true 6254 } 6255 // match: (MOVLQZX x:(MOVWQZX _)) 6256 // cond: 6257 // result: x 6258 for { 6259 x := v.Args[0] 6260 if x.Op != OpAMD64MOVWQZX { 6261 break 6262 } 6263 v.reset(OpCopy) 6264 v.Type = x.Type 6265 v.AddArg(x) 6266 return true 6267 } 6268 // match: (MOVLQZX x:(MOVBQZX _)) 6269 // cond: 6270 // result: x 6271 for { 6272 x := v.Args[0] 6273 if x.Op != OpAMD64MOVBQZX { 6274 break 6275 } 6276 v.reset(OpCopy) 6277 v.Type = x.Type 6278 v.AddArg(x) 6279 return true 6280 } 6281 return false 6282 } 6283 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { 6284 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] 
ptr) mem) 6285 // cond: is32Bit(off1+off2) 6286 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 6287 for { 6288 off1 := v.AuxInt 6289 sym := v.Aux 6290 v_0 := v.Args[0] 6291 if v_0.Op != OpAMD64ADDQconst { 6292 break 6293 } 6294 off2 := v_0.AuxInt 6295 ptr := v_0.Args[0] 6296 mem := v.Args[1] 6297 if !(is32Bit(off1 + off2)) { 6298 break 6299 } 6300 v.reset(OpAMD64MOVLatomicload) 6301 v.AuxInt = off1 + off2 6302 v.Aux = sym 6303 v.AddArg(ptr) 6304 v.AddArg(mem) 6305 return true 6306 } 6307 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 6308 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6309 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 6310 for { 6311 off1 := v.AuxInt 6312 sym1 := v.Aux 6313 v_0 := v.Args[0] 6314 if v_0.Op != OpAMD64LEAQ { 6315 break 6316 } 6317 off2 := v_0.AuxInt 6318 sym2 := v_0.Aux 6319 ptr := v_0.Args[0] 6320 mem := v.Args[1] 6321 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6322 break 6323 } 6324 v.reset(OpAMD64MOVLatomicload) 6325 v.AuxInt = off1 + off2 6326 v.Aux = mergeSym(sym1, sym2) 6327 v.AddArg(ptr) 6328 v.AddArg(mem) 6329 return true 6330 } 6331 return false 6332 } 6333 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { 6334 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 6335 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6336 // result: x 6337 for { 6338 off := v.AuxInt 6339 sym := v.Aux 6340 ptr := v.Args[0] 6341 v_1 := v.Args[1] 6342 if v_1.Op != OpAMD64MOVLstore { 6343 break 6344 } 6345 off2 := v_1.AuxInt 6346 sym2 := v_1.Aux 6347 ptr2 := v_1.Args[0] 6348 x := v_1.Args[1] 6349 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6350 break 6351 } 6352 v.reset(OpCopy) 6353 v.Type = x.Type 6354 v.AddArg(x) 6355 return true 6356 } 6357 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 6358 // cond: is32Bit(off1+off2) 6359 // result: (MOVLload [off1+off2] {sym} ptr mem) 6360 for { 6361 off1 := v.AuxInt 6362 sym := v.Aux 6363 v_0 := v.Args[0] 6364 if v_0.Op != OpAMD64ADDQconst { 6365 break 6366 } 6367 off2 := v_0.AuxInt 6368 ptr := v_0.Args[0] 6369 mem := v.Args[1] 6370 if !(is32Bit(off1 + off2)) { 6371 break 6372 } 6373 v.reset(OpAMD64MOVLload) 6374 v.AuxInt = off1 + off2 6375 v.Aux = sym 6376 v.AddArg(ptr) 6377 v.AddArg(mem) 6378 return true 6379 } 6380 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6381 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6382 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6383 for { 6384 off1 := v.AuxInt 6385 sym1 := v.Aux 6386 v_0 := v.Args[0] 6387 if v_0.Op != OpAMD64LEAQ { 6388 break 6389 } 6390 off2 := v_0.AuxInt 6391 sym2 := v_0.Aux 6392 base := v_0.Args[0] 6393 mem := v.Args[1] 6394 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6395 break 6396 } 6397 v.reset(OpAMD64MOVLload) 6398 v.AuxInt = off1 + off2 6399 v.Aux = mergeSym(sym1, sym2) 6400 v.AddArg(base) 6401 v.AddArg(mem) 6402 return true 6403 } 6404 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6405 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6406 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6407 for { 6408 off1 := v.AuxInt 6409 sym1 := v.Aux 6410 v_0 := v.Args[0] 6411 if v_0.Op != OpAMD64LEAQ1 { 6412 break 6413 } 6414 off2 := v_0.AuxInt 6415 sym2 := v_0.Aux 6416 ptr := v_0.Args[0] 6417 idx := v_0.Args[1] 6418 mem := v.Args[1] 6419 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6420 break 6421 } 6422 
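// A load through LEAQ1 becomes a true indexed load: the LEAQ1's base and index move into the two address operands, the offsets add, and the symbols merge.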
v.reset(OpAMD64MOVLloadidx1) 6423 v.AuxInt = off1 + off2 6424 v.Aux = mergeSym(sym1, sym2) 6425 v.AddArg(ptr) 6426 v.AddArg(idx) 6427 v.AddArg(mem) 6428 return true 6429 } 6430 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 6431 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6432 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6433 for { 6434 off1 := v.AuxInt 6435 sym1 := v.Aux 6436 v_0 := v.Args[0] 6437 if v_0.Op != OpAMD64LEAQ4 { 6438 break 6439 } 6440 off2 := v_0.AuxInt 6441 sym2 := v_0.Aux 6442 ptr := v_0.Args[0] 6443 idx := v_0.Args[1] 6444 mem := v.Args[1] 6445 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6446 break 6447 } 6448 v.reset(OpAMD64MOVLloadidx4) 6449 v.AuxInt = off1 + off2 6450 v.Aux = mergeSym(sym1, sym2) 6451 v.AddArg(ptr) 6452 v.AddArg(idx) 6453 v.AddArg(mem) 6454 return true 6455 } 6456 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 6457 // cond: ptr.Op != OpSB 6458 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 6459 for { 6460 off := v.AuxInt 6461 sym := v.Aux 6462 v_0 := v.Args[0] 6463 if v_0.Op != OpAMD64ADDQ { 6464 break 6465 } 6466 ptr := v_0.Args[0] 6467 idx := v_0.Args[1] 6468 mem := v.Args[1] 6469 if !(ptr.Op != OpSB) { 6470 break 6471 } 6472 v.reset(OpAMD64MOVLloadidx1) 6473 v.AuxInt = off 6474 v.Aux = sym 6475 v.AddArg(ptr) 6476 v.AddArg(idx) 6477 v.AddArg(mem) 6478 return true 6479 } 6480 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 6481 // cond: canMergeSym(sym1, sym2) 6482 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6483 for { 6484 off1 := v.AuxInt 6485 sym1 := v.Aux 6486 v_0 := v.Args[0] 6487 if v_0.Op != OpAMD64LEAL { 6488 break 6489 } 6490 off2 := v_0.AuxInt 6491 sym2 := v_0.Aux 6492 base := v_0.Args[0] 6493 mem := v.Args[1] 6494 if !(canMergeSym(sym1, sym2)) { 6495 break 6496 } 6497 v.reset(OpAMD64MOVLload) 6498 v.AuxInt = off1 + off2 6499 v.Aux = mergeSym(sym1, sym2) 6500 v.AddArg(base) 6501 v.AddArg(mem) 6502 return true 6503 } 6504 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 6505 // cond: is32Bit(off1+off2) 6506 // result: (MOVLload [off1+off2] {sym} ptr mem) 6507 for { 6508 off1 := v.AuxInt 6509 sym := v.Aux 6510 v_0 := v.Args[0] 6511 if v_0.Op != OpAMD64ADDLconst { 6512 break 6513 } 6514 off2 := v_0.AuxInt 6515 ptr := v_0.Args[0] 6516 mem := v.Args[1] 6517 if !(is32Bit(off1 + off2)) { 6518 break 6519 } 6520 v.reset(OpAMD64MOVLload) 6521 v.AuxInt = off1 + off2 6522 v.Aux = sym 6523 v.AddArg(ptr) 6524 v.AddArg(mem) 6525 return true 6526 } 6527 return false 6528 } 6529 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { 6530 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 6531 // cond: 6532 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 6533 for { 6534 c := v.AuxInt 6535 sym := v.Aux 6536 ptr := v.Args[0] 6537 v_1 := v.Args[1] 6538 if v_1.Op != OpAMD64SHLQconst { 6539 break 6540 } 6541 if v_1.AuxInt != 2 { 6542 break 6543 } 6544 idx := v_1.Args[0] 6545 mem := v.Args[2] 6546 v.reset(OpAMD64MOVLloadidx4) 6547 v.AuxInt = c 6548 v.Aux = sym 6549 v.AddArg(ptr) 6550 v.AddArg(idx) 6551 v.AddArg(mem) 6552 return true 6553 } 6554 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 6555 // cond: 6556 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 6557 for { 6558 c := v.AuxInt 6559 sym := v.Aux 6560 v_0 := v.Args[0] 6561 if v_0.Op != OpAMD64SHLQconst { 6562 break 6563 } 6564 if v_0.AuxInt != 2 { 6565 break 6566 } 6567 idx := v_0.Args[0] 6568 ptr := v.Args[1] 6569 mem := v.Args[2] 6570 
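// Commuted form of the rule above (the SHLQconst may appear as either address operand of the idx1 load); the rebuild below normalizes both orders to the scaled idx4 load.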
v.reset(OpAMD64MOVLloadidx4) 6571 v.AuxInt = c 6572 v.Aux = sym 6573 v.AddArg(ptr) 6574 v.AddArg(idx) 6575 v.AddArg(mem) 6576 return true 6577 } 6578 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 6579 // cond: 6580 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6581 for { 6582 c := v.AuxInt 6583 sym := v.Aux 6584 v_0 := v.Args[0] 6585 if v_0.Op != OpAMD64ADDQconst { 6586 break 6587 } 6588 d := v_0.AuxInt 6589 ptr := v_0.Args[0] 6590 idx := v.Args[1] 6591 mem := v.Args[2] 6592 v.reset(OpAMD64MOVLloadidx1) 6593 v.AuxInt = c + d 6594 v.Aux = sym 6595 v.AddArg(ptr) 6596 v.AddArg(idx) 6597 v.AddArg(mem) 6598 return true 6599 } 6600 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 6601 // cond: 6602 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6603 for { 6604 c := v.AuxInt 6605 sym := v.Aux 6606 idx := v.Args[0] 6607 v_1 := v.Args[1] 6608 if v_1.Op != OpAMD64ADDQconst { 6609 break 6610 } 6611 d := v_1.AuxInt 6612 ptr := v_1.Args[0] 6613 mem := v.Args[2] 6614 v.reset(OpAMD64MOVLloadidx1) 6615 v.AuxInt = c + d 6616 v.Aux = sym 6617 v.AddArg(ptr) 6618 v.AddArg(idx) 6619 v.AddArg(mem) 6620 return true 6621 } 6622 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 6623 // cond: 6624 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6625 for { 6626 c := v.AuxInt 6627 sym := v.Aux 6628 ptr := v.Args[0] 6629 v_1 := v.Args[1] 6630 if v_1.Op != OpAMD64ADDQconst { 6631 break 6632 } 6633 d := v_1.AuxInt 6634 idx := v_1.Args[0] 6635 mem := v.Args[2] 6636 v.reset(OpAMD64MOVLloadidx1) 6637 v.AuxInt = c + d 6638 v.Aux = sym 6639 v.AddArg(ptr) 6640 v.AddArg(idx) 6641 v.AddArg(mem) 6642 return true 6643 } 6644 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 6645 // cond: 6646 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6647 for { 6648 c := v.AuxInt 6649 sym := v.Aux 6650 v_0 := v.Args[0] 6651 if v_0.Op != OpAMD64ADDQconst { 6652 break 6653 } 6654 d := v_0.AuxInt 6655 idx := v_0.Args[0] 6656 ptr := v.Args[1] 6657 mem := v.Args[2] 6658 v.reset(OpAMD64MOVLloadidx1) 6659 v.AuxInt = c + d 6660 v.Aux = sym 6661 v.AddArg(ptr) 6662 v.AddArg(idx) 6663 v.AddArg(mem) 6664 return true 6665 } 6666 return false 6667 } 6668 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { 6669 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 6670 // cond: 6671 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 6672 for { 6673 c := v.AuxInt 6674 sym := v.Aux 6675 v_0 := v.Args[0] 6676 if v_0.Op != OpAMD64ADDQconst { 6677 break 6678 } 6679 d := v_0.AuxInt 6680 ptr := v_0.Args[0] 6681 idx := v.Args[1] 6682 mem := v.Args[2] 6683 v.reset(OpAMD64MOVLloadidx4) 6684 v.AuxInt = c + d 6685 v.Aux = sym 6686 v.AddArg(ptr) 6687 v.AddArg(idx) 6688 v.AddArg(mem) 6689 return true 6690 } 6691 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 6692 // cond: 6693 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 6694 for { 6695 c := v.AuxInt 6696 sym := v.Aux 6697 ptr := v.Args[0] 6698 v_1 := v.Args[1] 6699 if v_1.Op != OpAMD64ADDQconst { 6700 break 6701 } 6702 d := v_1.AuxInt 6703 idx := v_1.Args[0] 6704 mem := v.Args[2] 6705 v.reset(OpAMD64MOVLloadidx4) 6706 v.AuxInt = c + 4*d 6707 v.Aux = sym 6708 v.AddArg(ptr) 6709 v.AddArg(idx) 6710 v.AddArg(mem) 6711 return true 6712 } 6713 return false 6714 } 6715 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { 6716 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 6717 // cond: 6718 // result: (MOVLstore [off] {sym} ptr x mem) 6719 for { 6720 off := v.AuxInt 6721 sym := v.Aux 6722 ptr := v.Args[0] 
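// A 32-bit store writes only the low 32 bits, so a MOVLQSX (or, in the next rule, MOVLQZX) extension feeding the stored value is redundant and is stripped.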
6723 v_1 := v.Args[1] 6724 if v_1.Op != OpAMD64MOVLQSX { 6725 break 6726 } 6727 x := v_1.Args[0] 6728 mem := v.Args[2] 6729 v.reset(OpAMD64MOVLstore) 6730 v.AuxInt = off 6731 v.Aux = sym 6732 v.AddArg(ptr) 6733 v.AddArg(x) 6734 v.AddArg(mem) 6735 return true 6736 } 6737 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 6738 // cond: 6739 // result: (MOVLstore [off] {sym} ptr x mem) 6740 for { 6741 off := v.AuxInt 6742 sym := v.Aux 6743 ptr := v.Args[0] 6744 v_1 := v.Args[1] 6745 if v_1.Op != OpAMD64MOVLQZX { 6746 break 6747 } 6748 x := v_1.Args[0] 6749 mem := v.Args[2] 6750 v.reset(OpAMD64MOVLstore) 6751 v.AuxInt = off 6752 v.Aux = sym 6753 v.AddArg(ptr) 6754 v.AddArg(x) 6755 v.AddArg(mem) 6756 return true 6757 } 6758 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6759 // cond: is32Bit(off1+off2) 6760 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 6761 for { 6762 off1 := v.AuxInt 6763 sym := v.Aux 6764 v_0 := v.Args[0] 6765 if v_0.Op != OpAMD64ADDQconst { 6766 break 6767 } 6768 off2 := v_0.AuxInt 6769 ptr := v_0.Args[0] 6770 val := v.Args[1] 6771 mem := v.Args[2] 6772 if !(is32Bit(off1 + off2)) { 6773 break 6774 } 6775 v.reset(OpAMD64MOVLstore) 6776 v.AuxInt = off1 + off2 6777 v.Aux = sym 6778 v.AddArg(ptr) 6779 v.AddArg(val) 6780 v.AddArg(mem) 6781 return true 6782 } 6783 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 6784 // cond: validOff(off) 6785 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 6786 for { 6787 off := v.AuxInt 6788 sym := v.Aux 6789 ptr := v.Args[0] 6790 v_1 := v.Args[1] 6791 if v_1.Op != OpAMD64MOVLconst { 6792 break 6793 } 6794 c := v_1.AuxInt 6795 mem := v.Args[2] 6796 if !(validOff(off)) { 6797 break 6798 } 6799 v.reset(OpAMD64MOVLstoreconst) 6800 v.AuxInt = makeValAndOff(int64(int32(c)), off) 6801 v.Aux = sym 6802 v.AddArg(ptr) 6803 v.AddArg(mem) 6804 return true 6805 } 6806 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6807 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6808 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6809 for { 6810 off1 := v.AuxInt 6811 sym1 := v.Aux 6812 v_0 := v.Args[0] 6813 if v_0.Op != OpAMD64LEAQ { 6814 break 6815 } 6816 off2 := v_0.AuxInt 6817 sym2 := v_0.Aux 6818 base := v_0.Args[0] 6819 val := v.Args[1] 6820 mem := v.Args[2] 6821 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6822 break 6823 } 6824 v.reset(OpAMD64MOVLstore) 6825 v.AuxInt = off1 + off2 6826 v.Aux = mergeSym(sym1, sym2) 6827 v.AddArg(base) 6828 v.AddArg(val) 6829 v.AddArg(mem) 6830 return true 6831 } 6832 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 6833 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6834 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6835 for { 6836 off1 := v.AuxInt 6837 sym1 := v.Aux 6838 v_0 := v.Args[0] 6839 if v_0.Op != OpAMD64LEAQ1 { 6840 break 6841 } 6842 off2 := v_0.AuxInt 6843 sym2 := v_0.Aux 6844 ptr := v_0.Args[0] 6845 idx := v_0.Args[1] 6846 val := v.Args[1] 6847 mem := v.Args[2] 6848 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6849 break 6850 } 6851 v.reset(OpAMD64MOVLstoreidx1) 6852 v.AuxInt = off1 + off2 6853 v.Aux = mergeSym(sym1, sym2) 6854 v.AddArg(ptr) 6855 v.AddArg(idx) 6856 v.AddArg(val) 6857 v.AddArg(mem) 6858 return true 6859 } 6860 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 6861 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6862 // result: (MOVLstoreidx4 [off1+off2] 
{mergeSym(sym1,sym2)} ptr idx val mem) 6863 for { 6864 off1 := v.AuxInt 6865 sym1 := v.Aux 6866 v_0 := v.Args[0] 6867 if v_0.Op != OpAMD64LEAQ4 { 6868 break 6869 } 6870 off2 := v_0.AuxInt 6871 sym2 := v_0.Aux 6872 ptr := v_0.Args[0] 6873 idx := v_0.Args[1] 6874 val := v.Args[1] 6875 mem := v.Args[2] 6876 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6877 break 6878 } 6879 v.reset(OpAMD64MOVLstoreidx4) 6880 v.AuxInt = off1 + off2 6881 v.Aux = mergeSym(sym1, sym2) 6882 v.AddArg(ptr) 6883 v.AddArg(idx) 6884 v.AddArg(val) 6885 v.AddArg(mem) 6886 return true 6887 } 6888 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 6889 // cond: ptr.Op != OpSB 6890 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 6891 for { 6892 off := v.AuxInt 6893 sym := v.Aux 6894 v_0 := v.Args[0] 6895 if v_0.Op != OpAMD64ADDQ { 6896 break 6897 } 6898 ptr := v_0.Args[0] 6899 idx := v_0.Args[1] 6900 val := v.Args[1] 6901 mem := v.Args[2] 6902 if !(ptr.Op != OpSB) { 6903 break 6904 } 6905 v.reset(OpAMD64MOVLstoreidx1) 6906 v.AuxInt = off 6907 v.Aux = sym 6908 v.AddArg(ptr) 6909 v.AddArg(idx) 6910 v.AddArg(val) 6911 v.AddArg(mem) 6912 return true 6913 } 6914 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 6915 // cond: x.Uses == 1 && clobber(x) 6916 // result: (MOVQstore [i-4] {s} p w mem) 6917 for { 6918 i := v.AuxInt 6919 s := v.Aux 6920 p := v.Args[0] 6921 v_1 := v.Args[1] 6922 if v_1.Op != OpAMD64SHRQconst { 6923 break 6924 } 6925 if v_1.AuxInt != 32 { 6926 break 6927 } 6928 w := v_1.Args[0] 6929 x := v.Args[2] 6930 if x.Op != OpAMD64MOVLstore { 6931 break 6932 } 6933 if x.AuxInt != i-4 { 6934 break 6935 } 6936 if x.Aux != s { 6937 break 6938 } 6939 if p != x.Args[0] { 6940 break 6941 } 6942 if w != x.Args[1] { 6943 break 6944 } 6945 mem := x.Args[2] 6946 if !(x.Uses == 1 && clobber(x)) { 6947 break 6948 } 6949 v.reset(OpAMD64MOVQstore) 6950 v.AuxInt = i - 4 6951 v.Aux = s 6952 v.AddArg(p) 6953 v.AddArg(w) 6954 v.AddArg(mem) 6955 return true 6956 } 6957 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 6958 // cond: x.Uses == 1 && clobber(x) 6959 // result: (MOVQstore [i-4] {s} p w0 mem) 6960 for { 6961 i := v.AuxInt 6962 s := v.Aux 6963 p := v.Args[0] 6964 v_1 := v.Args[1] 6965 if v_1.Op != OpAMD64SHRQconst { 6966 break 6967 } 6968 j := v_1.AuxInt 6969 w := v_1.Args[0] 6970 x := v.Args[2] 6971 if x.Op != OpAMD64MOVLstore { 6972 break 6973 } 6974 if x.AuxInt != i-4 { 6975 break 6976 } 6977 if x.Aux != s { 6978 break 6979 } 6980 if p != x.Args[0] { 6981 break 6982 } 6983 w0 := x.Args[1] 6984 if w0.Op != OpAMD64SHRQconst { 6985 break 6986 } 6987 if w0.AuxInt != j-32 { 6988 break 6989 } 6990 if w != w0.Args[0] { 6991 break 6992 } 6993 mem := x.Args[2] 6994 if !(x.Uses == 1 && clobber(x)) { 6995 break 6996 } 6997 v.reset(OpAMD64MOVQstore) 6998 v.AuxInt = i - 4 6999 v.Aux = s 7000 v.AddArg(p) 7001 v.AddArg(w0) 7002 v.AddArg(mem) 7003 return true 7004 } 7005 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 7006 // cond: canMergeSym(sym1, sym2) 7007 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7008 for { 7009 off1 := v.AuxInt 7010 sym1 := v.Aux 7011 v_0 := v.Args[0] 7012 if v_0.Op != OpAMD64LEAL { 7013 break 7014 } 7015 off2 := v_0.AuxInt 7016 sym2 := v_0.Aux 7017 base := v_0.Args[0] 7018 val := v.Args[1] 7019 mem := v.Args[2] 7020 if !(canMergeSym(sym1, sym2)) { 7021 break 7022 } 7023 v.reset(OpAMD64MOVLstore) 7024 v.AuxInt = off1 + off2 7025 v.Aux = mergeSym(sym1, sym2) 
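// LEAL folds like the LEAQ case above: 32-bit address arithmetic merges its symbol and offset into the store the same way.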
7026 v.AddArg(base) 7027 v.AddArg(val) 7028 v.AddArg(mem) 7029 return true 7030 } 7031 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 7032 // cond: is32Bit(off1+off2) 7033 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 7034 for { 7035 off1 := v.AuxInt 7036 sym := v.Aux 7037 v_0 := v.Args[0] 7038 if v_0.Op != OpAMD64ADDLconst { 7039 break 7040 } 7041 off2 := v_0.AuxInt 7042 ptr := v_0.Args[0] 7043 val := v.Args[1] 7044 mem := v.Args[2] 7045 if !(is32Bit(off1 + off2)) { 7046 break 7047 } 7048 v.reset(OpAMD64MOVLstore) 7049 v.AuxInt = off1 + off2 7050 v.Aux = sym 7051 v.AddArg(ptr) 7052 v.AddArg(val) 7053 v.AddArg(mem) 7054 return true 7055 } 7056 return false 7057 } 7058 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { 7059 b := v.Block 7060 _ = b 7061 types := &b.Func.Config.Types 7062 _ = types 7063 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 7064 // cond: ValAndOff(sc).canAdd(off) 7065 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7066 for { 7067 sc := v.AuxInt 7068 s := v.Aux 7069 v_0 := v.Args[0] 7070 if v_0.Op != OpAMD64ADDQconst { 7071 break 7072 } 7073 off := v_0.AuxInt 7074 ptr := v_0.Args[0] 7075 mem := v.Args[1] 7076 if !(ValAndOff(sc).canAdd(off)) { 7077 break 7078 } 7079 v.reset(OpAMD64MOVLstoreconst) 7080 v.AuxInt = ValAndOff(sc).add(off) 7081 v.Aux = s 7082 v.AddArg(ptr) 7083 v.AddArg(mem) 7084 return true 7085 } 7086 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 7087 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7088 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7089 for { 7090 sc := v.AuxInt 7091 sym1 := v.Aux 7092 v_0 := v.Args[0] 7093 if v_0.Op != OpAMD64LEAQ { 7094 break 7095 } 7096 off := v_0.AuxInt 7097 sym2 := v_0.Aux 7098 ptr := v_0.Args[0] 7099 mem := v.Args[1] 7100 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7101 break 7102 } 7103 v.reset(OpAMD64MOVLstoreconst) 7104 v.AuxInt = ValAndOff(sc).add(off) 7105 v.Aux = mergeSym(sym1, sym2) 7106 v.AddArg(ptr) 7107 v.AddArg(mem) 7108 return true 7109 } 7110 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 7111 // cond: canMergeSym(sym1, sym2) 7112 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7113 for { 7114 x := v.AuxInt 7115 sym1 := v.Aux 7116 v_0 := v.Args[0] 7117 if v_0.Op != OpAMD64LEAQ1 { 7118 break 7119 } 7120 off := v_0.AuxInt 7121 sym2 := v_0.Aux 7122 ptr := v_0.Args[0] 7123 idx := v_0.Args[1] 7124 mem := v.Args[1] 7125 if !(canMergeSym(sym1, sym2)) { 7126 break 7127 } 7128 v.reset(OpAMD64MOVLstoreconstidx1) 7129 v.AuxInt = ValAndOff(x).add(off) 7130 v.Aux = mergeSym(sym1, sym2) 7131 v.AddArg(ptr) 7132 v.AddArg(idx) 7133 v.AddArg(mem) 7134 return true 7135 } 7136 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 7137 // cond: canMergeSym(sym1, sym2) 7138 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7139 for { 7140 x := v.AuxInt 7141 sym1 := v.Aux 7142 v_0 := v.Args[0] 7143 if v_0.Op != OpAMD64LEAQ4 { 7144 break 7145 } 7146 off := v_0.AuxInt 7147 sym2 := v_0.Aux 7148 ptr := v_0.Args[0] 7149 idx := v_0.Args[1] 7150 mem := v.Args[1] 7151 if !(canMergeSym(sym1, sym2)) { 7152 break 7153 } 7154 v.reset(OpAMD64MOVLstoreconstidx4) 7155 v.AuxInt = ValAndOff(x).add(off) 7156 v.Aux = mergeSym(sym1, sym2) 7157 v.AddArg(ptr) 7158 v.AddArg(idx) 7159 v.AddArg(mem) 7160 return true 7161 } 7162 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr 
idx) mem) 7163 // cond: 7164 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 7165 for { 7166 x := v.AuxInt 7167 sym := v.Aux 7168 v_0 := v.Args[0] 7169 if v_0.Op != OpAMD64ADDQ { 7170 break 7171 } 7172 ptr := v_0.Args[0] 7173 idx := v_0.Args[1] 7174 mem := v.Args[1] 7175 v.reset(OpAMD64MOVLstoreconstidx1) 7176 v.AuxInt = x 7177 v.Aux = sym 7178 v.AddArg(ptr) 7179 v.AddArg(idx) 7180 v.AddArg(mem) 7181 return true 7182 } 7183 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 7184 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7185 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7186 for { 7187 c := v.AuxInt 7188 s := v.Aux 7189 p := v.Args[0] 7190 x := v.Args[1] 7191 if x.Op != OpAMD64MOVLstoreconst { 7192 break 7193 } 7194 a := x.AuxInt 7195 if x.Aux != s { 7196 break 7197 } 7198 if p != x.Args[0] { 7199 break 7200 } 7201 mem := x.Args[1] 7202 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7203 break 7204 } 7205 v.reset(OpAMD64MOVQstore) 7206 v.AuxInt = ValAndOff(a).Off() 7207 v.Aux = s 7208 v.AddArg(p) 7209 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 7210 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7211 v.AddArg(v0) 7212 v.AddArg(mem) 7213 return true 7214 } 7215 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 7216 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7217 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7218 for { 7219 sc := v.AuxInt 7220 sym1 := v.Aux 7221 v_0 := v.Args[0] 7222 if v_0.Op != OpAMD64LEAL { 7223 break 7224 } 7225 off := v_0.AuxInt 7226 sym2 := v_0.Aux 7227 ptr := v_0.Args[0] 7228 mem := v.Args[1] 7229 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7230 break 7231 } 7232 v.reset(OpAMD64MOVLstoreconst) 7233 v.AuxInt = ValAndOff(sc).add(off) 7234 v.Aux = mergeSym(sym1, sym2) 7235 v.AddArg(ptr) 7236 v.AddArg(mem) 7237 return true 7238 } 7239 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 7240 // cond: ValAndOff(sc).canAdd(off) 7241 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7242 for { 7243 sc := v.AuxInt 7244 s := v.Aux 7245 v_0 := v.Args[0] 7246 if v_0.Op != OpAMD64ADDLconst { 7247 break 7248 } 7249 off := v_0.AuxInt 7250 ptr := v_0.Args[0] 7251 mem := v.Args[1] 7252 if !(ValAndOff(sc).canAdd(off)) { 7253 break 7254 } 7255 v.reset(OpAMD64MOVLstoreconst) 7256 v.AuxInt = ValAndOff(sc).add(off) 7257 v.Aux = s 7258 v.AddArg(ptr) 7259 v.AddArg(mem) 7260 return true 7261 } 7262 return false 7263 } 7264 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { 7265 b := v.Block 7266 _ = b 7267 types := &b.Func.Config.Types 7268 _ = types 7269 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7270 // cond: 7271 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 7272 for { 7273 c := v.AuxInt 7274 sym := v.Aux 7275 ptr := v.Args[0] 7276 v_1 := v.Args[1] 7277 if v_1.Op != OpAMD64SHLQconst { 7278 break 7279 } 7280 if v_1.AuxInt != 2 { 7281 break 7282 } 7283 idx := v_1.Args[0] 7284 mem := v.Args[2] 7285 v.reset(OpAMD64MOVLstoreconstidx4) 7286 v.AuxInt = c 7287 v.Aux = sym 7288 v.AddArg(ptr) 7289 v.AddArg(idx) 7290 v.AddArg(mem) 7291 return true 7292 } 7293 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7294 // cond: 7295 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7296 for { 7297 x 
:= v.AuxInt 7298 sym := v.Aux 7299 v_0 := v.Args[0] 7300 if v_0.Op != OpAMD64ADDQconst { 7301 break 7302 } 7303 c := v_0.AuxInt 7304 ptr := v_0.Args[0] 7305 idx := v.Args[1] 7306 mem := v.Args[2] 7307 v.reset(OpAMD64MOVLstoreconstidx1) 7308 v.AuxInt = ValAndOff(x).add(c) 7309 v.Aux = sym 7310 v.AddArg(ptr) 7311 v.AddArg(idx) 7312 v.AddArg(mem) 7313 return true 7314 } 7315 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7316 // cond: 7317 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7318 for { 7319 x := v.AuxInt 7320 sym := v.Aux 7321 ptr := v.Args[0] 7322 v_1 := v.Args[1] 7323 if v_1.Op != OpAMD64ADDQconst { 7324 break 7325 } 7326 c := v_1.AuxInt 7327 idx := v_1.Args[0] 7328 mem := v.Args[2] 7329 v.reset(OpAMD64MOVLstoreconstidx1) 7330 v.AuxInt = ValAndOff(x).add(c) 7331 v.Aux = sym 7332 v.AddArg(ptr) 7333 v.AddArg(idx) 7334 v.AddArg(mem) 7335 return true 7336 } 7337 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 7338 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7339 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7340 for { 7341 c := v.AuxInt 7342 s := v.Aux 7343 p := v.Args[0] 7344 i := v.Args[1] 7345 x := v.Args[2] 7346 if x.Op != OpAMD64MOVLstoreconstidx1 { 7347 break 7348 } 7349 a := x.AuxInt 7350 if x.Aux != s { 7351 break 7352 } 7353 if p != x.Args[0] { 7354 break 7355 } 7356 if i != x.Args[1] { 7357 break 7358 } 7359 mem := x.Args[2] 7360 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7361 break 7362 } 7363 v.reset(OpAMD64MOVQstoreidx1) 7364 v.AuxInt = ValAndOff(a).Off() 7365 v.Aux = s 7366 v.AddArg(p) 7367 v.AddArg(i) 7368 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 7369 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7370 v.AddArg(v0) 7371 v.AddArg(mem) 7372 return true 7373 } 7374 return false 7375 } 7376 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { 7377 b := v.Block 7378 _ = b 7379 types := &b.Func.Config.Types 7380 _ = types 7381 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 7382 // cond: 7383 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7384 for { 7385 x := v.AuxInt 7386 sym := v.Aux 7387 v_0 := v.Args[0] 7388 if v_0.Op != OpAMD64ADDQconst { 7389 break 7390 } 7391 c := v_0.AuxInt 7392 ptr := v_0.Args[0] 7393 idx := v.Args[1] 7394 mem := v.Args[2] 7395 v.reset(OpAMD64MOVLstoreconstidx4) 7396 v.AuxInt = ValAndOff(x).add(c) 7397 v.Aux = sym 7398 v.AddArg(ptr) 7399 v.AddArg(idx) 7400 v.AddArg(mem) 7401 return true 7402 } 7403 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 7404 // cond: 7405 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 7406 for { 7407 x := v.AuxInt 7408 sym := v.Aux 7409 ptr := v.Args[0] 7410 v_1 := v.Args[1] 7411 if v_1.Op != OpAMD64ADDQconst { 7412 break 7413 } 7414 c := v_1.AuxInt 7415 idx := v_1.Args[0] 7416 mem := v.Args[2] 7417 v.reset(OpAMD64MOVLstoreconstidx4) 7418 v.AuxInt = ValAndOff(x).add(4 * c) 7419 v.Aux = sym 7420 v.AddArg(ptr) 7421 v.AddArg(idx) 7422 v.AddArg(mem) 7423 return true 7424 } 7425 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 7426 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7427 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst 
[ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7428 for { 7429 c := v.AuxInt 7430 s := v.Aux 7431 p := v.Args[0] 7432 i := v.Args[1] 7433 x := v.Args[2] 7434 if x.Op != OpAMD64MOVLstoreconstidx4 { 7435 break 7436 } 7437 a := x.AuxInt 7438 if x.Aux != s { 7439 break 7440 } 7441 if p != x.Args[0] { 7442 break 7443 } 7444 if i != x.Args[1] { 7445 break 7446 } 7447 mem := x.Args[2] 7448 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7449 break 7450 } 7451 v.reset(OpAMD64MOVQstoreidx1) 7452 v.AuxInt = ValAndOff(a).Off() 7453 v.Aux = s 7454 v.AddArg(p) 7455 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 7456 v0.AuxInt = 2 7457 v0.AddArg(i) 7458 v.AddArg(v0) 7459 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 7460 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7461 v.AddArg(v1) 7462 v.AddArg(mem) 7463 return true 7464 } 7465 return false 7466 } 7467 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { 7468 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 7469 // cond: 7470 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 7471 for { 7472 c := v.AuxInt 7473 sym := v.Aux 7474 ptr := v.Args[0] 7475 v_1 := v.Args[1] 7476 if v_1.Op != OpAMD64SHLQconst { 7477 break 7478 } 7479 if v_1.AuxInt != 2 { 7480 break 7481 } 7482 idx := v_1.Args[0] 7483 val := v.Args[2] 7484 mem := v.Args[3] 7485 v.reset(OpAMD64MOVLstoreidx4) 7486 v.AuxInt = c 7487 v.Aux = sym 7488 v.AddArg(ptr) 7489 v.AddArg(idx) 7490 v.AddArg(val) 7491 v.AddArg(mem) 7492 return true 7493 } 7494 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7495 // cond: 7496 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7497 for { 7498 c := v.AuxInt 7499 sym := v.Aux 7500 v_0 := v.Args[0] 7501 if v_0.Op != OpAMD64ADDQconst { 7502 break 7503 } 7504 d := v_0.AuxInt 7505 ptr := v_0.Args[0] 7506 idx := v.Args[1] 7507 val := v.Args[2] 7508 mem := v.Args[3] 7509 v.reset(OpAMD64MOVLstoreidx1) 7510 v.AuxInt = c + d 7511 v.Aux = sym 7512 v.AddArg(ptr) 7513 v.AddArg(idx) 7514 v.AddArg(val) 7515 v.AddArg(mem) 7516 return true 7517 } 7518 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7519 // cond: 7520 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7521 for { 7522 c := v.AuxInt 7523 sym := v.Aux 7524 ptr := v.Args[0] 7525 v_1 := v.Args[1] 7526 if v_1.Op != OpAMD64ADDQconst { 7527 break 7528 } 7529 d := v_1.AuxInt 7530 idx := v_1.Args[0] 7531 val := v.Args[2] 7532 mem := v.Args[3] 7533 v.reset(OpAMD64MOVLstoreidx1) 7534 v.AuxInt = c + d 7535 v.Aux = sym 7536 v.AddArg(ptr) 7537 v.AddArg(idx) 7538 v.AddArg(val) 7539 v.AddArg(mem) 7540 return true 7541 } 7542 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 7543 // cond: x.Uses == 1 && clobber(x) 7544 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 7545 for { 7546 i := v.AuxInt 7547 s := v.Aux 7548 p := v.Args[0] 7549 idx := v.Args[1] 7550 v_2 := v.Args[2] 7551 if v_2.Op != OpAMD64SHRQconst { 7552 break 7553 } 7554 if v_2.AuxInt != 32 { 7555 break 7556 } 7557 w := v_2.Args[0] 7558 x := v.Args[3] 7559 if x.Op != OpAMD64MOVLstoreidx1 { 7560 break 7561 } 7562 if x.AuxInt != i-4 { 7563 break 7564 } 7565 if x.Aux != s { 7566 break 7567 } 7568 if p != x.Args[0] { 7569 break 7570 } 7571 if idx != x.Args[1] { 7572 break 7573 } 7574 if w != x.Args[2] { 7575 break 7576 } 7577 mem := x.Args[3] 7578 if !(x.Uses == 1 && clobber(x)) { 7579 break 7580 } 7581 v.reset(OpAMD64MOVQstoreidx1) 7582 v.AuxInt = i - 4 7583 v.Aux 
func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
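// Illustrative sketch, not generated: the ADDQconst/LEAQ folding rules in
// this file share one shape — hoist a constant out of the address
// computation into the instruction's AuxInt offset, guarded by is32Bit
// because x86-64 displacements are signed 32-bit. A scalar analogue of
// that guard and fold (exampleFoldOffset is hypothetical):
func exampleFoldOffset(off1, off2 int64) (int64, bool) {
	sum := off1 + off2
	if sum != int64(int32(sum)) { // mirrors the is32Bit(off1+off2) condition
		return 0, false // offset would overflow the displacement; no rewrite
	}
	return sum, true
}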
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
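// Illustrative note, not generated: the first MOVQload rule above performs
// store-to-load forwarding — a load from the same symbol, offset, and
// pointer as the immediately preceding MOVQstore is rewritten to a copy of
// the stored value x, skipping memory entirely. A rough map-based analogue
// (exampleStoreThenLoad is hypothetical):
func exampleStoreThenLoad(memory map[int64]int64, ptr, off, x int64) int64 {
	memory[ptr+off] = x    // the MOVQstore supplying the memory argument
	return memory[ptr+off] // the MOVQload; the rule simply returns x
}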
func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool {
	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
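// Illustrative sketch, not generated: MOVQstoreconst carries both the
// constant to store and the address offset packed into a single AuxInt;
// that packing is what ValAndOff(sc).canAdd(off) and .add(off) above
// manipulate. These hypothetical helpers mirror the assumed layout of
// makeValAndOff (value in the high 32 bits, offset in the low 32):
func examplePackValAndOff(val, off int64) int64 {
	return val<<32 | off&0xffffffff
}

func exampleUnpackValAndOff(vo int64) (val, off int64) {
	return vo >> 32, int64(int32(vo)) // both halves are signed 32-bit fields
}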
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX x:(MOVWQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
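// Illustrative sketch, not generated: the (MOVWQSX (ANDLconst [c] x)) rule
// above drops the sign extension whenever c&0x8000 == 0 — if the mask
// clears bit 15, the 16-bit result is non-negative, so sign- and
// zero-extension agree and an ANDLconst with c&0x7fff suffices. Checking
// that claim with plain integers (exampleMaskedSignExtend is hypothetical):
func exampleMaskedSignExtend(x, c uint16) bool {
	if c&0x8000 != 0 {
		return false // bit 15 may survive the mask; the rule does not apply
	}
	masked := x & c
	return int64(int16(masked)) == int64(masked) // sign ext == zero ext here
}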
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX x:(MOVWQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
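	// Note: the MOVWQZX rules here rewrite a zero-extension of a one-use
	// load into a plain MOVWload, which on this target already zero-extends
	// its 16-bit result into the full register. Re-reading only the low two
	// bytes of a wider MOVLload/MOVQload is sound because x86 is
	// little-endian, so those bytes sit at the same base offset.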
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx2 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xffff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX x:(MOVWQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
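	// Note: the rule above is store-to-load forwarding. The load's memory
	// argument is the MOVWstore itself, so no other write can intervene, and
	// a same-width, same-address load can reuse the stored value directly.
	// In source terms, roughly (hypothetical variables):
	//
	//	*p = x
	//	y := *p   // becomes y := x, and the load disappears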
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool {
	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
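	// Note: because the two address components of an idx1 op are simply
	// summed, ptr and idx are interchangeable, and each fold above and below
	// is spelled out twice, once per operand order; the generated matcher
	// tries each spelling verbatim rather than canonicalizing commutative
	// inputs first.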
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool {
	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
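	// Note: the two rules above drop a sign- or zero-extension feeding a
	// 16-bit store. MOVWstore writes only the low two bytes of its source,
	// and extension does not change bits 0..15, so storing the unextended
	// value is equivalent.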
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
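	// Note: the next two rules merge adjacent 16-bit stores of the halves of
	// one value into a single 32-bit store. On a little-endian target,
	// storing w at [i-2] and w>>16 at [i] lays down the same four bytes as
	// storing the low 32 bits of w at [i-2]. Roughly:
	//
	//	MOVW w, (i-2)(p)
	//	MOVW w>>16, (i)(p)    =>    MOVL w, (i-2)(p)
	//
	// The x.Uses == 1 condition ensures the narrow store has no other
	// observers before it is clobbered.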
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
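	// Note: for the storeconst ops, AuxInt is a ValAndOff, which packs the
	// constant being stored and the address offset into one int64 (roughly
	// the value in the high 32 bits and the offset in the low 32). canAdd
	// reports whether the combined offset still fits, and add folds the
	// extra displacement in without touching the value half.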
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
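	// Note: the rule above combines two adjacent 16-bit constant stores into
	// one 32-bit constant store. For example, storing 0x1111 at offset 0 and
	// 0x2222 at offset 2 becomes a single MOVLstoreconst of
	// 0x1111 | 0x2222<<16 = 0x22221111 at offset 0, which writes the same
	// four bytes in little-endian order.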
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool {
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
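// Note: in the final MOVWstoreconstidx2 rule above, the merged 32-bit
// constant store only exists in a scale-1 form (MOVLstoreconstidx1), so the
// scale-2 index has to be materialized explicitly as i<<1 via a fresh
// SHLQconst before it can serve as a plain byte index.
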
func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// cond:
	// result: (MULLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// cond:
	// result: (MULLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(c*d)
	// result: (MULQconst [c * d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c * d)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c * d
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-1] x)
	// cond:
	// result: (NEGQ x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
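	// Note: the rules that follow strength-reduce multiplication by small
	// constants into LEA-based address arithmetic, where LEAQ1/2/4/8 a b
	// computes a + 1/2/4/8*b. Working through a few of them:
	//
	//	3*x  = LEAQ2 x x            // x + 2*x
	//	5*x  = LEAQ4 x x            // x + 4*x
	//	7*x  = LEAQ8 (NEGQ x) x     // -x + 8*x
	//	11*x = LEAQ2 x (LEAQ4 x x)  // x + 2*(5*x)
	//
	// Beyond the fixed table, exact powers of two become shifts, constants
	// of the form 2^n-1 or 2^n plus 1, 2, 4, or 8 become a shift plus one
	// SUBQ or LEAQ, and multiples 3*2^n, 5*2^n, 9*2^n become an LEA
	// followed by a shift.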
	// match: (MULQconst [1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [3] x)
	// cond:
	// result: (LEAQ2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [5] x)
	// cond:
	// result: (LEAQ4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [7] x)
	// cond:
	// result: (LEAQ8 (NEGQ <v.Type> x) x)
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [9] x)
	// cond:
	// result: (LEAQ8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [11] x)
	// cond:
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [13] x)
	// cond:
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [21] x)
	// cond:
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [25] x)
	// cond:
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [37] x)
	// cond:
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [41] x)
	// cond:
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [73] x)
	// cond:
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c)
	// result: (SHLQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
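// Note: the MULSD/MULSS rules above fold a one-use memory load into the
// multiply, producing the memory-operand form of the instruction (for
// example MULSD off(ptr), X0 instead of a separate MOVSD plus MULSD).
// canMergeLoad rejects the cases where moving the load to the multiply's
// position would be unsafe, and both operand orders are handled since the
// floating-point multiply is treated as commutative here.
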
func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
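	// Note: the rule above and its mirror below recognize the shift pair
	// that spells a rotate: for 32-bit values, (x<<c) | (x>>(32-c)) is
	// exactly ROLLconst [c] x. In Go source terms, a line like the
	// following (hypothetical variables):
	//
	//	r := x<<5 | x>>27 // x a uint32
	//
	// compiles to a single ROLL $5 instead of two shifts and an OR. The
	// 16- and 8-bit variants further on also check c and the value's type
	// size so the rotate width matches the operand.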
func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
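// The ORL rules below fall into three groups: folding a constant operand
// into ORLconst, recognizing shift pairs that form rotates (ROLLconst,
// ROLWconst, ROLBconst), and combining adjacent narrow loads joined by
// shifts and ORs into a single wider load. The matcher is purely syntactic,
// so each commutative pattern is generated once per operand permutation,
// which is why this function is so long.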
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
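	// The eight rotate rules above rely on the identity
	// (x << c) | (x >> (width-c)) == rotate-left(x, c), so the OR of the two
	// shifts collapses into a single ROL instruction, e.g. for 32 bits:
	//
	//	ORL (SHLLconst x [10]) (SHRLconst x [22]) => ROLLconst x [10]
	//
	// For the 16- and 8-bit forms the extra c < 16 / c < 8 and t.Size()
	// conditions keep the rewrite restricted to genuinely narrow values.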
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
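	// The load-combining rules above and below require every matched value
	// to have Uses == 1, so nothing else observes the partial results, and
	// they place the merged wide load at mergePoint(b, x0, x1), a block
	// where both original loads are available (nil means no such block
	// exists and the rule does not fire). On little-endian x86 the byte at
	// i0 is the low byte and the byte at i0+1, shifted left by 8, is the
	// high byte, so the pair is exactly the 16-bit load (MOVWload [i0]);
	// merging two 16-bit loads into MOVLload is the same argument one level
	// up.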
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
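	// From here on the same combines are repeated for the indexed
	// addressing forms (MOVBloadidx1 and, later, MOVWloadidx1). Because the
	// matcher compares arguments positionally, every commutation of the
	// base pointer p and the index idx, and of the two OR operands, gets
	// its own generated copy of the rule; the rewrites themselves are
	// identical.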
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
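	// The next eight rules are the 16-bit indexed analogue: two adjacent
	// MOVWloadidx1 loads at offsets i0 and i0+2, joined by SHLLconst [16]
	// and ORL, collapse into a single MOVLloadidx1 at i0.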
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
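	// The remaining rules handle partially merged OR chains, where the two
	// byte loads sit at bit positions j0 and j1 == j0+8 inside a larger
	// tree (ORL s1 (ORL s0 y)). The pair is replaced by one 16-bit load
	// shifted by j0 and ORed back with the untouched rest y; the j0%16 == 0
	// condition keeps the merged halves aligned so repeated application can
	// widen the load further.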
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b =
		mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 :=
		x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
14618 if s0.Op != OpAMD64SHLLconst { 14619 break 14620 } 14621 j0 := s0.AuxInt 14622 x0 := s0.Args[0] 14623 if x0.Op != OpAMD64MOVBloadidx1 { 14624 break 14625 } 14626 i0 := x0.AuxInt 14627 s := x0.Aux 14628 idx := x0.Args[0] 14629 p := x0.Args[1] 14630 mem := x0.Args[2] 14631 s1 := v.Args[1] 14632 if s1.Op != OpAMD64SHLLconst { 14633 break 14634 } 14635 j1 := s1.AuxInt 14636 x1 := s1.Args[0] 14637 if x1.Op != OpAMD64MOVBloadidx1 { 14638 break 14639 } 14640 i1 := x1.AuxInt 14641 if x1.Aux != s { 14642 break 14643 } 14644 if p != x1.Args[0] { 14645 break 14646 } 14647 if idx != x1.Args[1] { 14648 break 14649 } 14650 if mem != x1.Args[2] { 14651 break 14652 } 14653 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 14654 break 14655 } 14656 b = mergePoint(b, x0, x1) 14657 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 14658 v.reset(OpCopy) 14659 v.AddArg(v0) 14660 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 14661 v1.AuxInt = j0 14662 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 14663 v2.AuxInt = i0 14664 v2.Aux = s 14665 v2.AddArg(p) 14666 v2.AddArg(idx) 14667 v2.AddArg(mem) 14668 v1.AddArg(v2) 14669 v0.AddArg(v1) 14670 v0.AddArg(y) 14671 return true 14672 } 14673 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 14674 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 14675 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 14676 for { 14677 or := v.Args[0] 14678 if or.Op != OpAMD64ORL { 14679 break 14680 } 14681 s0 := or.Args[0] 14682 if s0.Op != OpAMD64SHLLconst { 14683 break 14684 } 14685 j0 := s0.AuxInt 14686 x0 := s0.Args[0] 14687 if x0.Op != OpAMD64MOVBloadidx1 { 14688 break 14689 } 14690 i0 := x0.AuxInt 14691 s := x0.Aux 14692 p := x0.Args[0] 14693 idx := x0.Args[1] 14694 mem := x0.Args[2] 14695 y := or.Args[1] 14696 s1 := v.Args[1] 14697 if s1.Op != OpAMD64SHLLconst { 14698 break 14699 } 14700 j1 := s1.AuxInt 14701 x1 := s1.Args[0] 14702 if x1.Op != OpAMD64MOVBloadidx1 { 14703 break 14704 } 14705 i1 := x1.AuxInt 14706 if x1.Aux != s { 14707 break 14708 } 14709 if idx != x1.Args[0] { 14710 break 14711 } 14712 if p != x1.Args[1] { 14713 break 14714 } 14715 if mem != x1.Args[2] { 14716 break 14717 } 14718 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 14719 break 14720 } 14721 b = mergePoint(b, x0, x1) 14722 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 14723 v.reset(OpCopy) 14724 v.AddArg(v0) 14725 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 14726 v1.AuxInt = j0 14727 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 14728 v2.AuxInt = i0 14729 v2.Aux = s 14730 v2.AddArg(p) 14731 v2.AddArg(idx) 14732 v2.AddArg(mem) 14733 v1.AddArg(v2) 14734 v0.AddArg(v1) 14735 v0.AddArg(y) 14736 return true 14737 } 14738 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 14739 // cond: i1 
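	// Informal note (editor's sketch, not generated from gen/AMD64.rules):
	// the ORL rules in this group fuse two adjacent byte loads (i1 == i0+1)
	// whose results land in the same 16-bit-aligned lane (j1 == j0+8,
	// j0 % 16 == 0) into one shifted 16-bit load. Source code such as
	//
	//	v |= uint32(b[i])<<16 | uint32(b[i+1])<<24
	//
	// then needs a single MOVWloadidx1 instead of two MOVBloadidx1 ops.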
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
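	// Informal note (editor's sketch, not generated text): the next rules
	// handle the byte-swapped layout, where the lower address supplies the
	// high byte. Assuming a []byte b, the matched pattern corresponds to
	//
	//	v := uint16(b[i])<<8 | uint16(b[i+1])
	//
	// (what binary.BigEndian.Uint16 expands to); it becomes a single
	// 16-bit load followed by ROLWconst [8], which swaps the two bytes.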
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
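	// Informal note (editor's sketch): the BSWAPL rules above extend the
	// same idea to four bytes: two byte-swapped 16-bit halves (i1 == i0+2)
	// collapse into one 32-bit load plus a single BSWAPL, roughly what
	// binary.BigEndian.Uint32 compiles down to. The j1 == j0-8 rules,
	// continued below, perform the two-byte big-endian merge inside a
	// longer OR chain while preserving the unrelated operand y.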
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
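	// Informal note (editor's sketch): from here the little-endian and
	// byte-swap merges are repeated for the indexed addressing forms
	// (MOVBloadidx1, MOVWloadidx1). Because the base pointer and index of
	// an idx1 load are interchangeable, the generator emits one rule per
	// (p idx) permutation on each side of the match.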
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
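	// Informal note (editor's sketch): the x.Uses == 1 tests and the
	// clobber(...) calls in every cond ensure that the merged loads,
	// shifts, and inner ORLs have no other consumers and can be marked
	// dead; without them the rewrite could orphan or duplicate a value
	// that is still referenced elsewhere in the function.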
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
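	// Informal note (editor's sketch): mergePoint(b, x0, x1) picks a block
	// that dominates both original loads so the combined load is safe to
	// place there; a nil result vetoes the rewrite. The @mergePoint(...)
	// prefix in the result comments records that the new value is built in
	// that block rather than in v's own block, which is why these rules
	// reset v to OpCopy pointing at the freshly built tree.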
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
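	// Informal note (editor's sketch): ORL is commutative, but the matcher
	// at this stage does not canonicalize argument order, so each BSWAPL
	// merge is spelled out once per operand order (r1 first or sh first)
	// in addition to the p/idx permutations of the inner loads.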
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
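	// Informal note (editor's sketch): each match/cond/result comment in
	// this file is the rule text from gen/AMD64.rules that the matcher
	// below it was generated from; a rule of the shape
	//
	//	(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 ...)) or:(ORL ...))
	//	  && cond -> @mergePoint(b,x0,x1) (ORL ...)
	//
	// becomes one for-loop that checks each Op, AuxInt, Aux, and Args
	// field and, on success, builds the result tree with b.NewValue0.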
16443 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 16444 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16445 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16446 for { 16447 s0 := v.Args[0] 16448 if s0.Op != OpAMD64SHLLconst { 16449 break 16450 } 16451 j0 := s0.AuxInt 16452 x0 := s0.Args[0] 16453 if x0.Op != OpAMD64MOVBloadidx1 { 16454 break 16455 } 16456 i0 := x0.AuxInt 16457 s := x0.Aux 16458 idx := x0.Args[0] 16459 p := x0.Args[1] 16460 mem := x0.Args[2] 16461 or := v.Args[1] 16462 if or.Op != OpAMD64ORL { 16463 break 16464 } 16465 s1 := or.Args[0] 16466 if s1.Op != OpAMD64SHLLconst { 16467 break 16468 } 16469 j1 := s1.AuxInt 16470 x1 := s1.Args[0] 16471 if x1.Op != OpAMD64MOVBloadidx1 { 16472 break 16473 } 16474 i1 := x1.AuxInt 16475 if x1.Aux != s { 16476 break 16477 } 16478 if p != x1.Args[0] { 16479 break 16480 } 16481 if idx != x1.Args[1] { 16482 break 16483 } 16484 if mem != x1.Args[2] { 16485 break 16486 } 16487 y := or.Args[1] 16488 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16489 break 16490 } 16491 b = mergePoint(b, x0, x1) 16492 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16493 v.reset(OpCopy) 16494 v.AddArg(v0) 16495 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16496 v1.AuxInt = j1 16497 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16498 v2.AuxInt = 8 16499 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16500 v3.AuxInt = i0 16501 v3.Aux = s 16502 v3.AddArg(p) 16503 v3.AddArg(idx) 16504 v3.AddArg(mem) 16505 v2.AddArg(v3) 16506 v1.AddArg(v2) 16507 v0.AddArg(v1) 16508 v0.AddArg(y) 16509 return true 16510 } 16511 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 16512 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16513 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16514 for { 16515 s0 := v.Args[0] 16516 if s0.Op != OpAMD64SHLLconst { 16517 break 16518 } 16519 j0 := s0.AuxInt 16520 x0 := s0.Args[0] 16521 if x0.Op != OpAMD64MOVBloadidx1 { 16522 break 16523 } 16524 i0 := x0.AuxInt 16525 s := x0.Aux 16526 p := x0.Args[0] 16527 idx := x0.Args[1] 16528 mem := x0.Args[2] 16529 or := v.Args[1] 16530 if or.Op != OpAMD64ORL { 16531 break 16532 } 16533 s1 := or.Args[0] 16534 if s1.Op != OpAMD64SHLLconst { 16535 break 16536 } 16537 j1 := s1.AuxInt 16538 x1 := s1.Args[0] 16539 if x1.Op != OpAMD64MOVBloadidx1 { 16540 break 16541 } 16542 i1 := x1.AuxInt 16543 if x1.Aux != s { 16544 break 16545 } 16546 if idx != x1.Args[0] { 16547 break 16548 } 16549 if p != x1.Args[1] { 16550 break 16551 } 16552 if mem != x1.Args[2] { 16553 break 16554 } 16555 y := or.Args[1] 16556 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 
1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16557 break 16558 } 16559 b = mergePoint(b, x0, x1) 16560 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16561 v.reset(OpCopy) 16562 v.AddArg(v0) 16563 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16564 v1.AuxInt = j1 16565 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16566 v2.AuxInt = 8 16567 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16568 v3.AuxInt = i0 16569 v3.Aux = s 16570 v3.AddArg(p) 16571 v3.AddArg(idx) 16572 v3.AddArg(mem) 16573 v2.AddArg(v3) 16574 v1.AddArg(v2) 16575 v0.AddArg(v1) 16576 v0.AddArg(y) 16577 return true 16578 } 16579 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 16580 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16581 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16582 for { 16583 s0 := v.Args[0] 16584 if s0.Op != OpAMD64SHLLconst { 16585 break 16586 } 16587 j0 := s0.AuxInt 16588 x0 := s0.Args[0] 16589 if x0.Op != OpAMD64MOVBloadidx1 { 16590 break 16591 } 16592 i0 := x0.AuxInt 16593 s := x0.Aux 16594 idx := x0.Args[0] 16595 p := x0.Args[1] 16596 mem := x0.Args[2] 16597 or := v.Args[1] 16598 if or.Op != OpAMD64ORL { 16599 break 16600 } 16601 s1 := or.Args[0] 16602 if s1.Op != OpAMD64SHLLconst { 16603 break 16604 } 16605 j1 := s1.AuxInt 16606 x1 := s1.Args[0] 16607 if x1.Op != OpAMD64MOVBloadidx1 { 16608 break 16609 } 16610 i1 := x1.AuxInt 16611 if x1.Aux != s { 16612 break 16613 } 16614 if idx != x1.Args[0] { 16615 break 16616 } 16617 if p != x1.Args[1] { 16618 break 16619 } 16620 if mem != x1.Args[2] { 16621 break 16622 } 16623 y := or.Args[1] 16624 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16625 break 16626 } 16627 b = mergePoint(b, x0, x1) 16628 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16629 v.reset(OpCopy) 16630 v.AddArg(v0) 16631 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16632 v1.AuxInt = j1 16633 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16634 v2.AuxInt = 8 16635 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16636 v3.AuxInt = i0 16637 v3.Aux = s 16638 v3.AddArg(p) 16639 v3.AddArg(idx) 16640 v3.AddArg(mem) 16641 v2.AddArg(v3) 16642 v1.AddArg(v2) 16643 v0.AddArg(v1) 16644 v0.AddArg(y) 16645 return true 16646 } 16647 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 16648 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16649 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16650 for { 16651 s0 := v.Args[0] 16652 if s0.Op != OpAMD64SHLLconst { 16653 break 16654 } 16655 j0 := s0.AuxInt 16656 x0 := s0.Args[0] 
16657 if x0.Op != OpAMD64MOVBloadidx1 { 16658 break 16659 } 16660 i0 := x0.AuxInt 16661 s := x0.Aux 16662 p := x0.Args[0] 16663 idx := x0.Args[1] 16664 mem := x0.Args[2] 16665 or := v.Args[1] 16666 if or.Op != OpAMD64ORL { 16667 break 16668 } 16669 y := or.Args[0] 16670 s1 := or.Args[1] 16671 if s1.Op != OpAMD64SHLLconst { 16672 break 16673 } 16674 j1 := s1.AuxInt 16675 x1 := s1.Args[0] 16676 if x1.Op != OpAMD64MOVBloadidx1 { 16677 break 16678 } 16679 i1 := x1.AuxInt 16680 if x1.Aux != s { 16681 break 16682 } 16683 if p != x1.Args[0] { 16684 break 16685 } 16686 if idx != x1.Args[1] { 16687 break 16688 } 16689 if mem != x1.Args[2] { 16690 break 16691 } 16692 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16693 break 16694 } 16695 b = mergePoint(b, x0, x1) 16696 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16697 v.reset(OpCopy) 16698 v.AddArg(v0) 16699 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16700 v1.AuxInt = j1 16701 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16702 v2.AuxInt = 8 16703 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16704 v3.AuxInt = i0 16705 v3.Aux = s 16706 v3.AddArg(p) 16707 v3.AddArg(idx) 16708 v3.AddArg(mem) 16709 v2.AddArg(v3) 16710 v1.AddArg(v2) 16711 v0.AddArg(v1) 16712 v0.AddArg(y) 16713 return true 16714 } 16715 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 16716 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16717 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16718 for { 16719 s0 := v.Args[0] 16720 if s0.Op != OpAMD64SHLLconst { 16721 break 16722 } 16723 j0 := s0.AuxInt 16724 x0 := s0.Args[0] 16725 if x0.Op != OpAMD64MOVBloadidx1 { 16726 break 16727 } 16728 i0 := x0.AuxInt 16729 s := x0.Aux 16730 idx := x0.Args[0] 16731 p := x0.Args[1] 16732 mem := x0.Args[2] 16733 or := v.Args[1] 16734 if or.Op != OpAMD64ORL { 16735 break 16736 } 16737 y := or.Args[0] 16738 s1 := or.Args[1] 16739 if s1.Op != OpAMD64SHLLconst { 16740 break 16741 } 16742 j1 := s1.AuxInt 16743 x1 := s1.Args[0] 16744 if x1.Op != OpAMD64MOVBloadidx1 { 16745 break 16746 } 16747 i1 := x1.AuxInt 16748 if x1.Aux != s { 16749 break 16750 } 16751 if p != x1.Args[0] { 16752 break 16753 } 16754 if idx != x1.Args[1] { 16755 break 16756 } 16757 if mem != x1.Args[2] { 16758 break 16759 } 16760 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16761 break 16762 } 16763 b = mergePoint(b, x0, x1) 16764 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16765 v.reset(OpCopy) 16766 v.AddArg(v0) 16767 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16768 v1.AuxInt = j1 16769 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16770 v2.AuxInt = 8 16771 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16772 v3.AuxInt = i0 16773 v3.Aux = s 16774 v3.AddArg(p) 16775 v3.AddArg(idx) 16776 v3.AddArg(mem) 16777 v2.AddArg(v3) 16778 v1.AddArg(v2) 
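// The replacement is built inside out: v3 is the merged two-byte load
// (MOVWloadidx1 [i0]), v2 byte-swaps it with ROLWconst 8, v1 shifts that left
// by j1, and v0 ORs the result with the rest of the chain y. The new tree is
// placed at mergePoint(b, x0, x1), and v itself becomes a copy of v0.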
16779 v0.AddArg(v1) 16780 v0.AddArg(y) 16781 return true 16782 } 16783 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 16784 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16785 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16786 for { 16787 s0 := v.Args[0] 16788 if s0.Op != OpAMD64SHLLconst { 16789 break 16790 } 16791 j0 := s0.AuxInt 16792 x0 := s0.Args[0] 16793 if x0.Op != OpAMD64MOVBloadidx1 { 16794 break 16795 } 16796 i0 := x0.AuxInt 16797 s := x0.Aux 16798 p := x0.Args[0] 16799 idx := x0.Args[1] 16800 mem := x0.Args[2] 16801 or := v.Args[1] 16802 if or.Op != OpAMD64ORL { 16803 break 16804 } 16805 y := or.Args[0] 16806 s1 := or.Args[1] 16807 if s1.Op != OpAMD64SHLLconst { 16808 break 16809 } 16810 j1 := s1.AuxInt 16811 x1 := s1.Args[0] 16812 if x1.Op != OpAMD64MOVBloadidx1 { 16813 break 16814 } 16815 i1 := x1.AuxInt 16816 if x1.Aux != s { 16817 break 16818 } 16819 if idx != x1.Args[0] { 16820 break 16821 } 16822 if p != x1.Args[1] { 16823 break 16824 } 16825 if mem != x1.Args[2] { 16826 break 16827 } 16828 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16829 break 16830 } 16831 b = mergePoint(b, x0, x1) 16832 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16833 v.reset(OpCopy) 16834 v.AddArg(v0) 16835 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16836 v1.AuxInt = j1 16837 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16838 v2.AuxInt = 8 16839 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16840 v3.AuxInt = i0 16841 v3.Aux = s 16842 v3.AddArg(p) 16843 v3.AddArg(idx) 16844 v3.AddArg(mem) 16845 v2.AddArg(v3) 16846 v1.AddArg(v2) 16847 v0.AddArg(v1) 16848 v0.AddArg(y) 16849 return true 16850 } 16851 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 16852 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16853 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16854 for { 16855 s0 := v.Args[0] 16856 if s0.Op != OpAMD64SHLLconst { 16857 break 16858 } 16859 j0 := s0.AuxInt 16860 x0 := s0.Args[0] 16861 if x0.Op != OpAMD64MOVBloadidx1 { 16862 break 16863 } 16864 i0 := x0.AuxInt 16865 s := x0.Aux 16866 idx := x0.Args[0] 16867 p := x0.Args[1] 16868 mem := x0.Args[2] 16869 or := v.Args[1] 16870 if or.Op != OpAMD64ORL { 16871 break 16872 } 16873 y := or.Args[0] 16874 s1 := or.Args[1] 16875 if s1.Op != OpAMD64SHLLconst { 16876 break 16877 } 16878 j1 := s1.AuxInt 16879 x1 := s1.Args[0] 16880 if x1.Op != OpAMD64MOVBloadidx1 { 16881 break 16882 } 16883 i1 := x1.AuxInt 16884 if x1.Aux != s { 16885 break 16886 } 16887 if idx != x1.Args[0] { 16888 break 16889 } 16890 if p != x1.Args[1] { 16891 break 16892 } 16893 if mem != x1.Args[2] { 16894 break 16895 } 16896 if !(i1 
== i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16897 break 16898 } 16899 b = mergePoint(b, x0, x1) 16900 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16901 v.reset(OpCopy) 16902 v.AddArg(v0) 16903 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16904 v1.AuxInt = j1 16905 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16906 v2.AuxInt = 8 16907 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16908 v3.AuxInt = i0 16909 v3.Aux = s 16910 v3.AddArg(p) 16911 v3.AddArg(idx) 16912 v3.AddArg(mem) 16913 v2.AddArg(v3) 16914 v1.AddArg(v2) 16915 v0.AddArg(v1) 16916 v0.AddArg(y) 16917 return true 16918 } 16919 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 16920 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16921 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16922 for { 16923 or := v.Args[0] 16924 if or.Op != OpAMD64ORL { 16925 break 16926 } 16927 s1 := or.Args[0] 16928 if s1.Op != OpAMD64SHLLconst { 16929 break 16930 } 16931 j1 := s1.AuxInt 16932 x1 := s1.Args[0] 16933 if x1.Op != OpAMD64MOVBloadidx1 { 16934 break 16935 } 16936 i1 := x1.AuxInt 16937 s := x1.Aux 16938 p := x1.Args[0] 16939 idx := x1.Args[1] 16940 mem := x1.Args[2] 16941 y := or.Args[1] 16942 s0 := v.Args[1] 16943 if s0.Op != OpAMD64SHLLconst { 16944 break 16945 } 16946 j0 := s0.AuxInt 16947 x0 := s0.Args[0] 16948 if x0.Op != OpAMD64MOVBloadidx1 { 16949 break 16950 } 16951 i0 := x0.AuxInt 16952 if x0.Aux != s { 16953 break 16954 } 16955 if p != x0.Args[0] { 16956 break 16957 } 16958 if idx != x0.Args[1] { 16959 break 16960 } 16961 if mem != x0.Args[2] { 16962 break 16963 } 16964 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 16965 break 16966 } 16967 b = mergePoint(b, x0, x1) 16968 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 16969 v.reset(OpCopy) 16970 v.AddArg(v0) 16971 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 16972 v1.AuxInt = j1 16973 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 16974 v2.AuxInt = 8 16975 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 16976 v3.AuxInt = i0 16977 v3.Aux = s 16978 v3.AddArg(p) 16979 v3.AddArg(idx) 16980 v3.AddArg(mem) 16981 v2.AddArg(v3) 16982 v1.AddArg(v2) 16983 v0.AddArg(v1) 16984 v0.AddArg(y) 16985 return true 16986 } 16987 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 16988 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 16989 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 16990 for { 16991 or := v.Args[0] 16992 if or.Op != OpAMD64ORL { 16993 
break 16994 } 16995 s1 := or.Args[0] 16996 if s1.Op != OpAMD64SHLLconst { 16997 break 16998 } 16999 j1 := s1.AuxInt 17000 x1 := s1.Args[0] 17001 if x1.Op != OpAMD64MOVBloadidx1 { 17002 break 17003 } 17004 i1 := x1.AuxInt 17005 s := x1.Aux 17006 idx := x1.Args[0] 17007 p := x1.Args[1] 17008 mem := x1.Args[2] 17009 y := or.Args[1] 17010 s0 := v.Args[1] 17011 if s0.Op != OpAMD64SHLLconst { 17012 break 17013 } 17014 j0 := s0.AuxInt 17015 x0 := s0.Args[0] 17016 if x0.Op != OpAMD64MOVBloadidx1 { 17017 break 17018 } 17019 i0 := x0.AuxInt 17020 if x0.Aux != s { 17021 break 17022 } 17023 if p != x0.Args[0] { 17024 break 17025 } 17026 if idx != x0.Args[1] { 17027 break 17028 } 17029 if mem != x0.Args[2] { 17030 break 17031 } 17032 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17033 break 17034 } 17035 b = mergePoint(b, x0, x1) 17036 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17037 v.reset(OpCopy) 17038 v.AddArg(v0) 17039 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17040 v1.AuxInt = j1 17041 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17042 v2.AuxInt = 8 17043 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 17044 v3.AuxInt = i0 17045 v3.Aux = s 17046 v3.AddArg(p) 17047 v3.AddArg(idx) 17048 v3.AddArg(mem) 17049 v2.AddArg(v3) 17050 v1.AddArg(v2) 17051 v0.AddArg(v1) 17052 v0.AddArg(y) 17053 return true 17054 } 17055 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 17056 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17057 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 17058 for { 17059 or := v.Args[0] 17060 if or.Op != OpAMD64ORL { 17061 break 17062 } 17063 y := or.Args[0] 17064 s1 := or.Args[1] 17065 if s1.Op != OpAMD64SHLLconst { 17066 break 17067 } 17068 j1 := s1.AuxInt 17069 x1 := s1.Args[0] 17070 if x1.Op != OpAMD64MOVBloadidx1 { 17071 break 17072 } 17073 i1 := x1.AuxInt 17074 s := x1.Aux 17075 p := x1.Args[0] 17076 idx := x1.Args[1] 17077 mem := x1.Args[2] 17078 s0 := v.Args[1] 17079 if s0.Op != OpAMD64SHLLconst { 17080 break 17081 } 17082 j0 := s0.AuxInt 17083 x0 := s0.Args[0] 17084 if x0.Op != OpAMD64MOVBloadidx1 { 17085 break 17086 } 17087 i0 := x0.AuxInt 17088 if x0.Aux != s { 17089 break 17090 } 17091 if p != x0.Args[0] { 17092 break 17093 } 17094 if idx != x0.Args[1] { 17095 break 17096 } 17097 if mem != x0.Args[2] { 17098 break 17099 } 17100 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17101 break 17102 } 17103 b = mergePoint(b, x0, x1) 17104 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17105 v.reset(OpCopy) 17106 v.AddArg(v0) 17107 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17108 v1.AuxInt = j1 17109 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17110 v2.AuxInt = 8 17111 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 17112 v3.AuxInt = i0 17113 v3.Aux = s 17114 v3.AddArg(p) 17115 
v3.AddArg(idx) 17116 v3.AddArg(mem) 17117 v2.AddArg(v3) 17118 v1.AddArg(v2) 17119 v0.AddArg(v1) 17120 v0.AddArg(y) 17121 return true 17122 } 17123 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 17124 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17125 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 17126 for { 17127 or := v.Args[0] 17128 if or.Op != OpAMD64ORL { 17129 break 17130 } 17131 y := or.Args[0] 17132 s1 := or.Args[1] 17133 if s1.Op != OpAMD64SHLLconst { 17134 break 17135 } 17136 j1 := s1.AuxInt 17137 x1 := s1.Args[0] 17138 if x1.Op != OpAMD64MOVBloadidx1 { 17139 break 17140 } 17141 i1 := x1.AuxInt 17142 s := x1.Aux 17143 idx := x1.Args[0] 17144 p := x1.Args[1] 17145 mem := x1.Args[2] 17146 s0 := v.Args[1] 17147 if s0.Op != OpAMD64SHLLconst { 17148 break 17149 } 17150 j0 := s0.AuxInt 17151 x0 := s0.Args[0] 17152 if x0.Op != OpAMD64MOVBloadidx1 { 17153 break 17154 } 17155 i0 := x0.AuxInt 17156 if x0.Aux != s { 17157 break 17158 } 17159 if p != x0.Args[0] { 17160 break 17161 } 17162 if idx != x0.Args[1] { 17163 break 17164 } 17165 if mem != x0.Args[2] { 17166 break 17167 } 17168 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17169 break 17170 } 17171 b = mergePoint(b, x0, x1) 17172 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17173 v.reset(OpCopy) 17174 v.AddArg(v0) 17175 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17176 v1.AuxInt = j1 17177 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17178 v2.AuxInt = 8 17179 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 17180 v3.AuxInt = i0 17181 v3.Aux = s 17182 v3.AddArg(p) 17183 v3.AddArg(idx) 17184 v3.AddArg(mem) 17185 v2.AddArg(v3) 17186 v1.AddArg(v2) 17187 v0.AddArg(v1) 17188 v0.AddArg(y) 17189 return true 17190 } 17191 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 17192 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17193 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 17194 for { 17195 or := v.Args[0] 17196 if or.Op != OpAMD64ORL { 17197 break 17198 } 17199 s1 := or.Args[0] 17200 if s1.Op != OpAMD64SHLLconst { 17201 break 17202 } 17203 j1 := s1.AuxInt 17204 x1 := s1.Args[0] 17205 if x1.Op != OpAMD64MOVBloadidx1 { 17206 break 17207 } 17208 i1 := x1.AuxInt 17209 s := x1.Aux 17210 p := x1.Args[0] 17211 idx := x1.Args[1] 17212 mem := x1.Args[2] 17213 y := or.Args[1] 17214 s0 := v.Args[1] 17215 if s0.Op != OpAMD64SHLLconst { 17216 break 17217 } 17218 j0 := s0.AuxInt 17219 x0 := s0.Args[0] 17220 if x0.Op != OpAMD64MOVBloadidx1 { 17221 break 17222 } 17223 i0 := x0.AuxInt 17224 if x0.Aux != s { 17225 break 17226 } 17227 if idx != x0.Args[0] { 17228 break 17229 } 17230 if p != x0.Args[1] { 17231 
break 17232 } 17233 if mem != x0.Args[2] { 17234 break 17235 } 17236 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17237 break 17238 } 17239 b = mergePoint(b, x0, x1) 17240 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17241 v.reset(OpCopy) 17242 v.AddArg(v0) 17243 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17244 v1.AuxInt = j1 17245 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17246 v2.AuxInt = 8 17247 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 17248 v3.AuxInt = i0 17249 v3.Aux = s 17250 v3.AddArg(p) 17251 v3.AddArg(idx) 17252 v3.AddArg(mem) 17253 v2.AddArg(v3) 17254 v1.AddArg(v2) 17255 v0.AddArg(v1) 17256 v0.AddArg(y) 17257 return true 17258 } 17259 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 17260 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17261 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 17262 for { 17263 or := v.Args[0] 17264 if or.Op != OpAMD64ORL { 17265 break 17266 } 17267 s1 := or.Args[0] 17268 if s1.Op != OpAMD64SHLLconst { 17269 break 17270 } 17271 j1 := s1.AuxInt 17272 x1 := s1.Args[0] 17273 if x1.Op != OpAMD64MOVBloadidx1 { 17274 break 17275 } 17276 i1 := x1.AuxInt 17277 s := x1.Aux 17278 idx := x1.Args[0] 17279 p := x1.Args[1] 17280 mem := x1.Args[2] 17281 y := or.Args[1] 17282 s0 := v.Args[1] 17283 if s0.Op != OpAMD64SHLLconst { 17284 break 17285 } 17286 j0 := s0.AuxInt 17287 x0 := s0.Args[0] 17288 if x0.Op != OpAMD64MOVBloadidx1 { 17289 break 17290 } 17291 i0 := x0.AuxInt 17292 if x0.Aux != s { 17293 break 17294 } 17295 if idx != x0.Args[0] { 17296 break 17297 } 17298 if p != x0.Args[1] { 17299 break 17300 } 17301 if mem != x0.Args[2] { 17302 break 17303 } 17304 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17305 break 17306 } 17307 b = mergePoint(b, x0, x1) 17308 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17309 v.reset(OpCopy) 17310 v.AddArg(v0) 17311 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17312 v1.AuxInt = j1 17313 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17314 v2.AuxInt = 8 17315 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 17316 v3.AuxInt = i0 17317 v3.Aux = s 17318 v3.AddArg(p) 17319 v3.AddArg(idx) 17320 v3.AddArg(mem) 17321 v2.AddArg(v3) 17322 v1.AddArg(v2) 17323 v0.AddArg(v1) 17324 v0.AddArg(y) 17325 return true 17326 } 17327 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 17328 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17329 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) 
y) 17330 for { 17331 or := v.Args[0] 17332 if or.Op != OpAMD64ORL { 17333 break 17334 } 17335 y := or.Args[0] 17336 s1 := or.Args[1] 17337 if s1.Op != OpAMD64SHLLconst { 17338 break 17339 } 17340 j1 := s1.AuxInt 17341 x1 := s1.Args[0] 17342 if x1.Op != OpAMD64MOVBloadidx1 { 17343 break 17344 } 17345 i1 := x1.AuxInt 17346 s := x1.Aux 17347 p := x1.Args[0] 17348 idx := x1.Args[1] 17349 mem := x1.Args[2] 17350 s0 := v.Args[1] 17351 if s0.Op != OpAMD64SHLLconst { 17352 break 17353 } 17354 j0 := s0.AuxInt 17355 x0 := s0.Args[0] 17356 if x0.Op != OpAMD64MOVBloadidx1 { 17357 break 17358 } 17359 i0 := x0.AuxInt 17360 if x0.Aux != s { 17361 break 17362 } 17363 if idx != x0.Args[0] { 17364 break 17365 } 17366 if p != x0.Args[1] { 17367 break 17368 } 17369 if mem != x0.Args[2] { 17370 break 17371 } 17372 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17373 break 17374 } 17375 b = mergePoint(b, x0, x1) 17376 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17377 v.reset(OpCopy) 17378 v.AddArg(v0) 17379 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17380 v1.AuxInt = j1 17381 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17382 v2.AuxInt = 8 17383 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 17384 v3.AuxInt = i0 17385 v3.Aux = s 17386 v3.AddArg(p) 17387 v3.AddArg(idx) 17388 v3.AddArg(mem) 17389 v2.AddArg(v3) 17390 v1.AddArg(v2) 17391 v0.AddArg(v1) 17392 v0.AddArg(y) 17393 return true 17394 } 17395 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 17396 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17397 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 17398 for { 17399 or := v.Args[0] 17400 if or.Op != OpAMD64ORL { 17401 break 17402 } 17403 y := or.Args[0] 17404 s1 := or.Args[1] 17405 if s1.Op != OpAMD64SHLLconst { 17406 break 17407 } 17408 j1 := s1.AuxInt 17409 x1 := s1.Args[0] 17410 if x1.Op != OpAMD64MOVBloadidx1 { 17411 break 17412 } 17413 i1 := x1.AuxInt 17414 s := x1.Aux 17415 idx := x1.Args[0] 17416 p := x1.Args[1] 17417 mem := x1.Args[2] 17418 s0 := v.Args[1] 17419 if s0.Op != OpAMD64SHLLconst { 17420 break 17421 } 17422 j0 := s0.AuxInt 17423 x0 := s0.Args[0] 17424 if x0.Op != OpAMD64MOVBloadidx1 { 17425 break 17426 } 17427 i0 := x0.AuxInt 17428 if x0.Aux != s { 17429 break 17430 } 17431 if idx != x0.Args[0] { 17432 break 17433 } 17434 if p != x0.Args[1] { 17435 break 17436 } 17437 if mem != x0.Args[2] { 17438 break 17439 } 17440 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17441 break 17442 } 17443 b = mergePoint(b, x0, x1) 17444 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17445 v.reset(OpCopy) 17446 v.AddArg(v0) 17447 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17448 v1.AuxInt = j1 17449 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 17450 v2.AuxInt = 8 17451 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 
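// Two helpers carry the safety argument in these conditions: mergePoint(b,
// x0, x1) picks a block in which both original loads are available, returning
// nil (and blocking the rewrite) when no such block exists, while clobber(v)
// always returns true and invalidates v, recording that the matched value
// must end up dead. Together with the Uses == 1 checks, this ensures the
// partial loads and shifts disappear once the merged load is in place.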
17452 v3.AuxInt = i0 17453 v3.Aux = s 17454 v3.AddArg(p) 17455 v3.AddArg(idx) 17456 v3.AddArg(mem) 17457 v2.AddArg(v3) 17458 v1.AddArg(v2) 17459 v0.AddArg(v1) 17460 v0.AddArg(y) 17461 return true 17462 } 17463 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 17464 // cond: canMergeLoad(v, l, x) && clobber(l) 17465 // result: (ORLmem x [off] {sym} ptr mem) 17466 for { 17467 x := v.Args[0] 17468 l := v.Args[1] 17469 if l.Op != OpAMD64MOVLload { 17470 break 17471 } 17472 off := l.AuxInt 17473 sym := l.Aux 17474 ptr := l.Args[0] 17475 mem := l.Args[1] 17476 if !(canMergeLoad(v, l, x) && clobber(l)) { 17477 break 17478 } 17479 v.reset(OpAMD64ORLmem) 17480 v.AuxInt = off 17481 v.Aux = sym 17482 v.AddArg(x) 17483 v.AddArg(ptr) 17484 v.AddArg(mem) 17485 return true 17486 } 17487 // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) 17488 // cond: canMergeLoad(v, l, x) && clobber(l) 17489 // result: (ORLmem x [off] {sym} ptr mem) 17490 for { 17491 l := v.Args[0] 17492 if l.Op != OpAMD64MOVLload { 17493 break 17494 } 17495 off := l.AuxInt 17496 sym := l.Aux 17497 ptr := l.Args[0] 17498 mem := l.Args[1] 17499 x := v.Args[1] 17500 if !(canMergeLoad(v, l, x) && clobber(l)) { 17501 break 17502 } 17503 v.reset(OpAMD64ORLmem) 17504 v.AuxInt = off 17505 v.Aux = sym 17506 v.AddArg(x) 17507 v.AddArg(ptr) 17508 v.AddArg(mem) 17509 return true 17510 } 17511 return false 17512 } 17513 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { 17514 // match: (ORLconst [c] x) 17515 // cond: int32(c)==0 17516 // result: x 17517 for { 17518 c := v.AuxInt 17519 x := v.Args[0] 17520 if !(int32(c) == 0) { 17521 break 17522 } 17523 v.reset(OpCopy) 17524 v.Type = x.Type 17525 v.AddArg(x) 17526 return true 17527 } 17528 // match: (ORLconst [c] _) 17529 // cond: int32(c)==-1 17530 // result: (MOVLconst [-1]) 17531 for { 17532 c := v.AuxInt 17533 if !(int32(c) == -1) { 17534 break 17535 } 17536 v.reset(OpAMD64MOVLconst) 17537 v.AuxInt = -1 17538 return true 17539 } 17540 // match: (ORLconst [c] (MOVLconst [d])) 17541 // cond: 17542 // result: (MOVLconst [c|d]) 17543 for { 17544 c := v.AuxInt 17545 v_0 := v.Args[0] 17546 if v_0.Op != OpAMD64MOVLconst { 17547 break 17548 } 17549 d := v_0.AuxInt 17550 v.reset(OpAMD64MOVLconst) 17551 v.AuxInt = c | d 17552 return true 17553 } 17554 return false 17555 } 17556 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { 17557 b := v.Block 17558 _ = b 17559 types := &b.Func.Config.Types 17560 _ = types 17561 // match: (ORQ x (MOVQconst [c])) 17562 // cond: is32Bit(c) 17563 // result: (ORQconst [c] x) 17564 for { 17565 x := v.Args[0] 17566 v_1 := v.Args[1] 17567 if v_1.Op != OpAMD64MOVQconst { 17568 break 17569 } 17570 c := v_1.AuxInt 17571 if !(is32Bit(c)) { 17572 break 17573 } 17574 v.reset(OpAMD64ORQconst) 17575 v.AuxInt = c 17576 v.AddArg(x) 17577 return true 17578 } 17579 // match: (ORQ (MOVQconst [c]) x) 17580 // cond: is32Bit(c) 17581 // result: (ORQconst [c] x) 17582 for { 17583 v_0 := v.Args[0] 17584 if v_0.Op != OpAMD64MOVQconst { 17585 break 17586 } 17587 c := v_0.AuxInt 17588 x := v.Args[1] 17589 if !(is32Bit(c)) { 17590 break 17591 } 17592 v.reset(OpAMD64ORQconst) 17593 v.AuxInt = c 17594 v.AddArg(x) 17595 return true 17596 } 17597 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) 17598 // cond: d==64-c 17599 // result: (ROLQconst x [c]) 17600 for { 17601 v_0 := v.Args[0] 17602 if v_0.Op != OpAMD64SHLQconst { 17603 break 17604 } 17605 c := v_0.AuxInt 17606 x := v_0.Args[0] 17607 v_1 := v.Args[1] 17608 if v_1.Op != OpAMD64SHRQconst { 17609 break 17610 } 17611 d := v_1.AuxInt 
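// This rule and its mirror just below recognize the rotate idiom: for a
// 64-bit value,
//
//	x<<c | x>>(64-c)
//
// is a left-rotate by c, so the ORQ of the matching shift pair collapses to a
// single ROLQconst [c]. Analogous rules exist for the narrower widths
// elsewhere in this file.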
17612 if x != v_1.Args[0] { 17613 break 17614 } 17615 if !(d == 64-c) { 17616 break 17617 } 17618 v.reset(OpAMD64ROLQconst) 17619 v.AuxInt = c 17620 v.AddArg(x) 17621 return true 17622 } 17623 // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) 17624 // cond: d==64-c 17625 // result: (ROLQconst x [c]) 17626 for { 17627 v_0 := v.Args[0] 17628 if v_0.Op != OpAMD64SHRQconst { 17629 break 17630 } 17631 d := v_0.AuxInt 17632 x := v_0.Args[0] 17633 v_1 := v.Args[1] 17634 if v_1.Op != OpAMD64SHLQconst { 17635 break 17636 } 17637 c := v_1.AuxInt 17638 if x != v_1.Args[0] { 17639 break 17640 } 17641 if !(d == 64-c) { 17642 break 17643 } 17644 v.reset(OpAMD64ROLQconst) 17645 v.AuxInt = c 17646 v.AddArg(x) 17647 return true 17648 } 17649 // match: (ORQ x x) 17650 // cond: 17651 // result: x 17652 for { 17653 x := v.Args[0] 17654 if x != v.Args[1] { 17655 break 17656 } 17657 v.reset(OpCopy) 17658 v.Type = x.Type 17659 v.AddArg(x) 17660 return true 17661 } 17662 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 17663 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17664 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 17665 for { 17666 x0 := v.Args[0] 17667 if x0.Op != OpAMD64MOVBload { 17668 break 17669 } 17670 i0 := x0.AuxInt 17671 s := x0.Aux 17672 p := x0.Args[0] 17673 mem := x0.Args[1] 17674 sh := v.Args[1] 17675 if sh.Op != OpAMD64SHLQconst { 17676 break 17677 } 17678 if sh.AuxInt != 8 { 17679 break 17680 } 17681 x1 := sh.Args[0] 17682 if x1.Op != OpAMD64MOVBload { 17683 break 17684 } 17685 i1 := x1.AuxInt 17686 if x1.Aux != s { 17687 break 17688 } 17689 if p != x1.Args[0] { 17690 break 17691 } 17692 if mem != x1.Args[1] { 17693 break 17694 } 17695 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17696 break 17697 } 17698 b = mergePoint(b, x0, x1) 17699 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 17700 v.reset(OpCopy) 17701 v.AddArg(v0) 17702 v0.AuxInt = i0 17703 v0.Aux = s 17704 v0.AddArg(p) 17705 v0.AddArg(mem) 17706 return true 17707 } 17708 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 17709 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17710 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 17711 for { 17712 sh := v.Args[0] 17713 if sh.Op != OpAMD64SHLQconst { 17714 break 17715 } 17716 if sh.AuxInt != 8 { 17717 break 17718 } 17719 x1 := sh.Args[0] 17720 if x1.Op != OpAMD64MOVBload { 17721 break 17722 } 17723 i1 := x1.AuxInt 17724 s := x1.Aux 17725 p := x1.Args[0] 17726 mem := x1.Args[1] 17727 x0 := v.Args[1] 17728 if x0.Op != OpAMD64MOVBload { 17729 break 17730 } 17731 i0 := x0.AuxInt 17732 if x0.Aux != s { 17733 break 17734 } 17735 if p != x0.Args[0] { 17736 break 17737 } 17738 if mem != x0.Args[1] { 17739 break 17740 } 17741 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17742 break 17743 } 17744 b = mergePoint(b, x0, x1) 17745 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 17746 v.reset(OpCopy) 17747 v.AddArg(v0) 17748 v0.AuxInt = i0 17749 v0.Aux = s 17750 v0.AddArg(p) 17751 v0.AddArg(mem) 17752 return true 17753 } 17754 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] 
x1:(MOVWload [i1] {s} p mem))) 17755 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17756 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 17757 for { 17758 x0 := v.Args[0] 17759 if x0.Op != OpAMD64MOVWload { 17760 break 17761 } 17762 i0 := x0.AuxInt 17763 s := x0.Aux 17764 p := x0.Args[0] 17765 mem := x0.Args[1] 17766 sh := v.Args[1] 17767 if sh.Op != OpAMD64SHLQconst { 17768 break 17769 } 17770 if sh.AuxInt != 16 { 17771 break 17772 } 17773 x1 := sh.Args[0] 17774 if x1.Op != OpAMD64MOVWload { 17775 break 17776 } 17777 i1 := x1.AuxInt 17778 if x1.Aux != s { 17779 break 17780 } 17781 if p != x1.Args[0] { 17782 break 17783 } 17784 if mem != x1.Args[1] { 17785 break 17786 } 17787 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17788 break 17789 } 17790 b = mergePoint(b, x0, x1) 17791 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 17792 v.reset(OpCopy) 17793 v.AddArg(v0) 17794 v0.AuxInt = i0 17795 v0.Aux = s 17796 v0.AddArg(p) 17797 v0.AddArg(mem) 17798 return true 17799 } 17800 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 17801 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17802 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 17803 for { 17804 sh := v.Args[0] 17805 if sh.Op != OpAMD64SHLQconst { 17806 break 17807 } 17808 if sh.AuxInt != 16 { 17809 break 17810 } 17811 x1 := sh.Args[0] 17812 if x1.Op != OpAMD64MOVWload { 17813 break 17814 } 17815 i1 := x1.AuxInt 17816 s := x1.Aux 17817 p := x1.Args[0] 17818 mem := x1.Args[1] 17819 x0 := v.Args[1] 17820 if x0.Op != OpAMD64MOVWload { 17821 break 17822 } 17823 i0 := x0.AuxInt 17824 if x0.Aux != s { 17825 break 17826 } 17827 if p != x0.Args[0] { 17828 break 17829 } 17830 if mem != x0.Args[1] { 17831 break 17832 } 17833 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17834 break 17835 } 17836 b = mergePoint(b, x0, x1) 17837 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 17838 v.reset(OpCopy) 17839 v.AddArg(v0) 17840 v0.AuxInt = i0 17841 v0.Aux = s 17842 v0.AddArg(p) 17843 v0.AddArg(mem) 17844 return true 17845 } 17846 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) 17847 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17848 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 17849 for { 17850 x0 := v.Args[0] 17851 if x0.Op != OpAMD64MOVLload { 17852 break 17853 } 17854 i0 := x0.AuxInt 17855 s := x0.Aux 17856 p := x0.Args[0] 17857 mem := x0.Args[1] 17858 sh := v.Args[1] 17859 if sh.Op != OpAMD64SHLQconst { 17860 break 17861 } 17862 if sh.AuxInt != 32 { 17863 break 17864 } 17865 x1 := sh.Args[0] 17866 if x1.Op != OpAMD64MOVLload { 17867 break 17868 } 17869 i1 := x1.AuxInt 17870 if x1.Aux != s { 17871 break 17872 } 17873 if p != x1.Args[0] { 17874 break 17875 } 17876 if mem != x1.Args[1] { 17877 break 17878 } 17879 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17880 break 17881 } 17882 b = mergePoint(b, x0, x1) 17883 v0 := b.NewValue0(v.Pos, 
OpAMD64MOVQload, types.UInt64) 17884 v.reset(OpCopy) 17885 v.AddArg(v0) 17886 v0.AuxInt = i0 17887 v0.Aux = s 17888 v0.AddArg(p) 17889 v0.AddArg(mem) 17890 return true 17891 } 17892 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) 17893 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17894 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 17895 for { 17896 sh := v.Args[0] 17897 if sh.Op != OpAMD64SHLQconst { 17898 break 17899 } 17900 if sh.AuxInt != 32 { 17901 break 17902 } 17903 x1 := sh.Args[0] 17904 if x1.Op != OpAMD64MOVLload { 17905 break 17906 } 17907 i1 := x1.AuxInt 17908 s := x1.Aux 17909 p := x1.Args[0] 17910 mem := x1.Args[1] 17911 x0 := v.Args[1] 17912 if x0.Op != OpAMD64MOVLload { 17913 break 17914 } 17915 i0 := x0.AuxInt 17916 if x0.Aux != s { 17917 break 17918 } 17919 if p != x0.Args[0] { 17920 break 17921 } 17922 if mem != x0.Args[1] { 17923 break 17924 } 17925 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17926 break 17927 } 17928 b = mergePoint(b, x0, x1) 17929 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) 17930 v.reset(OpCopy) 17931 v.AddArg(v0) 17932 v0.AuxInt = i0 17933 v0.Aux = s 17934 v0.AddArg(p) 17935 v0.AddArg(mem) 17936 return true 17937 } 17938 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 17939 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17940 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 17941 for { 17942 s1 := v.Args[0] 17943 if s1.Op != OpAMD64SHLQconst { 17944 break 17945 } 17946 j1 := s1.AuxInt 17947 x1 := s1.Args[0] 17948 if x1.Op != OpAMD64MOVBload { 17949 break 17950 } 17951 i1 := x1.AuxInt 17952 s := x1.Aux 17953 p := x1.Args[0] 17954 mem := x1.Args[1] 17955 or := v.Args[1] 17956 if or.Op != OpAMD64ORQ { 17957 break 17958 } 17959 s0 := or.Args[0] 17960 if s0.Op != OpAMD64SHLQconst { 17961 break 17962 } 17963 j0 := s0.AuxInt 17964 x0 := s0.Args[0] 17965 if x0.Op != OpAMD64MOVBload { 17966 break 17967 } 17968 i0 := x0.AuxInt 17969 if x0.Aux != s { 17970 break 17971 } 17972 if p != x0.Args[0] { 17973 break 17974 } 17975 if mem != x0.Args[1] { 17976 break 17977 } 17978 y := or.Args[1] 17979 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17980 break 17981 } 17982 b = mergePoint(b, x0, x1) 17983 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 17984 v.reset(OpCopy) 17985 v.AddArg(v0) 17986 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 17987 v1.AuxInt = j0 17988 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 17989 v2.AuxInt = i0 17990 v2.Aux = s 17991 v2.AddArg(p) 17992 v2.AddArg(mem) 17993 v1.AddArg(v2) 17994 v0.AddArg(v1) 17995 v0.AddArg(y) 17996 return true 17997 } 17998 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) 17999 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 
1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18000 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18001 for { 18002 s1 := v.Args[0] 18003 if s1.Op != OpAMD64SHLQconst { 18004 break 18005 } 18006 j1 := s1.AuxInt 18007 x1 := s1.Args[0] 18008 if x1.Op != OpAMD64MOVBload { 18009 break 18010 } 18011 i1 := x1.AuxInt 18012 s := x1.Aux 18013 p := x1.Args[0] 18014 mem := x1.Args[1] 18015 or := v.Args[1] 18016 if or.Op != OpAMD64ORQ { 18017 break 18018 } 18019 y := or.Args[0] 18020 s0 := or.Args[1] 18021 if s0.Op != OpAMD64SHLQconst { 18022 break 18023 } 18024 j0 := s0.AuxInt 18025 x0 := s0.Args[0] 18026 if x0.Op != OpAMD64MOVBload { 18027 break 18028 } 18029 i0 := x0.AuxInt 18030 if x0.Aux != s { 18031 break 18032 } 18033 if p != x0.Args[0] { 18034 break 18035 } 18036 if mem != x0.Args[1] { 18037 break 18038 } 18039 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18040 break 18041 } 18042 b = mergePoint(b, x0, x1) 18043 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18044 v.reset(OpCopy) 18045 v.AddArg(v0) 18046 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18047 v1.AuxInt = j0 18048 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 18049 v2.AuxInt = i0 18050 v2.Aux = s 18051 v2.AddArg(p) 18052 v2.AddArg(mem) 18053 v1.AddArg(v2) 18054 v0.AddArg(v1) 18055 v0.AddArg(y) 18056 return true 18057 } 18058 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 18059 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18060 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18061 for { 18062 or := v.Args[0] 18063 if or.Op != OpAMD64ORQ { 18064 break 18065 } 18066 s0 := or.Args[0] 18067 if s0.Op != OpAMD64SHLQconst { 18068 break 18069 } 18070 j0 := s0.AuxInt 18071 x0 := s0.Args[0] 18072 if x0.Op != OpAMD64MOVBload { 18073 break 18074 } 18075 i0 := x0.AuxInt 18076 s := x0.Aux 18077 p := x0.Args[0] 18078 mem := x0.Args[1] 18079 y := or.Args[1] 18080 s1 := v.Args[1] 18081 if s1.Op != OpAMD64SHLQconst { 18082 break 18083 } 18084 j1 := s1.AuxInt 18085 x1 := s1.Args[0] 18086 if x1.Op != OpAMD64MOVBload { 18087 break 18088 } 18089 i1 := x1.AuxInt 18090 if x1.Aux != s { 18091 break 18092 } 18093 if p != x1.Args[0] { 18094 break 18095 } 18096 if mem != x1.Args[1] { 18097 break 18098 } 18099 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18100 break 18101 } 18102 b = mergePoint(b, x0, x1) 18103 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18104 v.reset(OpCopy) 18105 v.AddArg(v0) 18106 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18107 v1.AuxInt = j0 18108 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 18109 v2.AuxInt = i0 18110 v2.Aux = s 18111 v2.AddArg(p) 18112 v2.AddArg(mem) 18113 v1.AddArg(v2) 18114 v0.AddArg(v1) 18115 v0.AddArg(y) 18116 return true 18117 } 18118 // match: (ORQ 
or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 18119 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18120 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18121 for { 18122 or := v.Args[0] 18123 if or.Op != OpAMD64ORQ { 18124 break 18125 } 18126 y := or.Args[0] 18127 s0 := or.Args[1] 18128 if s0.Op != OpAMD64SHLQconst { 18129 break 18130 } 18131 j0 := s0.AuxInt 18132 x0 := s0.Args[0] 18133 if x0.Op != OpAMD64MOVBload { 18134 break 18135 } 18136 i0 := x0.AuxInt 18137 s := x0.Aux 18138 p := x0.Args[0] 18139 mem := x0.Args[1] 18140 s1 := v.Args[1] 18141 if s1.Op != OpAMD64SHLQconst { 18142 break 18143 } 18144 j1 := s1.AuxInt 18145 x1 := s1.Args[0] 18146 if x1.Op != OpAMD64MOVBload { 18147 break 18148 } 18149 i1 := x1.AuxInt 18150 if x1.Aux != s { 18151 break 18152 } 18153 if p != x1.Args[0] { 18154 break 18155 } 18156 if mem != x1.Args[1] { 18157 break 18158 } 18159 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18160 break 18161 } 18162 b = mergePoint(b, x0, x1) 18163 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18164 v.reset(OpCopy) 18165 v.AddArg(v0) 18166 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18167 v1.AuxInt = j0 18168 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 18169 v2.AuxInt = i0 18170 v2.Aux = s 18171 v2.AddArg(p) 18172 v2.AddArg(mem) 18173 v1.AddArg(v2) 18174 v0.AddArg(v1) 18175 v0.AddArg(y) 18176 return true 18177 } 18178 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) 18179 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18180 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 18181 for { 18182 s1 := v.Args[0] 18183 if s1.Op != OpAMD64SHLQconst { 18184 break 18185 } 18186 j1 := s1.AuxInt 18187 x1 := s1.Args[0] 18188 if x1.Op != OpAMD64MOVWload { 18189 break 18190 } 18191 i1 := x1.AuxInt 18192 s := x1.Aux 18193 p := x1.Args[0] 18194 mem := x1.Args[1] 18195 or := v.Args[1] 18196 if or.Op != OpAMD64ORQ { 18197 break 18198 } 18199 s0 := or.Args[0] 18200 if s0.Op != OpAMD64SHLQconst { 18201 break 18202 } 18203 j0 := s0.AuxInt 18204 x0 := s0.Args[0] 18205 if x0.Op != OpAMD64MOVWload { 18206 break 18207 } 18208 i0 := x0.AuxInt 18209 if x0.Aux != s { 18210 break 18211 } 18212 if p != x0.Args[0] { 18213 break 18214 } 18215 if mem != x0.Args[1] { 18216 break 18217 } 18218 y := or.Args[1] 18219 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18220 break 18221 } 18222 b = mergePoint(b, x0, x1) 18223 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18224 v.reset(OpCopy) 18225 v.AddArg(v0) 18226 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18227 v1.AuxInt = j0 18228 v2 := b.NewValue0(v.Pos, 
OpAMD64MOVLload, types.UInt32) 18229 v2.AuxInt = i0 18230 v2.Aux = s 18231 v2.AddArg(p) 18232 v2.AddArg(mem) 18233 v1.AddArg(v2) 18234 v0.AddArg(v1) 18235 v0.AddArg(y) 18236 return true 18237 } 18238 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) 18239 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18240 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 18241 for { 18242 s1 := v.Args[0] 18243 if s1.Op != OpAMD64SHLQconst { 18244 break 18245 } 18246 j1 := s1.AuxInt 18247 x1 := s1.Args[0] 18248 if x1.Op != OpAMD64MOVWload { 18249 break 18250 } 18251 i1 := x1.AuxInt 18252 s := x1.Aux 18253 p := x1.Args[0] 18254 mem := x1.Args[1] 18255 or := v.Args[1] 18256 if or.Op != OpAMD64ORQ { 18257 break 18258 } 18259 y := or.Args[0] 18260 s0 := or.Args[1] 18261 if s0.Op != OpAMD64SHLQconst { 18262 break 18263 } 18264 j0 := s0.AuxInt 18265 x0 := s0.Args[0] 18266 if x0.Op != OpAMD64MOVWload { 18267 break 18268 } 18269 i0 := x0.AuxInt 18270 if x0.Aux != s { 18271 break 18272 } 18273 if p != x0.Args[0] { 18274 break 18275 } 18276 if mem != x0.Args[1] { 18277 break 18278 } 18279 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18280 break 18281 } 18282 b = mergePoint(b, x0, x1) 18283 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18284 v.reset(OpCopy) 18285 v.AddArg(v0) 18286 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18287 v1.AuxInt = j0 18288 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 18289 v2.AuxInt = i0 18290 v2.Aux = s 18291 v2.AddArg(p) 18292 v2.AddArg(mem) 18293 v1.AddArg(v2) 18294 v0.AddArg(v1) 18295 v0.AddArg(y) 18296 return true 18297 } 18298 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 18299 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18300 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 18301 for { 18302 or := v.Args[0] 18303 if or.Op != OpAMD64ORQ { 18304 break 18305 } 18306 s0 := or.Args[0] 18307 if s0.Op != OpAMD64SHLQconst { 18308 break 18309 } 18310 j0 := s0.AuxInt 18311 x0 := s0.Args[0] 18312 if x0.Op != OpAMD64MOVWload { 18313 break 18314 } 18315 i0 := x0.AuxInt 18316 s := x0.Aux 18317 p := x0.Args[0] 18318 mem := x0.Args[1] 18319 y := or.Args[1] 18320 s1 := v.Args[1] 18321 if s1.Op != OpAMD64SHLQconst { 18322 break 18323 } 18324 j1 := s1.AuxInt 18325 x1 := s1.Args[0] 18326 if x1.Op != OpAMD64MOVWload { 18327 break 18328 } 18329 i1 := x1.AuxInt 18330 if x1.Aux != s { 18331 break 18332 } 18333 if p != x1.Args[0] { 18334 break 18335 } 18336 if mem != x1.Args[1] { 18337 break 18338 } 18339 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18340 break 18341 } 18342 b = 
mergePoint(b, x0, x1) 18343 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18344 v.reset(OpCopy) 18345 v.AddArg(v0) 18346 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18347 v1.AuxInt = j0 18348 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 18349 v2.AuxInt = i0 18350 v2.Aux = s 18351 v2.AddArg(p) 18352 v2.AddArg(mem) 18353 v1.AddArg(v2) 18354 v0.AddArg(v1) 18355 v0.AddArg(y) 18356 return true 18357 } 18358 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 18359 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18360 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 18361 for { 18362 or := v.Args[0] 18363 if or.Op != OpAMD64ORQ { 18364 break 18365 } 18366 y := or.Args[0] 18367 s0 := or.Args[1] 18368 if s0.Op != OpAMD64SHLQconst { 18369 break 18370 } 18371 j0 := s0.AuxInt 18372 x0 := s0.Args[0] 18373 if x0.Op != OpAMD64MOVWload { 18374 break 18375 } 18376 i0 := x0.AuxInt 18377 s := x0.Aux 18378 p := x0.Args[0] 18379 mem := x0.Args[1] 18380 s1 := v.Args[1] 18381 if s1.Op != OpAMD64SHLQconst { 18382 break 18383 } 18384 j1 := s1.AuxInt 18385 x1 := s1.Args[0] 18386 if x1.Op != OpAMD64MOVWload { 18387 break 18388 } 18389 i1 := x1.AuxInt 18390 if x1.Aux != s { 18391 break 18392 } 18393 if p != x1.Args[0] { 18394 break 18395 } 18396 if mem != x1.Args[1] { 18397 break 18398 } 18399 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18400 break 18401 } 18402 b = mergePoint(b, x0, x1) 18403 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 18404 v.reset(OpCopy) 18405 v.AddArg(v0) 18406 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 18407 v1.AuxInt = j0 18408 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 18409 v2.AuxInt = i0 18410 v2.Aux = s 18411 v2.AddArg(p) 18412 v2.AddArg(mem) 18413 v1.AddArg(v2) 18414 v0.AddArg(v1) 18415 v0.AddArg(y) 18416 return true 18417 } 18418 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18419 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18420 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18421 for { 18422 x0 := v.Args[0] 18423 if x0.Op != OpAMD64MOVBloadidx1 { 18424 break 18425 } 18426 i0 := x0.AuxInt 18427 s := x0.Aux 18428 p := x0.Args[0] 18429 idx := x0.Args[1] 18430 mem := x0.Args[2] 18431 sh := v.Args[1] 18432 if sh.Op != OpAMD64SHLQconst { 18433 break 18434 } 18435 if sh.AuxInt != 8 { 18436 break 18437 } 18438 x1 := sh.Args[0] 18439 if x1.Op != OpAMD64MOVBloadidx1 { 18440 break 18441 } 18442 i1 := x1.AuxInt 18443 if x1.Aux != s { 18444 break 18445 } 18446 if p != x1.Args[0] { 18447 break 18448 } 18449 if idx != x1.Args[1] { 18450 break 18451 } 18452 if mem != x1.Args[2] { 18453 break 18454 } 18455 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18456 break 18457 } 18458 b = mergePoint(b, x0, x1) 18459 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18460 v.reset(OpCopy) 
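// This rule and the operand-order variants that follow are the indexed form
// of the little-endian pair merge: the low byte is used unshifted and the
// byte at i0+1 is shifted left 8, so the two MOVBloadidx1 loads become one
// MOVWloadidx1 with no byte swap. A sketch of source that matches (b is a
// []byte; illustrative only):
//
//	_ = uint64(b[i]) | uint64(b[i+1])<<8
//
// which after this rewrite performs a single two-byte load of b[i:i+2].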
18461 v.AddArg(v0) 18462 v0.AuxInt = i0 18463 v0.Aux = s 18464 v0.AddArg(p) 18465 v0.AddArg(idx) 18466 v0.AddArg(mem) 18467 return true 18468 } 18469 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18470 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18471 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18472 for { 18473 x0 := v.Args[0] 18474 if x0.Op != OpAMD64MOVBloadidx1 { 18475 break 18476 } 18477 i0 := x0.AuxInt 18478 s := x0.Aux 18479 idx := x0.Args[0] 18480 p := x0.Args[1] 18481 mem := x0.Args[2] 18482 sh := v.Args[1] 18483 if sh.Op != OpAMD64SHLQconst { 18484 break 18485 } 18486 if sh.AuxInt != 8 { 18487 break 18488 } 18489 x1 := sh.Args[0] 18490 if x1.Op != OpAMD64MOVBloadidx1 { 18491 break 18492 } 18493 i1 := x1.AuxInt 18494 if x1.Aux != s { 18495 break 18496 } 18497 if p != x1.Args[0] { 18498 break 18499 } 18500 if idx != x1.Args[1] { 18501 break 18502 } 18503 if mem != x1.Args[2] { 18504 break 18505 } 18506 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18507 break 18508 } 18509 b = mergePoint(b, x0, x1) 18510 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18511 v.reset(OpCopy) 18512 v.AddArg(v0) 18513 v0.AuxInt = i0 18514 v0.Aux = s 18515 v0.AddArg(p) 18516 v0.AddArg(idx) 18517 v0.AddArg(mem) 18518 return true 18519 } 18520 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18521 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18522 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18523 for { 18524 x0 := v.Args[0] 18525 if x0.Op != OpAMD64MOVBloadidx1 { 18526 break 18527 } 18528 i0 := x0.AuxInt 18529 s := x0.Aux 18530 p := x0.Args[0] 18531 idx := x0.Args[1] 18532 mem := x0.Args[2] 18533 sh := v.Args[1] 18534 if sh.Op != OpAMD64SHLQconst { 18535 break 18536 } 18537 if sh.AuxInt != 8 { 18538 break 18539 } 18540 x1 := sh.Args[0] 18541 if x1.Op != OpAMD64MOVBloadidx1 { 18542 break 18543 } 18544 i1 := x1.AuxInt 18545 if x1.Aux != s { 18546 break 18547 } 18548 if idx != x1.Args[0] { 18549 break 18550 } 18551 if p != x1.Args[1] { 18552 break 18553 } 18554 if mem != x1.Args[2] { 18555 break 18556 } 18557 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18558 break 18559 } 18560 b = mergePoint(b, x0, x1) 18561 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18562 v.reset(OpCopy) 18563 v.AddArg(v0) 18564 v0.AuxInt = i0 18565 v0.Aux = s 18566 v0.AddArg(p) 18567 v0.AddArg(idx) 18568 v0.AddArg(mem) 18569 return true 18570 } 18571 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18572 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18573 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18574 for { 18575 x0 := v.Args[0] 18576 if x0.Op != OpAMD64MOVBloadidx1 { 18577 break 18578 } 18579 i0 := x0.AuxInt 18580 s := x0.Aux 18581 idx := x0.Args[0] 18582 p := x0.Args[1] 18583 mem := x0.Args[2] 18584 sh := v.Args[1] 18585 if sh.Op != OpAMD64SHLQconst { 18586 break 
18587 } 18588 if sh.AuxInt != 8 { 18589 break 18590 } 18591 x1 := sh.Args[0] 18592 if x1.Op != OpAMD64MOVBloadidx1 { 18593 break 18594 } 18595 i1 := x1.AuxInt 18596 if x1.Aux != s { 18597 break 18598 } 18599 if idx != x1.Args[0] { 18600 break 18601 } 18602 if p != x1.Args[1] { 18603 break 18604 } 18605 if mem != x1.Args[2] { 18606 break 18607 } 18608 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18609 break 18610 } 18611 b = mergePoint(b, x0, x1) 18612 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18613 v.reset(OpCopy) 18614 v.AddArg(v0) 18615 v0.AuxInt = i0 18616 v0.Aux = s 18617 v0.AddArg(p) 18618 v0.AddArg(idx) 18619 v0.AddArg(mem) 18620 return true 18621 } 18622 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 18623 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18624 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18625 for { 18626 sh := v.Args[0] 18627 if sh.Op != OpAMD64SHLQconst { 18628 break 18629 } 18630 if sh.AuxInt != 8 { 18631 break 18632 } 18633 x1 := sh.Args[0] 18634 if x1.Op != OpAMD64MOVBloadidx1 { 18635 break 18636 } 18637 i1 := x1.AuxInt 18638 s := x1.Aux 18639 p := x1.Args[0] 18640 idx := x1.Args[1] 18641 mem := x1.Args[2] 18642 x0 := v.Args[1] 18643 if x0.Op != OpAMD64MOVBloadidx1 { 18644 break 18645 } 18646 i0 := x0.AuxInt 18647 if x0.Aux != s { 18648 break 18649 } 18650 if p != x0.Args[0] { 18651 break 18652 } 18653 if idx != x0.Args[1] { 18654 break 18655 } 18656 if mem != x0.Args[2] { 18657 break 18658 } 18659 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18660 break 18661 } 18662 b = mergePoint(b, x0, x1) 18663 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18664 v.reset(OpCopy) 18665 v.AddArg(v0) 18666 v0.AuxInt = i0 18667 v0.Aux = s 18668 v0.AddArg(p) 18669 v0.AddArg(idx) 18670 v0.AddArg(mem) 18671 return true 18672 } 18673 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 18674 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18675 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18676 for { 18677 sh := v.Args[0] 18678 if sh.Op != OpAMD64SHLQconst { 18679 break 18680 } 18681 if sh.AuxInt != 8 { 18682 break 18683 } 18684 x1 := sh.Args[0] 18685 if x1.Op != OpAMD64MOVBloadidx1 { 18686 break 18687 } 18688 i1 := x1.AuxInt 18689 s := x1.Aux 18690 idx := x1.Args[0] 18691 p := x1.Args[1] 18692 mem := x1.Args[2] 18693 x0 := v.Args[1] 18694 if x0.Op != OpAMD64MOVBloadidx1 { 18695 break 18696 } 18697 i0 := x0.AuxInt 18698 if x0.Aux != s { 18699 break 18700 } 18701 if p != x0.Args[0] { 18702 break 18703 } 18704 if idx != x0.Args[1] { 18705 break 18706 } 18707 if mem != x0.Args[2] { 18708 break 18709 } 18710 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18711 break 18712 } 18713 b = mergePoint(b, x0, x1) 18714 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18715 v.reset(OpCopy) 18716 v.AddArg(v0) 18717 v0.AuxInt = i0 18718 v0.Aux = s 18719 v0.AddArg(p) 18720 v0.AddArg(idx) 18721 v0.AddArg(mem) 18722 return 
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
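	// Editorial note (illustrative, not part of the generated rules): the
	// cases below merge two adjacent 16-bit indexed loads, the upper one
	// shifted left by 16, into one 32-bit indexed load. Sketch of the
	// reasoning on little-endian AMD64: with w0 = load16(p+idx+i0) and
	// w1 = load16(p+idx+i0+2), the value w0 | w1<<16 equals load32(p+idx+i0),
	// so the whole ORQ tree can be replaced by a single MOVLloadidx1 placed
	// at the merge point of the two loads.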
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
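	// Editorial note (illustrative): the next group is the same merge one
	// size up — two adjacent 32-bit indexed loads, the upper one shifted
	// left by 32 (i1 == i0+4), combine into a single 64-bit MOVQloadidx1.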
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
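	// Editorial note (illustrative): the cases below handle partially merged
	// OR chains. Both loads are shifted, by j0 and j1 == j0+8, and the inner
	// ORQ still carries an unrelated operand y. With b0/b1 the two adjacent
	// bytes, the shape is (b1<<(j0+8)) | ((b0<<j0) | y), which equals
	// ((load16 << j0) | y) on little-endian AMD64, so the pair collapses to
	// SHLQconst [j0] of a MOVWloadidx1 ORed with y. The extra condition
	// j0 % 16 == 0 limits the rewrite to 16-bit-aligned shift amounts.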
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
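	// Editorial note: from here on the same shifted-pair merge is matched
	// with the outer ORQ's operands swapped — the partially built chain
	// or:(ORQ ...) comes first and the shifted load s1 second. The rewrite
	// and its conditions are unchanged; only the argument positions differ.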
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
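	// Editorial note (illustrative): the shifted-pair merge now repeats for
	// 16-bit loads — two MOVWloadidx1 values shifted by j0 and j1 == j0+16
	// (with j0 % 32 == 0) inside an ORQ chain collapse to SHLQconst [j0] of
	// a single MOVLloadidx1, again ORed with the remaining operand y.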
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
if x0.Aux != s { 20780 break 20781 } 20782 if p != x0.Args[0] { 20783 break 20784 } 20785 if idx != x0.Args[1] { 20786 break 20787 } 20788 if mem != x0.Args[2] { 20789 break 20790 } 20791 y := or.Args[1] 20792 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20793 break 20794 } 20795 b = mergePoint(b, x0, x1) 20796 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 20797 v.reset(OpCopy) 20798 v.AddArg(v0) 20799 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 20800 v1.AuxInt = j0 20801 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 20802 v2.AuxInt = i0 20803 v2.Aux = s 20804 v2.AddArg(p) 20805 v2.AddArg(idx) 20806 v2.AddArg(mem) 20807 v1.AddArg(v2) 20808 v0.AddArg(v1) 20809 v0.AddArg(y) 20810 return true 20811 } 20812 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 20813 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20814 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 20815 for { 20816 s1 := v.Args[0] 20817 if s1.Op != OpAMD64SHLQconst { 20818 break 20819 } 20820 j1 := s1.AuxInt 20821 x1 := s1.Args[0] 20822 if x1.Op != OpAMD64MOVWloadidx1 { 20823 break 20824 } 20825 i1 := x1.AuxInt 20826 s := x1.Aux 20827 p := x1.Args[0] 20828 idx := x1.Args[1] 20829 mem := x1.Args[2] 20830 or := v.Args[1] 20831 if or.Op != OpAMD64ORQ { 20832 break 20833 } 20834 s0 := or.Args[0] 20835 if s0.Op != OpAMD64SHLQconst { 20836 break 20837 } 20838 j0 := s0.AuxInt 20839 x0 := s0.Args[0] 20840 if x0.Op != OpAMD64MOVWloadidx1 { 20841 break 20842 } 20843 i0 := x0.AuxInt 20844 if x0.Aux != s { 20845 break 20846 } 20847 if idx != x0.Args[0] { 20848 break 20849 } 20850 if p != x0.Args[1] { 20851 break 20852 } 20853 if mem != x0.Args[2] { 20854 break 20855 } 20856 y := or.Args[1] 20857 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20858 break 20859 } 20860 b = mergePoint(b, x0, x1) 20861 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 20862 v.reset(OpCopy) 20863 v.AddArg(v0) 20864 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 20865 v1.AuxInt = j0 20866 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 20867 v2.AuxInt = i0 20868 v2.Aux = s 20869 v2.AddArg(p) 20870 v2.AddArg(idx) 20871 v2.AddArg(mem) 20872 v1.AddArg(v2) 20873 v0.AddArg(v1) 20874 v0.AddArg(y) 20875 return true 20876 } 20877 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 20878 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20879 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 20880 for { 20881 s1 := v.Args[0] 20882 if s1.Op != OpAMD64SHLQconst { 20883 break 20884 } 20885 j1 := s1.AuxInt 20886 
x1 := s1.Args[0] 20887 if x1.Op != OpAMD64MOVWloadidx1 { 20888 break 20889 } 20890 i1 := x1.AuxInt 20891 s := x1.Aux 20892 idx := x1.Args[0] 20893 p := x1.Args[1] 20894 mem := x1.Args[2] 20895 or := v.Args[1] 20896 if or.Op != OpAMD64ORQ { 20897 break 20898 } 20899 s0 := or.Args[0] 20900 if s0.Op != OpAMD64SHLQconst { 20901 break 20902 } 20903 j0 := s0.AuxInt 20904 x0 := s0.Args[0] 20905 if x0.Op != OpAMD64MOVWloadidx1 { 20906 break 20907 } 20908 i0 := x0.AuxInt 20909 if x0.Aux != s { 20910 break 20911 } 20912 if idx != x0.Args[0] { 20913 break 20914 } 20915 if p != x0.Args[1] { 20916 break 20917 } 20918 if mem != x0.Args[2] { 20919 break 20920 } 20921 y := or.Args[1] 20922 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20923 break 20924 } 20925 b = mergePoint(b, x0, x1) 20926 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 20927 v.reset(OpCopy) 20928 v.AddArg(v0) 20929 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 20930 v1.AuxInt = j0 20931 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 20932 v2.AuxInt = i0 20933 v2.Aux = s 20934 v2.AddArg(p) 20935 v2.AddArg(idx) 20936 v2.AddArg(mem) 20937 v1.AddArg(v2) 20938 v0.AddArg(v1) 20939 v0.AddArg(y) 20940 return true 20941 } 20942 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 20943 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20944 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 20945 for { 20946 s1 := v.Args[0] 20947 if s1.Op != OpAMD64SHLQconst { 20948 break 20949 } 20950 j1 := s1.AuxInt 20951 x1 := s1.Args[0] 20952 if x1.Op != OpAMD64MOVWloadidx1 { 20953 break 20954 } 20955 i1 := x1.AuxInt 20956 s := x1.Aux 20957 p := x1.Args[0] 20958 idx := x1.Args[1] 20959 mem := x1.Args[2] 20960 or := v.Args[1] 20961 if or.Op != OpAMD64ORQ { 20962 break 20963 } 20964 y := or.Args[0] 20965 s0 := or.Args[1] 20966 if s0.Op != OpAMD64SHLQconst { 20967 break 20968 } 20969 j0 := s0.AuxInt 20970 x0 := s0.Args[0] 20971 if x0.Op != OpAMD64MOVWloadidx1 { 20972 break 20973 } 20974 i0 := x0.AuxInt 20975 if x0.Aux != s { 20976 break 20977 } 20978 if p != x0.Args[0] { 20979 break 20980 } 20981 if idx != x0.Args[1] { 20982 break 20983 } 20984 if mem != x0.Args[2] { 20985 break 20986 } 20987 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20988 break 20989 } 20990 b = mergePoint(b, x0, x1) 20991 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 20992 v.reset(OpCopy) 20993 v.AddArg(v0) 20994 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 20995 v1.AuxInt = j0 20996 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 20997 v2.AuxInt = i0 20998 v2.Aux = s 20999 v2.AddArg(p) 21000 v2.AddArg(idx) 21001 v2.AddArg(mem) 21002 v1.AddArg(v2) 21003 v0.AddArg(v1) 21004 v0.AddArg(y) 21005 return true 21006 } 21007 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 
21008 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21009 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21010 for { 21011 s1 := v.Args[0] 21012 if s1.Op != OpAMD64SHLQconst { 21013 break 21014 } 21015 j1 := s1.AuxInt 21016 x1 := s1.Args[0] 21017 if x1.Op != OpAMD64MOVWloadidx1 { 21018 break 21019 } 21020 i1 := x1.AuxInt 21021 s := x1.Aux 21022 idx := x1.Args[0] 21023 p := x1.Args[1] 21024 mem := x1.Args[2] 21025 or := v.Args[1] 21026 if or.Op != OpAMD64ORQ { 21027 break 21028 } 21029 y := or.Args[0] 21030 s0 := or.Args[1] 21031 if s0.Op != OpAMD64SHLQconst { 21032 break 21033 } 21034 j0 := s0.AuxInt 21035 x0 := s0.Args[0] 21036 if x0.Op != OpAMD64MOVWloadidx1 { 21037 break 21038 } 21039 i0 := x0.AuxInt 21040 if x0.Aux != s { 21041 break 21042 } 21043 if p != x0.Args[0] { 21044 break 21045 } 21046 if idx != x0.Args[1] { 21047 break 21048 } 21049 if mem != x0.Args[2] { 21050 break 21051 } 21052 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21053 break 21054 } 21055 b = mergePoint(b, x0, x1) 21056 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21057 v.reset(OpCopy) 21058 v.AddArg(v0) 21059 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21060 v1.AuxInt = j0 21061 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21062 v2.AuxInt = i0 21063 v2.Aux = s 21064 v2.AddArg(p) 21065 v2.AddArg(idx) 21066 v2.AddArg(mem) 21067 v1.AddArg(v2) 21068 v0.AddArg(v1) 21069 v0.AddArg(y) 21070 return true 21071 } 21072 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 21073 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21074 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21075 for { 21076 s1 := v.Args[0] 21077 if s1.Op != OpAMD64SHLQconst { 21078 break 21079 } 21080 j1 := s1.AuxInt 21081 x1 := s1.Args[0] 21082 if x1.Op != OpAMD64MOVWloadidx1 { 21083 break 21084 } 21085 i1 := x1.AuxInt 21086 s := x1.Aux 21087 p := x1.Args[0] 21088 idx := x1.Args[1] 21089 mem := x1.Args[2] 21090 or := v.Args[1] 21091 if or.Op != OpAMD64ORQ { 21092 break 21093 } 21094 y := or.Args[0] 21095 s0 := or.Args[1] 21096 if s0.Op != OpAMD64SHLQconst { 21097 break 21098 } 21099 j0 := s0.AuxInt 21100 x0 := s0.Args[0] 21101 if x0.Op != OpAMD64MOVWloadidx1 { 21102 break 21103 } 21104 i0 := x0.AuxInt 21105 if x0.Aux != s { 21106 break 21107 } 21108 if idx != x0.Args[0] { 21109 break 21110 } 21111 if p != x0.Args[1] { 21112 break 21113 } 21114 if mem != x0.Args[2] { 21115 break 21116 } 21117 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21118 break 21119 } 21120 b = mergePoint(b, x0, x1) 21121 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21122 v.reset(OpCopy) 21123 v.AddArg(v0) 
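// At this point in each variant the match has succeeded, and the code below
// rebuilds the value as (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] ...)) y):
// the two adjacent 16-bit indexed loads collapse into a single 32-bit load
// and only the lower shift j0 survives. A minimal sketch of the Go-level
// pattern that lowers to this shape (illustrative only; the helper is
// assumed, not part of this file):
//
//	package sketch
//
//	import "encoding/binary"
//
//	// Two adjacent little-endian 16-bit reads OR'd into a 32-bit value;
//	// after the earlier byte-merging rules, the SSA form is the
//	// ORQ-of-shifted-MOVWloadidx1 tree matched by these rules.
//	func load32(b []byte, i int) uint32 {
//		lo := uint32(binary.LittleEndian.Uint16(b[i:]))
//		hi := uint32(binary.LittleEndian.Uint16(b[i+2:]))
//		return lo | hi<<16
//	}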
21124 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21125 v1.AuxInt = j0 21126 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21127 v2.AuxInt = i0 21128 v2.Aux = s 21129 v2.AddArg(p) 21130 v2.AddArg(idx) 21131 v2.AddArg(mem) 21132 v1.AddArg(v2) 21133 v0.AddArg(v1) 21134 v0.AddArg(y) 21135 return true 21136 } 21137 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 21138 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21139 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21140 for { 21141 s1 := v.Args[0] 21142 if s1.Op != OpAMD64SHLQconst { 21143 break 21144 } 21145 j1 := s1.AuxInt 21146 x1 := s1.Args[0] 21147 if x1.Op != OpAMD64MOVWloadidx1 { 21148 break 21149 } 21150 i1 := x1.AuxInt 21151 s := x1.Aux 21152 idx := x1.Args[0] 21153 p := x1.Args[1] 21154 mem := x1.Args[2] 21155 or := v.Args[1] 21156 if or.Op != OpAMD64ORQ { 21157 break 21158 } 21159 y := or.Args[0] 21160 s0 := or.Args[1] 21161 if s0.Op != OpAMD64SHLQconst { 21162 break 21163 } 21164 j0 := s0.AuxInt 21165 x0 := s0.Args[0] 21166 if x0.Op != OpAMD64MOVWloadidx1 { 21167 break 21168 } 21169 i0 := x0.AuxInt 21170 if x0.Aux != s { 21171 break 21172 } 21173 if idx != x0.Args[0] { 21174 break 21175 } 21176 if p != x0.Args[1] { 21177 break 21178 } 21179 if mem != x0.Args[2] { 21180 break 21181 } 21182 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21183 break 21184 } 21185 b = mergePoint(b, x0, x1) 21186 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21187 v.reset(OpCopy) 21188 v.AddArg(v0) 21189 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21190 v1.AuxInt = j0 21191 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21192 v2.AuxInt = i0 21193 v2.Aux = s 21194 v2.AddArg(p) 21195 v2.AddArg(idx) 21196 v2.AddArg(mem) 21197 v1.AddArg(v2) 21198 v0.AddArg(v1) 21199 v0.AddArg(y) 21200 return true 21201 } 21202 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21203 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21204 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21205 for { 21206 or := v.Args[0] 21207 if or.Op != OpAMD64ORQ { 21208 break 21209 } 21210 s0 := or.Args[0] 21211 if s0.Op != OpAMD64SHLQconst { 21212 break 21213 } 21214 j0 := s0.AuxInt 21215 x0 := s0.Args[0] 21216 if x0.Op != OpAMD64MOVWloadidx1 { 21217 break 21218 } 21219 i0 := x0.AuxInt 21220 s := x0.Aux 21221 p := x0.Args[0] 21222 idx := x0.Args[1] 21223 mem := x0.Args[2] 21224 y := or.Args[1] 21225 s1 := v.Args[1] 21226 if s1.Op != OpAMD64SHLQconst { 21227 break 21228 } 21229 j1 := s1.AuxInt 21230 x1 := s1.Args[0] 21231 if x1.Op != OpAMD64MOVWloadidx1 { 21232 break 21233 } 21234 i1 := x1.AuxInt 21235 if x1.Aux != s { 21236 break 21237 } 21238 if p != x1.Args[0] { 21239 break 21240 } 21241 if 
idx != x1.Args[1] { 21242 break 21243 } 21244 if mem != x1.Args[2] { 21245 break 21246 } 21247 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21248 break 21249 } 21250 b = mergePoint(b, x0, x1) 21251 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21252 v.reset(OpCopy) 21253 v.AddArg(v0) 21254 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21255 v1.AuxInt = j0 21256 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21257 v2.AuxInt = i0 21258 v2.Aux = s 21259 v2.AddArg(p) 21260 v2.AddArg(idx) 21261 v2.AddArg(mem) 21262 v1.AddArg(v2) 21263 v0.AddArg(v1) 21264 v0.AddArg(y) 21265 return true 21266 } 21267 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21268 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21269 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21270 for { 21271 or := v.Args[0] 21272 if or.Op != OpAMD64ORQ { 21273 break 21274 } 21275 s0 := or.Args[0] 21276 if s0.Op != OpAMD64SHLQconst { 21277 break 21278 } 21279 j0 := s0.AuxInt 21280 x0 := s0.Args[0] 21281 if x0.Op != OpAMD64MOVWloadidx1 { 21282 break 21283 } 21284 i0 := x0.AuxInt 21285 s := x0.Aux 21286 idx := x0.Args[0] 21287 p := x0.Args[1] 21288 mem := x0.Args[2] 21289 y := or.Args[1] 21290 s1 := v.Args[1] 21291 if s1.Op != OpAMD64SHLQconst { 21292 break 21293 } 21294 j1 := s1.AuxInt 21295 x1 := s1.Args[0] 21296 if x1.Op != OpAMD64MOVWloadidx1 { 21297 break 21298 } 21299 i1 := x1.AuxInt 21300 if x1.Aux != s { 21301 break 21302 } 21303 if p != x1.Args[0] { 21304 break 21305 } 21306 if idx != x1.Args[1] { 21307 break 21308 } 21309 if mem != x1.Args[2] { 21310 break 21311 } 21312 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21313 break 21314 } 21315 b = mergePoint(b, x0, x1) 21316 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21317 v.reset(OpCopy) 21318 v.AddArg(v0) 21319 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21320 v1.AuxInt = j0 21321 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21322 v2.AuxInt = i0 21323 v2.Aux = s 21324 v2.AddArg(p) 21325 v2.AddArg(idx) 21326 v2.AddArg(mem) 21327 v1.AddArg(v2) 21328 v0.AddArg(v1) 21329 v0.AddArg(y) 21330 return true 21331 } 21332 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21333 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21334 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21335 for { 21336 or := v.Args[0] 21337 if or.Op != OpAMD64ORQ { 21338 break 21339 } 21340 y := or.Args[0] 21341 s0 := or.Args[1] 21342 if s0.Op != OpAMD64SHLQconst { 21343 break 21344 } 21345 j0 := s0.AuxInt 21346 x0 := s0.Args[0] 
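// The long run of near-identical loops around here differs only in operand
// order: the outer ORQ's two arguments, the position of y inside the inner
// ORQ, and the p/idx order inside each MOVWloadidx1 can each occur either
// way. In this generation of the rule generator every commutative
// permutation is written out explicitly in gen/AMD64.rules, so each order
// gets its own match body; the replacement they build is identical.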
21347 if x0.Op != OpAMD64MOVWloadidx1 { 21348 break 21349 } 21350 i0 := x0.AuxInt 21351 s := x0.Aux 21352 p := x0.Args[0] 21353 idx := x0.Args[1] 21354 mem := x0.Args[2] 21355 s1 := v.Args[1] 21356 if s1.Op != OpAMD64SHLQconst { 21357 break 21358 } 21359 j1 := s1.AuxInt 21360 x1 := s1.Args[0] 21361 if x1.Op != OpAMD64MOVWloadidx1 { 21362 break 21363 } 21364 i1 := x1.AuxInt 21365 if x1.Aux != s { 21366 break 21367 } 21368 if p != x1.Args[0] { 21369 break 21370 } 21371 if idx != x1.Args[1] { 21372 break 21373 } 21374 if mem != x1.Args[2] { 21375 break 21376 } 21377 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21378 break 21379 } 21380 b = mergePoint(b, x0, x1) 21381 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21382 v.reset(OpCopy) 21383 v.AddArg(v0) 21384 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21385 v1.AuxInt = j0 21386 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21387 v2.AuxInt = i0 21388 v2.Aux = s 21389 v2.AddArg(p) 21390 v2.AddArg(idx) 21391 v2.AddArg(mem) 21392 v1.AddArg(v2) 21393 v0.AddArg(v1) 21394 v0.AddArg(y) 21395 return true 21396 } 21397 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21398 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21399 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21400 for { 21401 or := v.Args[0] 21402 if or.Op != OpAMD64ORQ { 21403 break 21404 } 21405 y := or.Args[0] 21406 s0 := or.Args[1] 21407 if s0.Op != OpAMD64SHLQconst { 21408 break 21409 } 21410 j0 := s0.AuxInt 21411 x0 := s0.Args[0] 21412 if x0.Op != OpAMD64MOVWloadidx1 { 21413 break 21414 } 21415 i0 := x0.AuxInt 21416 s := x0.Aux 21417 idx := x0.Args[0] 21418 p := x0.Args[1] 21419 mem := x0.Args[2] 21420 s1 := v.Args[1] 21421 if s1.Op != OpAMD64SHLQconst { 21422 break 21423 } 21424 j1 := s1.AuxInt 21425 x1 := s1.Args[0] 21426 if x1.Op != OpAMD64MOVWloadidx1 { 21427 break 21428 } 21429 i1 := x1.AuxInt 21430 if x1.Aux != s { 21431 break 21432 } 21433 if p != x1.Args[0] { 21434 break 21435 } 21436 if idx != x1.Args[1] { 21437 break 21438 } 21439 if mem != x1.Args[2] { 21440 break 21441 } 21442 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21443 break 21444 } 21445 b = mergePoint(b, x0, x1) 21446 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21447 v.reset(OpCopy) 21448 v.AddArg(v0) 21449 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21450 v1.AuxInt = j0 21451 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21452 v2.AuxInt = i0 21453 v2.Aux = s 21454 v2.AddArg(p) 21455 v2.AddArg(idx) 21456 v2.AddArg(mem) 21457 v1.AddArg(v2) 21458 v0.AddArg(v1) 21459 v0.AddArg(y) 21460 return true 21461 } 21462 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 21463 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses 
== 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21464 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21465 for { 21466 or := v.Args[0] 21467 if or.Op != OpAMD64ORQ { 21468 break 21469 } 21470 s0 := or.Args[0] 21471 if s0.Op != OpAMD64SHLQconst { 21472 break 21473 } 21474 j0 := s0.AuxInt 21475 x0 := s0.Args[0] 21476 if x0.Op != OpAMD64MOVWloadidx1 { 21477 break 21478 } 21479 i0 := x0.AuxInt 21480 s := x0.Aux 21481 p := x0.Args[0] 21482 idx := x0.Args[1] 21483 mem := x0.Args[2] 21484 y := or.Args[1] 21485 s1 := v.Args[1] 21486 if s1.Op != OpAMD64SHLQconst { 21487 break 21488 } 21489 j1 := s1.AuxInt 21490 x1 := s1.Args[0] 21491 if x1.Op != OpAMD64MOVWloadidx1 { 21492 break 21493 } 21494 i1 := x1.AuxInt 21495 if x1.Aux != s { 21496 break 21497 } 21498 if idx != x1.Args[0] { 21499 break 21500 } 21501 if p != x1.Args[1] { 21502 break 21503 } 21504 if mem != x1.Args[2] { 21505 break 21506 } 21507 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21508 break 21509 } 21510 b = mergePoint(b, x0, x1) 21511 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21512 v.reset(OpCopy) 21513 v.AddArg(v0) 21514 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21515 v1.AuxInt = j0 21516 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21517 v2.AuxInt = i0 21518 v2.Aux = s 21519 v2.AddArg(p) 21520 v2.AddArg(idx) 21521 v2.AddArg(mem) 21522 v1.AddArg(v2) 21523 v0.AddArg(v1) 21524 v0.AddArg(y) 21525 return true 21526 } 21527 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 21528 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21529 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21530 for { 21531 or := v.Args[0] 21532 if or.Op != OpAMD64ORQ { 21533 break 21534 } 21535 s0 := or.Args[0] 21536 if s0.Op != OpAMD64SHLQconst { 21537 break 21538 } 21539 j0 := s0.AuxInt 21540 x0 := s0.Args[0] 21541 if x0.Op != OpAMD64MOVWloadidx1 { 21542 break 21543 } 21544 i0 := x0.AuxInt 21545 s := x0.Aux 21546 idx := x0.Args[0] 21547 p := x0.Args[1] 21548 mem := x0.Args[2] 21549 y := or.Args[1] 21550 s1 := v.Args[1] 21551 if s1.Op != OpAMD64SHLQconst { 21552 break 21553 } 21554 j1 := s1.AuxInt 21555 x1 := s1.Args[0] 21556 if x1.Op != OpAMD64MOVWloadidx1 { 21557 break 21558 } 21559 i1 := x1.AuxInt 21560 if x1.Aux != s { 21561 break 21562 } 21563 if idx != x1.Args[0] { 21564 break 21565 } 21566 if p != x1.Args[1] { 21567 break 21568 } 21569 if mem != x1.Args[2] { 21570 break 21571 } 21572 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21573 break 21574 } 21575 b = mergePoint(b, x0, x1) 21576 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21577 v.reset(OpCopy) 21578 v.AddArg(v0) 21579 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21580 v1.AuxInt = j0 21581 v2 := b.NewValue0(v.Pos, 
OpAMD64MOVLloadidx1, types.UInt32) 21582 v2.AuxInt = i0 21583 v2.Aux = s 21584 v2.AddArg(p) 21585 v2.AddArg(idx) 21586 v2.AddArg(mem) 21587 v1.AddArg(v2) 21588 v0.AddArg(v1) 21589 v0.AddArg(y) 21590 return true 21591 } 21592 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 21593 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21594 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21595 for { 21596 or := v.Args[0] 21597 if or.Op != OpAMD64ORQ { 21598 break 21599 } 21600 y := or.Args[0] 21601 s0 := or.Args[1] 21602 if s0.Op != OpAMD64SHLQconst { 21603 break 21604 } 21605 j0 := s0.AuxInt 21606 x0 := s0.Args[0] 21607 if x0.Op != OpAMD64MOVWloadidx1 { 21608 break 21609 } 21610 i0 := x0.AuxInt 21611 s := x0.Aux 21612 p := x0.Args[0] 21613 idx := x0.Args[1] 21614 mem := x0.Args[2] 21615 s1 := v.Args[1] 21616 if s1.Op != OpAMD64SHLQconst { 21617 break 21618 } 21619 j1 := s1.AuxInt 21620 x1 := s1.Args[0] 21621 if x1.Op != OpAMD64MOVWloadidx1 { 21622 break 21623 } 21624 i1 := x1.AuxInt 21625 if x1.Aux != s { 21626 break 21627 } 21628 if idx != x1.Args[0] { 21629 break 21630 } 21631 if p != x1.Args[1] { 21632 break 21633 } 21634 if mem != x1.Args[2] { 21635 break 21636 } 21637 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21638 break 21639 } 21640 b = mergePoint(b, x0, x1) 21641 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21642 v.reset(OpCopy) 21643 v.AddArg(v0) 21644 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21645 v1.AuxInt = j0 21646 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21647 v2.AuxInt = i0 21648 v2.Aux = s 21649 v2.AddArg(p) 21650 v2.AddArg(idx) 21651 v2.AddArg(mem) 21652 v1.AddArg(v2) 21653 v0.AddArg(v1) 21654 v0.AddArg(y) 21655 return true 21656 } 21657 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 21658 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21659 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 21660 for { 21661 or := v.Args[0] 21662 if or.Op != OpAMD64ORQ { 21663 break 21664 } 21665 y := or.Args[0] 21666 s0 := or.Args[1] 21667 if s0.Op != OpAMD64SHLQconst { 21668 break 21669 } 21670 j0 := s0.AuxInt 21671 x0 := s0.Args[0] 21672 if x0.Op != OpAMD64MOVWloadidx1 { 21673 break 21674 } 21675 i0 := x0.AuxInt 21676 s := x0.Aux 21677 idx := x0.Args[0] 21678 p := x0.Args[1] 21679 mem := x0.Args[2] 21680 s1 := v.Args[1] 21681 if s1.Op != OpAMD64SHLQconst { 21682 break 21683 } 21684 j1 := s1.AuxInt 21685 x1 := s1.Args[0] 21686 if x1.Op != OpAMD64MOVWloadidx1 { 21687 break 21688 } 21689 i1 := x1.AuxInt 21690 if x1.Aux != s { 21691 break 21692 } 21693 if idx != x1.Args[0] { 21694 break 21695 } 21696 if p != x1.Args[1] { 21697 break 21698 } 21699 if mem != x1.Args[2] { 21700 break 21701 } 21702 if !(i1 == i0+2 
&& j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21703 break 21704 } 21705 b = mergePoint(b, x0, x1) 21706 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 21707 v.reset(OpCopy) 21708 v.AddArg(v0) 21709 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 21710 v1.AuxInt = j0 21711 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 21712 v2.AuxInt = i0 21713 v2.Aux = s 21714 v2.AddArg(p) 21715 v2.AddArg(idx) 21716 v2.AddArg(mem) 21717 v1.AddArg(v2) 21718 v0.AddArg(v1) 21719 v0.AddArg(y) 21720 return true 21721 } 21722 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 21723 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21724 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 21725 for { 21726 x1 := v.Args[0] 21727 if x1.Op != OpAMD64MOVBload { 21728 break 21729 } 21730 i1 := x1.AuxInt 21731 s := x1.Aux 21732 p := x1.Args[0] 21733 mem := x1.Args[1] 21734 sh := v.Args[1] 21735 if sh.Op != OpAMD64SHLQconst { 21736 break 21737 } 21738 if sh.AuxInt != 8 { 21739 break 21740 } 21741 x0 := sh.Args[0] 21742 if x0.Op != OpAMD64MOVBload { 21743 break 21744 } 21745 i0 := x0.AuxInt 21746 if x0.Aux != s { 21747 break 21748 } 21749 if p != x0.Args[0] { 21750 break 21751 } 21752 if mem != x0.Args[1] { 21753 break 21754 } 21755 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21756 break 21757 } 21758 b = mergePoint(b, x0, x1) 21759 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21760 v.reset(OpCopy) 21761 v.AddArg(v0) 21762 v0.AuxInt = 8 21763 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 21764 v1.AuxInt = i0 21765 v1.Aux = s 21766 v1.AddArg(p) 21767 v1.AddArg(mem) 21768 v0.AddArg(v1) 21769 return true 21770 } 21771 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 21772 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21773 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 21774 for { 21775 sh := v.Args[0] 21776 if sh.Op != OpAMD64SHLQconst { 21777 break 21778 } 21779 if sh.AuxInt != 8 { 21780 break 21781 } 21782 x0 := sh.Args[0] 21783 if x0.Op != OpAMD64MOVBload { 21784 break 21785 } 21786 i0 := x0.AuxInt 21787 s := x0.Aux 21788 p := x0.Args[0] 21789 mem := x0.Args[1] 21790 x1 := v.Args[1] 21791 if x1.Op != OpAMD64MOVBload { 21792 break 21793 } 21794 i1 := x1.AuxInt 21795 if x1.Aux != s { 21796 break 21797 } 21798 if p != x1.Args[0] { 21799 break 21800 } 21801 if mem != x1.Args[1] { 21802 break 21803 } 21804 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21805 break 21806 } 21807 b = mergePoint(b, x0, x1) 21808 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21809 v.reset(OpCopy) 21810 v.AddArg(v0) 21811 v0.AuxInt = 8 21812 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 21813 v1.AuxInt = i0 21814 v1.Aux = s 21815 v1.AddArg(p) 21816 v1.AddArg(mem) 21817 v0.AddArg(v1) 21818 return true 21819 } 21820 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] 
r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 21821 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21822 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 21823 for { 21824 r1 := v.Args[0] 21825 if r1.Op != OpAMD64ROLWconst { 21826 break 21827 } 21828 if r1.AuxInt != 8 { 21829 break 21830 } 21831 x1 := r1.Args[0] 21832 if x1.Op != OpAMD64MOVWload { 21833 break 21834 } 21835 i1 := x1.AuxInt 21836 s := x1.Aux 21837 p := x1.Args[0] 21838 mem := x1.Args[1] 21839 sh := v.Args[1] 21840 if sh.Op != OpAMD64SHLQconst { 21841 break 21842 } 21843 if sh.AuxInt != 16 { 21844 break 21845 } 21846 r0 := sh.Args[0] 21847 if r0.Op != OpAMD64ROLWconst { 21848 break 21849 } 21850 if r0.AuxInt != 8 { 21851 break 21852 } 21853 x0 := r0.Args[0] 21854 if x0.Op != OpAMD64MOVWload { 21855 break 21856 } 21857 i0 := x0.AuxInt 21858 if x0.Aux != s { 21859 break 21860 } 21861 if p != x0.Args[0] { 21862 break 21863 } 21864 if mem != x0.Args[1] { 21865 break 21866 } 21867 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21868 break 21869 } 21870 b = mergePoint(b, x0, x1) 21871 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21872 v.reset(OpCopy) 21873 v.AddArg(v0) 21874 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 21875 v1.AuxInt = i0 21876 v1.Aux = s 21877 v1.AddArg(p) 21878 v1.AddArg(mem) 21879 v0.AddArg(v1) 21880 return true 21881 } 21882 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 21883 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21884 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 21885 for { 21886 sh := v.Args[0] 21887 if sh.Op != OpAMD64SHLQconst { 21888 break 21889 } 21890 if sh.AuxInt != 16 { 21891 break 21892 } 21893 r0 := sh.Args[0] 21894 if r0.Op != OpAMD64ROLWconst { 21895 break 21896 } 21897 if r0.AuxInt != 8 { 21898 break 21899 } 21900 x0 := r0.Args[0] 21901 if x0.Op != OpAMD64MOVWload { 21902 break 21903 } 21904 i0 := x0.AuxInt 21905 s := x0.Aux 21906 p := x0.Args[0] 21907 mem := x0.Args[1] 21908 r1 := v.Args[1] 21909 if r1.Op != OpAMD64ROLWconst { 21910 break 21911 } 21912 if r1.AuxInt != 8 { 21913 break 21914 } 21915 x1 := r1.Args[0] 21916 if x1.Op != OpAMD64MOVWload { 21917 break 21918 } 21919 i1 := x1.AuxInt 21920 if x1.Aux != s { 21921 break 21922 } 21923 if p != x1.Args[0] { 21924 break 21925 } 21926 if mem != x1.Args[1] { 21927 break 21928 } 21929 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21930 break 21931 } 21932 b = mergePoint(b, x0, x1) 21933 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21934 v.reset(OpCopy) 21935 v.AddArg(v0) 21936 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 21937 v1.AuxInt = i0 21938 v1.Aux = s 21939 v1.AddArg(p) 21940 v1.AddArg(mem) 21941 v0.AddArg(v1) 21942 return true 21943 } 21944 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL 
x0:(MOVLload [i0] {s} p mem)))) 21945 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21946 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 21947 for { 21948 r1 := v.Args[0] 21949 if r1.Op != OpAMD64BSWAPL { 21950 break 21951 } 21952 x1 := r1.Args[0] 21953 if x1.Op != OpAMD64MOVLload { 21954 break 21955 } 21956 i1 := x1.AuxInt 21957 s := x1.Aux 21958 p := x1.Args[0] 21959 mem := x1.Args[1] 21960 sh := v.Args[1] 21961 if sh.Op != OpAMD64SHLQconst { 21962 break 21963 } 21964 if sh.AuxInt != 32 { 21965 break 21966 } 21967 r0 := sh.Args[0] 21968 if r0.Op != OpAMD64BSWAPL { 21969 break 21970 } 21971 x0 := r0.Args[0] 21972 if x0.Op != OpAMD64MOVLload { 21973 break 21974 } 21975 i0 := x0.AuxInt 21976 if x0.Aux != s { 21977 break 21978 } 21979 if p != x0.Args[0] { 21980 break 21981 } 21982 if mem != x0.Args[1] { 21983 break 21984 } 21985 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21986 break 21987 } 21988 b = mergePoint(b, x0, x1) 21989 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 21990 v.reset(OpCopy) 21991 v.AddArg(v0) 21992 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) 21993 v1.AuxInt = i0 21994 v1.Aux = s 21995 v1.AddArg(p) 21996 v1.AddArg(mem) 21997 v0.AddArg(v1) 21998 return true 21999 } 22000 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 22001 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 22002 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 22003 for { 22004 sh := v.Args[0] 22005 if sh.Op != OpAMD64SHLQconst { 22006 break 22007 } 22008 if sh.AuxInt != 32 { 22009 break 22010 } 22011 r0 := sh.Args[0] 22012 if r0.Op != OpAMD64BSWAPL { 22013 break 22014 } 22015 x0 := r0.Args[0] 22016 if x0.Op != OpAMD64MOVLload { 22017 break 22018 } 22019 i0 := x0.AuxInt 22020 s := x0.Aux 22021 p := x0.Args[0] 22022 mem := x0.Args[1] 22023 r1 := v.Args[1] 22024 if r1.Op != OpAMD64BSWAPL { 22025 break 22026 } 22027 x1 := r1.Args[0] 22028 if x1.Op != OpAMD64MOVLload { 22029 break 22030 } 22031 i1 := x1.AuxInt 22032 if x1.Aux != s { 22033 break 22034 } 22035 if p != x1.Args[0] { 22036 break 22037 } 22038 if mem != x1.Args[1] { 22039 break 22040 } 22041 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 22042 break 22043 } 22044 b = mergePoint(b, x0, x1) 22045 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 22046 v.reset(OpCopy) 22047 v.AddArg(v0) 22048 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) 22049 v1.AuxInt = i0 22050 v1.Aux = s 22051 v1.AddArg(p) 22052 v1.AddArg(mem) 22053 v0.AddArg(v1) 22054 return true 22055 } 22056 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 22057 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or) 22058 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 22059 for { 22060 s0 := v.Args[0] 22061 if s0.Op != OpAMD64SHLQconst { 22062 break 22063 } 22064 j0 := s0.AuxInt 22065 x0 := s0.Args[0] 22066 if x0.Op != OpAMD64MOVBload { 22067 break 22068 } 22069 i0 := x0.AuxInt 22070 s := x0.Aux 22071 p := x0.Args[0] 22072 mem := x0.Args[1] 22073 or := v.Args[1] 22074 if or.Op != OpAMD64ORQ { 22075 break 22076 } 22077 s1 := or.Args[0] 22078 if s1.Op != OpAMD64SHLQconst { 22079 break 22080 } 22081 j1 := s1.AuxInt 22082 x1 := s1.Args[0] 22083 if x1.Op != OpAMD64MOVBload { 22084 break 22085 } 22086 i1 := x1.AuxInt 22087 if x1.Aux != s { 22088 break 22089 } 22090 if p != x1.Args[0] { 22091 break 22092 } 22093 if mem != x1.Args[1] { 22094 break 22095 } 22096 y := or.Args[1] 22097 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22098 break 22099 } 22100 b = mergePoint(b, x0, x1) 22101 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22102 v.reset(OpCopy) 22103 v.AddArg(v0) 22104 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22105 v1.AuxInt = j1 22106 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 22107 v2.AuxInt = 8 22108 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 22109 v3.AuxInt = i0 22110 v3.Aux = s 22111 v3.AddArg(p) 22112 v3.AddArg(mem) 22113 v2.AddArg(v3) 22114 v1.AddArg(v2) 22115 v0.AddArg(v1) 22116 v0.AddArg(y) 22117 return true 22118 } 22119 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 22120 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22121 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 22122 for { 22123 s0 := v.Args[0] 22124 if s0.Op != OpAMD64SHLQconst { 22125 break 22126 } 22127 j0 := s0.AuxInt 22128 x0 := s0.Args[0] 22129 if x0.Op != OpAMD64MOVBload { 22130 break 22131 } 22132 i0 := x0.AuxInt 22133 s := x0.Aux 22134 p := x0.Args[0] 22135 mem := x0.Args[1] 22136 or := v.Args[1] 22137 if or.Op != OpAMD64ORQ { 22138 break 22139 } 22140 y := or.Args[0] 22141 s1 := or.Args[1] 22142 if s1.Op != OpAMD64SHLQconst { 22143 break 22144 } 22145 j1 := s1.AuxInt 22146 x1 := s1.Args[0] 22147 if x1.Op != OpAMD64MOVBload { 22148 break 22149 } 22150 i1 := x1.AuxInt 22151 if x1.Aux != s { 22152 break 22153 } 22154 if p != x1.Args[0] { 22155 break 22156 } 22157 if mem != x1.Args[1] { 22158 break 22159 } 22160 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22161 break 22162 } 22163 b = mergePoint(b, x0, x1) 22164 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22165 v.reset(OpCopy) 22166 v.AddArg(v0) 22167 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22168 v1.AuxInt = j1 22169 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 22170 v2.AuxInt = 8 22171 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 22172 v3.AuxInt = i0 22173 v3.Aux = s 
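// This rule family and the ROLWconst/BSWAPL/BSWAPQ rules above handle the
// big-endian direction: a byte-swapped narrow load plus its neighbor merges
// into a byte-swapped load of twice the width, with any leftover shift (j1
// here) kept outside. Applied repeatedly, a byte-at-a-time big-endian
// decode collapses into one wide load plus a single swap. A sketch of the
// widest case (illustrative only; the helper is assumed):
//
//	package sketch
//
//	import "encoding/binary"
//
//	// Two big-endian 32-bit halves OR'd into a 64-bit value; the two
//	// (BSWAPL (MOVLload ...)) halves merge into (BSWAPQ (MOVQload ...)).
//	func load64be(b []byte) uint64 {
//		hi := uint64(binary.BigEndian.Uint32(b[:4]))
//		lo := uint64(binary.BigEndian.Uint32(b[4:8]))
//		return hi<<32 | lo
//	}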
22174 v3.AddArg(p) 22175 v3.AddArg(mem) 22176 v2.AddArg(v3) 22177 v1.AddArg(v2) 22178 v0.AddArg(v1) 22179 v0.AddArg(y) 22180 return true 22181 } 22182 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 22183 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22184 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 22185 for { 22186 or := v.Args[0] 22187 if or.Op != OpAMD64ORQ { 22188 break 22189 } 22190 s1 := or.Args[0] 22191 if s1.Op != OpAMD64SHLQconst { 22192 break 22193 } 22194 j1 := s1.AuxInt 22195 x1 := s1.Args[0] 22196 if x1.Op != OpAMD64MOVBload { 22197 break 22198 } 22199 i1 := x1.AuxInt 22200 s := x1.Aux 22201 p := x1.Args[0] 22202 mem := x1.Args[1] 22203 y := or.Args[1] 22204 s0 := v.Args[1] 22205 if s0.Op != OpAMD64SHLQconst { 22206 break 22207 } 22208 j0 := s0.AuxInt 22209 x0 := s0.Args[0] 22210 if x0.Op != OpAMD64MOVBload { 22211 break 22212 } 22213 i0 := x0.AuxInt 22214 if x0.Aux != s { 22215 break 22216 } 22217 if p != x0.Args[0] { 22218 break 22219 } 22220 if mem != x0.Args[1] { 22221 break 22222 } 22223 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22224 break 22225 } 22226 b = mergePoint(b, x0, x1) 22227 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22228 v.reset(OpCopy) 22229 v.AddArg(v0) 22230 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22231 v1.AuxInt = j1 22232 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 22233 v2.AuxInt = 8 22234 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 22235 v3.AuxInt = i0 22236 v3.Aux = s 22237 v3.AddArg(p) 22238 v3.AddArg(mem) 22239 v2.AddArg(v3) 22240 v1.AddArg(v2) 22241 v0.AddArg(v1) 22242 v0.AddArg(y) 22243 return true 22244 } 22245 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 22246 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22247 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 22248 for { 22249 or := v.Args[0] 22250 if or.Op != OpAMD64ORQ { 22251 break 22252 } 22253 y := or.Args[0] 22254 s1 := or.Args[1] 22255 if s1.Op != OpAMD64SHLQconst { 22256 break 22257 } 22258 j1 := s1.AuxInt 22259 x1 := s1.Args[0] 22260 if x1.Op != OpAMD64MOVBload { 22261 break 22262 } 22263 i1 := x1.AuxInt 22264 s := x1.Aux 22265 p := x1.Args[0] 22266 mem := x1.Args[1] 22267 s0 := v.Args[1] 22268 if s0.Op != OpAMD64SHLQconst { 22269 break 22270 } 22271 j0 := s0.AuxInt 22272 x0 := s0.Args[0] 22273 if x0.Op != OpAMD64MOVBload { 22274 break 22275 } 22276 i0 := x0.AuxInt 22277 if x0.Aux != s { 22278 break 22279 } 22280 if p != x0.Args[0] { 22281 break 22282 } 22283 if mem != x0.Args[1] { 22284 break 22285 } 22286 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22287 break 22288 } 22289 b = mergePoint(b, x0, x1) 22290 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22291 v.reset(OpCopy) 22292 v.AddArg(v0) 22293 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22294 v1.AuxInt = j1 22295 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 22296 v2.AuxInt = 8 22297 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) 22298 v3.AuxInt = i0 22299 v3.Aux = s 22300 v3.AddArg(p) 22301 v3.AddArg(mem) 22302 v2.AddArg(v3) 22303 v1.AddArg(v2) 22304 v0.AddArg(v1) 22305 v0.AddArg(y) 22306 return true 22307 } 22308 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 22309 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 22310 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y) 22311 for { 22312 s0 := v.Args[0] 22313 if s0.Op != OpAMD64SHLQconst { 22314 break 22315 } 22316 j0 := s0.AuxInt 22317 r0 := s0.Args[0] 22318 if r0.Op != OpAMD64ROLWconst { 22319 break 22320 } 22321 if r0.AuxInt != 8 { 22322 break 22323 } 22324 x0 := r0.Args[0] 22325 if x0.Op != OpAMD64MOVWload { 22326 break 22327 } 22328 i0 := x0.AuxInt 22329 s := x0.Aux 22330 p := x0.Args[0] 22331 mem := x0.Args[1] 22332 or := v.Args[1] 22333 if or.Op != OpAMD64ORQ { 22334 break 22335 } 22336 s1 := or.Args[0] 22337 if s1.Op != OpAMD64SHLQconst { 22338 break 22339 } 22340 j1 := s1.AuxInt 22341 r1 := s1.Args[0] 22342 if r1.Op != OpAMD64ROLWconst { 22343 break 22344 } 22345 if r1.AuxInt != 8 { 22346 break 22347 } 22348 x1 := r1.Args[0] 22349 if x1.Op != OpAMD64MOVWload { 22350 break 22351 } 22352 i1 := x1.AuxInt 22353 if x1.Aux != s { 22354 break 22355 } 22356 if p != x1.Args[0] { 22357 break 22358 } 22359 if mem != x1.Args[1] { 22360 break 22361 } 22362 y := or.Args[1] 22363 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 22364 break 22365 } 22366 b = mergePoint(b, x0, x1) 22367 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22368 v.reset(OpCopy) 22369 v.AddArg(v0) 22370 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22371 v1.AuxInt = j1 22372 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) 22373 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 22374 v3.AuxInt = i0 22375 v3.Aux = s 22376 v3.AddArg(p) 22377 v3.AddArg(mem) 22378 v2.AddArg(v3) 22379 v1.AddArg(v2) 22380 v0.AddArg(v1) 22381 v0.AddArg(y) 22382 return true 22383 } 22384 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 22385 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 22386 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> 
(SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y) 22387 for { 22388 s0 := v.Args[0] 22389 if s0.Op != OpAMD64SHLQconst { 22390 break 22391 } 22392 j0 := s0.AuxInt 22393 r0 := s0.Args[0] 22394 if r0.Op != OpAMD64ROLWconst { 22395 break 22396 } 22397 if r0.AuxInt != 8 { 22398 break 22399 } 22400 x0 := r0.Args[0] 22401 if x0.Op != OpAMD64MOVWload { 22402 break 22403 } 22404 i0 := x0.AuxInt 22405 s := x0.Aux 22406 p := x0.Args[0] 22407 mem := x0.Args[1] 22408 or := v.Args[1] 22409 if or.Op != OpAMD64ORQ { 22410 break 22411 } 22412 y := or.Args[0] 22413 s1 := or.Args[1] 22414 if s1.Op != OpAMD64SHLQconst { 22415 break 22416 } 22417 j1 := s1.AuxInt 22418 r1 := s1.Args[0] 22419 if r1.Op != OpAMD64ROLWconst { 22420 break 22421 } 22422 if r1.AuxInt != 8 { 22423 break 22424 } 22425 x1 := r1.Args[0] 22426 if x1.Op != OpAMD64MOVWload { 22427 break 22428 } 22429 i1 := x1.AuxInt 22430 if x1.Aux != s { 22431 break 22432 } 22433 if p != x1.Args[0] { 22434 break 22435 } 22436 if mem != x1.Args[1] { 22437 break 22438 } 22439 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 22440 break 22441 } 22442 b = mergePoint(b, x0, x1) 22443 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22444 v.reset(OpCopy) 22445 v.AddArg(v0) 22446 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22447 v1.AuxInt = j1 22448 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) 22449 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 22450 v3.AuxInt = i0 22451 v3.Aux = s 22452 v3.AddArg(p) 22453 v3.AddArg(mem) 22454 v2.AddArg(v3) 22455 v1.AddArg(v2) 22456 v0.AddArg(v1) 22457 v0.AddArg(y) 22458 return true 22459 } 22460 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 22461 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 22462 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y) 22463 for { 22464 or := v.Args[0] 22465 if or.Op != OpAMD64ORQ { 22466 break 22467 } 22468 s1 := or.Args[0] 22469 if s1.Op != OpAMD64SHLQconst { 22470 break 22471 } 22472 j1 := s1.AuxInt 22473 r1 := s1.Args[0] 22474 if r1.Op != OpAMD64ROLWconst { 22475 break 22476 } 22477 if r1.AuxInt != 8 { 22478 break 22479 } 22480 x1 := r1.Args[0] 22481 if x1.Op != OpAMD64MOVWload { 22482 break 22483 } 22484 i1 := x1.AuxInt 22485 s := x1.Aux 22486 p := x1.Args[0] 22487 mem := x1.Args[1] 22488 y := or.Args[1] 22489 s0 := v.Args[1] 22490 if s0.Op != OpAMD64SHLQconst { 22491 break 22492 } 22493 j0 := s0.AuxInt 22494 r0 := s0.Args[0] 22495 if r0.Op != OpAMD64ROLWconst { 22496 break 22497 } 22498 if r0.AuxInt != 8 { 22499 break 22500 } 22501 x0 := r0.Args[0] 22502 if x0.Op != OpAMD64MOVWload { 22503 break 22504 } 22505 i0 := x0.AuxInt 22506 if x0.Aux != s { 22507 break 22508 } 22509 if p != x0.Args[0] { 22510 break 22511 } 22512 if mem != x0.Args[1] { 22513 break 22514 } 22515 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && 
r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 22516 break 22517 } 22518 b = mergePoint(b, x0, x1) 22519 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22520 v.reset(OpCopy) 22521 v.AddArg(v0) 22522 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22523 v1.AuxInt = j1 22524 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) 22525 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 22526 v3.AuxInt = i0 22527 v3.Aux = s 22528 v3.AddArg(p) 22529 v3.AddArg(mem) 22530 v2.AddArg(v3) 22531 v1.AddArg(v2) 22532 v0.AddArg(v1) 22533 v0.AddArg(y) 22534 return true 22535 } 22536 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 22537 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 22538 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y) 22539 for { 22540 or := v.Args[0] 22541 if or.Op != OpAMD64ORQ { 22542 break 22543 } 22544 y := or.Args[0] 22545 s1 := or.Args[1] 22546 if s1.Op != OpAMD64SHLQconst { 22547 break 22548 } 22549 j1 := s1.AuxInt 22550 r1 := s1.Args[0] 22551 if r1.Op != OpAMD64ROLWconst { 22552 break 22553 } 22554 if r1.AuxInt != 8 { 22555 break 22556 } 22557 x1 := r1.Args[0] 22558 if x1.Op != OpAMD64MOVWload { 22559 break 22560 } 22561 i1 := x1.AuxInt 22562 s := x1.Aux 22563 p := x1.Args[0] 22564 mem := x1.Args[1] 22565 s0 := v.Args[1] 22566 if s0.Op != OpAMD64SHLQconst { 22567 break 22568 } 22569 j0 := s0.AuxInt 22570 r0 := s0.Args[0] 22571 if r0.Op != OpAMD64ROLWconst { 22572 break 22573 } 22574 if r0.AuxInt != 8 { 22575 break 22576 } 22577 x0 := r0.Args[0] 22578 if x0.Op != OpAMD64MOVWload { 22579 break 22580 } 22581 i0 := x0.AuxInt 22582 if x0.Aux != s { 22583 break 22584 } 22585 if p != x0.Args[0] { 22586 break 22587 } 22588 if mem != x0.Args[1] { 22589 break 22590 } 22591 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 22592 break 22593 } 22594 b = mergePoint(b, x0, x1) 22595 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 22596 v.reset(OpCopy) 22597 v.AddArg(v0) 22598 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 22599 v1.AuxInt = j1 22600 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) 22601 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) 22602 v3.AuxInt = i0 22603 v3.Aux = s 22604 v3.AddArg(p) 22605 v3.AddArg(mem) 22606 v2.AddArg(v3) 22607 v1.AddArg(v2) 22608 v0.AddArg(v1) 22609 v0.AddArg(y) 22610 return true 22611 } 22612 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22613 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 22614 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 22615 for { 22616 x1 := v.Args[0] 22617 if x1.Op != 
OpAMD64MOVBloadidx1 { 22618 break 22619 } 22620 i1 := x1.AuxInt 22621 s := x1.Aux 22622 p := x1.Args[0] 22623 idx := x1.Args[1] 22624 mem := x1.Args[2] 22625 sh := v.Args[1] 22626 if sh.Op != OpAMD64SHLQconst { 22627 break 22628 } 22629 if sh.AuxInt != 8 { 22630 break 22631 } 22632 x0 := sh.Args[0] 22633 if x0.Op != OpAMD64MOVBloadidx1 { 22634 break 22635 } 22636 i0 := x0.AuxInt 22637 if x0.Aux != s { 22638 break 22639 } 22640 if p != x0.Args[0] { 22641 break 22642 } 22643 if idx != x0.Args[1] { 22644 break 22645 } 22646 if mem != x0.Args[2] { 22647 break 22648 } 22649 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 22650 break 22651 } 22652 b = mergePoint(b, x0, x1) 22653 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 22654 v.reset(OpCopy) 22655 v.AddArg(v0) 22656 v0.AuxInt = 8 22657 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 22658 v1.AuxInt = i0 22659 v1.Aux = s 22660 v1.AddArg(p) 22661 v1.AddArg(idx) 22662 v1.AddArg(mem) 22663 v0.AddArg(v1) 22664 return true 22665 } 22666 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22667 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 22668 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 22669 for { 22670 x1 := v.Args[0] 22671 if x1.Op != OpAMD64MOVBloadidx1 { 22672 break 22673 } 22674 i1 := x1.AuxInt 22675 s := x1.Aux 22676 idx := x1.Args[0] 22677 p := x1.Args[1] 22678 mem := x1.Args[2] 22679 sh := v.Args[1] 22680 if sh.Op != OpAMD64SHLQconst { 22681 break 22682 } 22683 if sh.AuxInt != 8 { 22684 break 22685 } 22686 x0 := sh.Args[0] 22687 if x0.Op != OpAMD64MOVBloadidx1 { 22688 break 22689 } 22690 i0 := x0.AuxInt 22691 if x0.Aux != s { 22692 break 22693 } 22694 if p != x0.Args[0] { 22695 break 22696 } 22697 if idx != x0.Args[1] { 22698 break 22699 } 22700 if mem != x0.Args[2] { 22701 break 22702 } 22703 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 22704 break 22705 } 22706 b = mergePoint(b, x0, x1) 22707 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 22708 v.reset(OpCopy) 22709 v.AddArg(v0) 22710 v0.AuxInt = 8 22711 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 22712 v1.AuxInt = i0 22713 v1.Aux = s 22714 v1.AddArg(p) 22715 v1.AddArg(idx) 22716 v1.AddArg(mem) 22717 v0.AddArg(v1) 22718 return true 22719 } 22720 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 22721 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 22722 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 22723 for { 22724 x1 := v.Args[0] 22725 if x1.Op != OpAMD64MOVBloadidx1 { 22726 break 22727 } 22728 i1 := x1.AuxInt 22729 s := x1.Aux 22730 p := x1.Args[0] 22731 idx := x1.Args[1] 22732 mem := x1.Args[2] 22733 sh := v.Args[1] 22734 if sh.Op != OpAMD64SHLQconst { 22735 break 22736 } 22737 if sh.AuxInt != 8 { 22738 break 22739 } 22740 x0 := sh.Args[0] 22741 if x0.Op != OpAMD64MOVBloadidx1 { 22742 break 22743 } 22744 i0 := x0.AuxInt 22745 if x0.Aux != s { 22746 break 22747 } 22748 if idx != x0.Args[0] { 22749 break 22750 } 22751 if p != x0.Args[1] { 22752 break 22753 } 22754 
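// These MOVBloadidx1 variants are the indexed-address form of the
// big-endian byte-pair merge: the byte at i0 shifted left 8, OR'd with the
// byte at i0+1 (at the source level, roughly uint16(p[i])<<8 | uint16(p[i+1])),
// becomes (ROLWconst [8] (MOVWloadidx1 [i0] ...)), a single 16-bit indexed
// load plus a byte swap. Whichever p/idx order a variant matches, the
// replacement re-emits the operands in the canonical p, idx, mem order.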
if mem != x0.Args[2] { 22755 break 22756 } 22757 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 22758 break 22759 } 22760 b = mergePoint(b, x0, x1) 22761 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 22762 v.reset(OpCopy) 22763 v.AddArg(v0) 22764 v0.AuxInt = 8 22765 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 22766 v1.AuxInt = i0 22767 v1.Aux = s 22768 v1.AddArg(p) 22769 v1.AddArg(idx) 22770 v1.AddArg(mem) 22771 v0.AddArg(v1) 22772 return true 22773 } 22774 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 22775 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 22776 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 22777 for { 22778 x1 := v.Args[0] 22779 if x1.Op != OpAMD64MOVBloadidx1 { 22780 break 22781 } 22782 i1 := x1.AuxInt 22783 s := x1.Aux 22784 idx := x1.Args[0] 22785 p := x1.Args[1] 22786 mem := x1.Args[2] 22787 sh := v.Args[1] 22788 if sh.Op != OpAMD64SHLQconst { 22789 break 22790 } 22791 if sh.AuxInt != 8 { 22792 break 22793 } 22794 x0 := sh.Args[0] 22795 if x0.Op != OpAMD64MOVBloadidx1 { 22796 break 22797 } 22798 i0 := x0.AuxInt 22799 if x0.Aux != s { 22800 break 22801 } 22802 if idx != x0.Args[0] { 22803 break 22804 } 22805 if p != x0.Args[1] { 22806 break 22807 } 22808 if mem != x0.Args[2] { 22809 break 22810 } 22811 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 22812 break 22813 } 22814 b = mergePoint(b, x0, x1) 22815 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 22816 v.reset(OpCopy) 22817 v.AddArg(v0) 22818 v0.AuxInt = 8 22819 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 22820 v1.AuxInt = i0 22821 v1.Aux = s 22822 v1.AddArg(p) 22823 v1.AddArg(idx) 22824 v1.AddArg(mem) 22825 v0.AddArg(v1) 22826 return true 22827 } 22828 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 22829 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 22830 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 22831 for { 22832 sh := v.Args[0] 22833 if sh.Op != OpAMD64SHLQconst { 22834 break 22835 } 22836 if sh.AuxInt != 8 { 22837 break 22838 } 22839 x0 := sh.Args[0] 22840 if x0.Op != OpAMD64MOVBloadidx1 { 22841 break 22842 } 22843 i0 := x0.AuxInt 22844 s := x0.Aux 22845 p := x0.Args[0] 22846 idx := x0.Args[1] 22847 mem := x0.Args[2] 22848 x1 := v.Args[1] 22849 if x1.Op != OpAMD64MOVBloadidx1 { 22850 break 22851 } 22852 i1 := x1.AuxInt 22853 if x1.Aux != s { 22854 break 22855 } 22856 if p != x1.Args[0] { 22857 break 22858 } 22859 if idx != x1.Args[1] { 22860 break 22861 } 22862 if mem != x1.Args[2] { 22863 break 22864 } 22865 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 22866 break 22867 } 22868 b = mergePoint(b, x0, x1) 22869 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 22870 v.reset(OpCopy) 22871 v.AddArg(v0) 22872 v0.AuxInt = 8 22873 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 22874 v1.AuxInt = i0 22875 v1.Aux = s 22876 v1.AddArg(p) 22877 v1.AddArg(idx) 22878 
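	// The four rules above match the unshifted byte load as the first ORQ
	// operand; the four rules that follow repeat the same pattern with the
	// operands commuted (SHLQconst first). ORQ is commutative, but the
	// generated matcher spells out every operand order and every p/idx
	// order rather than canonicalizing first.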
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
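	// The next eight rules combine two adjacent byte-reversed word loads
	// (ROLWconst [8] of a MOVWloadidx1) into one byte-reversed 32-bit load
	// (BSWAPL of a MOVLloadidx1): the high half is shifted left by 16 and
	// the two offsets differ by exactly 2. This is the indexed-address arm
	// of big-endian load combining. A sketch of the kind of Go source that
	// produces the pattern (illustrative only, not taken from this file):
	//
	//	func beUint32(b []byte, i int) uint32 {
	//		return uint32(b[i])<<24 | uint32(b[i+1])<<16 |
	//			uint32(b[i+2])<<8 | uint32(b[i+3])
	//	}
	//
	// which can lower to a single 32-bit load followed by a BSWAPL.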
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
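	// As above, the remaining four BSWAPL-forming rules repeat the same
	// match with the SHLQconst operand first, covering the commuted ORQ.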
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
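	// The next eight rules apply the same combining one level up: two
	// adjacent byte-reversed 32-bit loads (BSWAPL of MOVLloadidx1), the
	// upper one shifted left by 32, fuse into one byte-reversed 64-bit
	// load (BSWAPQ of MOVQloadidx1) when the offsets differ by 4. An
	// illustrative source shape (an assumption, not from this file):
	//
	//	func beUint64(b []byte, i int) uint64 {
	//		return uint64(beUint32(b, i))<<32 | uint64(beUint32(b, i+4))
	//	}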
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
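	// The final four BSWAPQ-forming rules are the commuted forms, with the
	// SHLQconst [32] operand appearing as the first ORQ argument.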
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
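	// The rules from here on handle partially merged OR trees: when only a
	// pair of adjacent shifted byte loads is visible next to some remaining
	// expression y, the pair is fused into a shifted byte-swapped word load
	// and re-ORed with y, so later rewrite rounds can keep merging. The
	// conditions j1 == j0-8 and j1%16 == 0 ensure the two bytes form an
	// aligned 16-bit lane of the value being assembled. Schematically
	// (my notation, not the rule syntax):
	//
	//	(byte@i0 << j0) | ((byte@i1 << j1) | y)
	//	  -> (bswap16(word@i0) << j1) | y   // i1 == i0+1, j1 == j0-8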
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
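	// The preceding eight partial-merge rules take the inner ORQ as the
	// second operand of v; the rules below repeat the match with the inner
	// ORQ first and with each p/idx permutation, completing the commuted
	// cases.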
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos,
OpAMD64MOVWloadidx1, types.UInt16) 25009 v3.AuxInt = i0 25010 v3.Aux = s 25011 v3.AddArg(p) 25012 v3.AddArg(idx) 25013 v3.AddArg(mem) 25014 v2.AddArg(v3) 25015 v1.AddArg(v2) 25016 v0.AddArg(v1) 25017 v0.AddArg(y) 25018 return true 25019 } 25020 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 25021 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25022 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 25023 for { 25024 or := v.Args[0] 25025 if or.Op != OpAMD64ORQ { 25026 break 25027 } 25028 y := or.Args[0] 25029 s1 := or.Args[1] 25030 if s1.Op != OpAMD64SHLQconst { 25031 break 25032 } 25033 j1 := s1.AuxInt 25034 x1 := s1.Args[0] 25035 if x1.Op != OpAMD64MOVBloadidx1 { 25036 break 25037 } 25038 i1 := x1.AuxInt 25039 s := x1.Aux 25040 p := x1.Args[0] 25041 idx := x1.Args[1] 25042 mem := x1.Args[2] 25043 s0 := v.Args[1] 25044 if s0.Op != OpAMD64SHLQconst { 25045 break 25046 } 25047 j0 := s0.AuxInt 25048 x0 := s0.Args[0] 25049 if x0.Op != OpAMD64MOVBloadidx1 { 25050 break 25051 } 25052 i0 := x0.AuxInt 25053 if x0.Aux != s { 25054 break 25055 } 25056 if idx != x0.Args[0] { 25057 break 25058 } 25059 if p != x0.Args[1] { 25060 break 25061 } 25062 if mem != x0.Args[2] { 25063 break 25064 } 25065 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25066 break 25067 } 25068 b = mergePoint(b, x0, x1) 25069 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25070 v.reset(OpCopy) 25071 v.AddArg(v0) 25072 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25073 v1.AuxInt = j1 25074 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) 25075 v2.AuxInt = 8 25076 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 25077 v3.AuxInt = i0 25078 v3.Aux = s 25079 v3.AddArg(p) 25080 v3.AddArg(idx) 25081 v3.AddArg(mem) 25082 v2.AddArg(v3) 25083 v1.AddArg(v2) 25084 v0.AddArg(v1) 25085 v0.AddArg(y) 25086 return true 25087 } 25088 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 25089 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25090 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 25091 for { 25092 or := v.Args[0] 25093 if or.Op != OpAMD64ORQ { 25094 break 25095 } 25096 y := or.Args[0] 25097 s1 := or.Args[1] 25098 if s1.Op != OpAMD64SHLQconst { 25099 break 25100 } 25101 j1 := s1.AuxInt 25102 x1 := s1.Args[0] 25103 if x1.Op != OpAMD64MOVBloadidx1 { 25104 break 25105 } 25106 i1 := x1.AuxInt 25107 s := x1.Aux 25108 idx := x1.Args[0] 25109 p := x1.Args[1] 25110 mem := x1.Args[2] 25111 s0 := v.Args[1] 25112 if s0.Op != OpAMD64SHLQconst { 25113 break 25114 } 25115 j0 := s0.AuxInt 25116 x0 := s0.Args[0] 25117 if x0.Op != OpAMD64MOVBloadidx1 { 25118 break 25119 } 25120 i0 := x0.AuxInt 25121 if x0.Aux != s { 25122 
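	// Editor's note: the arms above merge two adjacent byte loads that are
	// shifted and ORed together into one 16-bit load wrapped in ROLWconst [8]
	// (a byte swap), i.e. a big-endian 2-byte read on this little-endian
	// target; the arms below repeat the trick one level up, pairing two such
	// byte-swapped word loads into a single 32-bit load under BSWAPL. A
	// hypothetical Go source shape that lowers to the 2-byte pattern
	// (identifiers here are illustrative, not from this file):
	//
	//	var b []byte
	//	v := uint64(b[i])<<8 | uint64(b[i+1]) // becomes (ROLWconst [8] (MOVWloadidx1 ...))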
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
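	// Editor's note: ORQ is commutative and the (p, idx) operands of an
	// idx1-addressed load may appear in either order, so the generator emits
	// one arm per ordering instead of canonicalizing first; the arms below
	// continue that enumeration with y on the other side of the inner ORQ.
	// All orderings rewrite to the same canonical (... p idx mem) result.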
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
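	// Editor's note: the side conditions are what make these merges safe:
	// every intermediate value must have exactly one use, mergePoint must
	// find a block that dominates both loads so the combined load has a
	// legal home, and clobber marks the replaced values dead so they are
	// swept up afterwards. (This reading of mergePoint/clobber is the
	// editor's summary, not part of the generated file.)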
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
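	// Editor's note: the two arms above fold a memory operand straight into
	// the OR when canMergeLoad allows it. A minimal sketch of source that
	// could produce this shape (function and variable names are hypothetical):
	//
	//	func orLoad(x uint64, p *uint64) uint64 {
	//		return x | *p // ORQ x (MOVQload p mem) -> ORQmem x (p) mem
	//	}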
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
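// Editor's note: the ROL*const rules above compose stacked constant rotates
// by adding the counts modulo the operand width ((c+d)&7, &15, &31, &63) and
// drop rotates by zero. For example (hypothetical input value):
//
//	(ROLBconst [6] (ROLBconst [5] x)) // rewrites to (ROLBconst [3] x), since (6+5)&7 == 3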
func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ANDLconst [31] y))
	// cond:
	// result: (SARL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1.AuxInt != 31 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
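// Editor's note: the shift-count masks mirror x86 semantics: 8/16/32-bit
// shifts mask the hardware count to 5 bits (hence c&31), while 64-bit shifts
// mask to 6 bits (c&63 below). Sub-word arithmetic shifts are additionally
// capped (min(c&31,7) and min(c&31,15)) because shifting an 8- or 16-bit
// value right by width-1 already yields pure sign bits. The
// (SARQ x (ANDQconst [63] y)) arm below drops an explicit mask that the
// hardware would apply anyway.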
func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
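// The SAR*const-of-constant folds above rely on Go's >> being an arithmetic
// shift for signed operands, so the sign of d is preserved just as SAR does
// in hardware (e.g. -8 >> 1 == -4). Illustrative sketch of the fold:
func exampleSARConstFold(d, c int64) int64 {
	return d >> uint64(c) // matches the (MOVQconst [d>>uint64(c)]) result
}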
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
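// SBB{L,Q}carrymask computes x-x-carry, i.e. 0 when the carry flag is clear
// and -1 (all ones) when it is set; once the flag state is known, the rules
// above fold it to a constant mask. Hypothetical sketch of the semantics:
func exampleCarryMask(carry bool) int64 {
	if carry { // carry set: the preceding unsigned compare was "less than"
		return -1
	}
	return 0
}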
func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
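// InvertFlags stands for a comparison with its operands swapped, so the
// unsigned conditions flip: "above" becomes "below" and vice versa, which is
// the SETA/SETB pair of rules above. Illustrative identity (always true):
func exampleInvertFlagsIdentity(a, b uint64) bool {
	return (a > b) == (b < a) // SETA after CMP a,b == SETB after CMP b,a
}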
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAE (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
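// The SETEQ rules above turn "TEST against a single bit" into a bit test:
// BT copies bit x of y into the carry flag, so TEST-result-is-zero becomes
// carry-clear (SETAE). A sketch of that equivalence (hypothetical helper):
func exampleBitTestEquivalence(y uint64, x uint) bool {
	carry := (y >> x) & 1                  // the bit BTQ puts in the carry flag
	return (y&(1<<x) == 0) == (carry == 0) // TEST is zero iff the bit is clear
}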
func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
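// Once the flag state is a known constant, every SETcc folds to 0 or 1, as in
// the SETL/SETLE rules above; for signed "less or equal" the truth table is
// simply lt||eq. Illustrative sketch:
func exampleSETLEFold(lt, eq bool) int64 {
	if lt || eq { // FlagLT_* or FlagEQ
		return 1
	}
	return 0
}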
func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETB (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (InvertFlags x))
	// cond:
	// result: (SETNE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETNE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
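// The TEST*const rules above fire only when c has exactly one bit set; then
// log2(c) is that bit's index and the TEST is really a single-bit test. A
// minimal sketch of the isPowerOfTwo side condition (not the ssa helper):
func examplePowerOfTwo(c int64) bool {
	return c > 0 && c&(c-1) == 0 // exactly one bit set
}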
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	// match: (SHLL x (MOVQconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ANDLconst [31] y))
	// cond:
	// result: (SHLL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1.AuxInt != 31 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ANDQconst [63] y))
	// cond:
	// result: (SHLQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1.AuxInt != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (ANDLconst [63] y))
	// cond:
	// result: (SHLQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1.AuxInt != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
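// x86 shift instructions mask the count themselves (by 31 for 32-bit and by
// 63 for 64-bit operands), so the explicit AND of the count deleted by the
// (SHLx x (AND*const [mask] y)) rules above was already a no-op. Sketch:
func exampleShiftCountMask(x uint64, y uint) uint64 {
	return x << (y & 63) // what SHLQ computes for any count y
}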
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ANDLconst [31] y))
	// cond:
	// result: (SHRL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1.AuxInt != 31 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ANDQconst [63] y))
	// cond:
	// result: (SHRQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1.AuxInt != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (ANDLconst [63] y))
	// cond:
	// result: (SHRQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1.AuxInt != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
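// Logical right shifts of sub-register values vanish once the count reaches
// the operand width: a 16-bit value shifted right by 16 or more is zero,
// which is the (SHRW _ c) -> (MOVLconst [0]) fold above. Sketch:
func exampleSHRWZero(x uint16) uint64 {
	return uint64(x) >> 16 // always 0 for any 16-bit x
}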
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
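// The SUBx load-merging rules above fold a memory operand into the subtract
// (SUBQmem), i.e. one "SUBQ off(ptr), reg" instead of a separate load plus a
// SUBQ. Unlike ADD/AND/OR/XOR, subtraction is not commutative, so only the
// right-hand operand may be the merged load. Illustrative sketch:
func exampleSubAsymmetry(x, m int64) bool {
	return x-m == -(m - x) // swapping the operands negates the result
}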
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSDmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSSmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	// match: (TESTB (MOVLconst [c]) x)
	// cond:
	// result: (TESTBconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB x (MOVLconst [c]))
	// cond:
	// result: (TESTBconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
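// The c != -(1<<31) guard in SUBQconst above exists because negating the most
// negative 32-bit immediate overflows the immediate range: -(-1<<31) is
// 1<<31, which ADDQconst cannot encode as a signed 32-bit constant. Sketch of
// the check (illustrative; c is already a 32-bit-range AuxInt here):
func exampleNegFitsInt32(c int64) bool {
	return c != -(1 << 31) // only then is -c still a valid 32-bit immediate
}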
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	// match: (TESTL (MOVLconst [c]) x)
	// cond:
	// result: (TESTLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL x (MOVLconst [c]))
	// cond:
	// result: (TESTLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	// match: (TESTW (MOVLconst [c]) x)
	// cond:
	// result: (TESTWconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW x (MOVLconst [c]))
	// cond:
	// result: (TESTWconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
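// The XADD/XCHG rules above fold a constant pointer adjustment into the
// instruction's displacement, rewriting off1(ptr+off2) as (off1+off2)(ptr)
// whenever the sum still fits in a signed 32-bit displacement. Sketch of the
// is32Bit-style side condition (hypothetical helper):
func exampleOffsetFold(off1, off2 int64) (int64, bool) {
	sum := off1 + off2
	return sum, sum == int64(int32(sum))
}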
{ 29354 // match: (XORL x (MOVLconst [c])) 29355 // cond: 29356 // result: (XORLconst [c] x) 29357 for { 29358 x := v.Args[0] 29359 v_1 := v.Args[1] 29360 if v_1.Op != OpAMD64MOVLconst { 29361 break 29362 } 29363 c := v_1.AuxInt 29364 v.reset(OpAMD64XORLconst) 29365 v.AuxInt = c 29366 v.AddArg(x) 29367 return true 29368 } 29369 // match: (XORL (MOVLconst [c]) x) 29370 // cond: 29371 // result: (XORLconst [c] x) 29372 for { 29373 v_0 := v.Args[0] 29374 if v_0.Op != OpAMD64MOVLconst { 29375 break 29376 } 29377 c := v_0.AuxInt 29378 x := v.Args[1] 29379 v.reset(OpAMD64XORLconst) 29380 v.AuxInt = c 29381 v.AddArg(x) 29382 return true 29383 } 29384 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 29385 // cond: d==32-c 29386 // result: (ROLLconst x [c]) 29387 for { 29388 v_0 := v.Args[0] 29389 if v_0.Op != OpAMD64SHLLconst { 29390 break 29391 } 29392 c := v_0.AuxInt 29393 x := v_0.Args[0] 29394 v_1 := v.Args[1] 29395 if v_1.Op != OpAMD64SHRLconst { 29396 break 29397 } 29398 d := v_1.AuxInt 29399 if x != v_1.Args[0] { 29400 break 29401 } 29402 if !(d == 32-c) { 29403 break 29404 } 29405 v.reset(OpAMD64ROLLconst) 29406 v.AuxInt = c 29407 v.AddArg(x) 29408 return true 29409 } 29410 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 29411 // cond: d==32-c 29412 // result: (ROLLconst x [c]) 29413 for { 29414 v_0 := v.Args[0] 29415 if v_0.Op != OpAMD64SHRLconst { 29416 break 29417 } 29418 d := v_0.AuxInt 29419 x := v_0.Args[0] 29420 v_1 := v.Args[1] 29421 if v_1.Op != OpAMD64SHLLconst { 29422 break 29423 } 29424 c := v_1.AuxInt 29425 if x != v_1.Args[0] { 29426 break 29427 } 29428 if !(d == 32-c) { 29429 break 29430 } 29431 v.reset(OpAMD64ROLLconst) 29432 v.AuxInt = c 29433 v.AddArg(x) 29434 return true 29435 } 29436 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 29437 // cond: d==16-c && c < 16 && t.Size() == 2 29438 // result: (ROLWconst x [c]) 29439 for { 29440 t := v.Type 29441 v_0 := v.Args[0] 29442 if v_0.Op != OpAMD64SHLLconst { 29443 break 29444 } 29445 c := v_0.AuxInt 29446 x := v_0.Args[0] 29447 v_1 := v.Args[1] 29448 if v_1.Op != OpAMD64SHRWconst { 29449 break 29450 } 29451 d := v_1.AuxInt 29452 if x != v_1.Args[0] { 29453 break 29454 } 29455 if !(d == 16-c && c < 16 && t.Size() == 2) { 29456 break 29457 } 29458 v.reset(OpAMD64ROLWconst) 29459 v.AuxInt = c 29460 v.AddArg(x) 29461 return true 29462 } 29463 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 29464 // cond: d==16-c && c < 16 && t.Size() == 2 29465 // result: (ROLWconst x [c]) 29466 for { 29467 t := v.Type 29468 v_0 := v.Args[0] 29469 if v_0.Op != OpAMD64SHRWconst { 29470 break 29471 } 29472 d := v_0.AuxInt 29473 x := v_0.Args[0] 29474 v_1 := v.Args[1] 29475 if v_1.Op != OpAMD64SHLLconst { 29476 break 29477 } 29478 c := v_1.AuxInt 29479 if x != v_1.Args[0] { 29480 break 29481 } 29482 if !(d == 16-c && c < 16 && t.Size() == 2) { 29483 break 29484 } 29485 v.reset(OpAMD64ROLWconst) 29486 v.AuxInt = c 29487 v.AddArg(x) 29488 return true 29489 } 29490 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 29491 // cond: d==8-c && c < 8 && t.Size() == 1 29492 // result: (ROLBconst x [c]) 29493 for { 29494 t := v.Type 29495 v_0 := v.Args[0] 29496 if v_0.Op != OpAMD64SHLLconst { 29497 break 29498 } 29499 c := v_0.AuxInt 29500 x := v_0.Args[0] 29501 v_1 := v.Args[1] 29502 if v_1.Op != OpAMD64SHRBconst { 29503 break 29504 } 29505 d := v_1.AuxInt 29506 if x != v_1.Args[0] { 29507 break 29508 } 29509 if !(d == 8-c && c < 8 && t.Size() == 1) { 29510 break 29511 } 29512 v.reset(OpAMD64ROLBconst) 29513 v.AuxInt = c 
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
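// Unlike the 32-bit rules above, the 64-bit rules that follow only fold a
// constant into the instruction when is32Bit(c) holds: AMD64 ALU
// instructions take at most a 32-bit immediate, which the hardware
// sign-extends to 64 bits. A constant outside that range has to stay
// materialized in a register by MOVQconst.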
func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
	// match: (XORQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
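// From here on, the functions lower generic (machine-independent) ops to
// AMD64 ones. Note that 8- and 16-bit arithmetic is done with 32-bit
// instructions (Add8 and Add16 both become ADDL): the high bits of the
// register then hold junk, which is harmless because explicit sign/zero
// extension ops are inserted wherever the full width is observed.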
func rewriteValueAMD64_OpAdd16(v *Value) bool {
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32(v *Value) bool {
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F(v *Value) bool {
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64(v *Value) bool {
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F(v *Value) bool {
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8(v *Value) bool {
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
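// AtomicAdd is lowered to LOCK XADD, which returns the *old* value of the
// memory location in an <old, mem> tuple, while Go's atomic add must return
// the *new* value. The result is therefore wrapped in AddTupleFirst32/64, a
// pseudo-op meaning "add val to the first element of the tuple". For
// example, if *ptr held 5 and val is 2, XADDLlock yields (5, mem') and
// AddTupleFirst32 turns that into (7, mem').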
func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(types.UInt32, TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(val)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(types.UInt64, TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(val)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
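// Atomic stores (below) are lowered to XCHG rather than a plain MOV: an
// XCHG with a memory operand is implicitly LOCKed on x86, which gives the
// required sequentially consistent store. XCHG produces an <old value, mem>
// tuple; Select1 extracts just the memory state and the old value is simply
// discarded.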
func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.UInt32, TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.UInt64, TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	types := &b.Func.Config.Types
	_ = types
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.BytePtr, TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.BytePtr, TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAvg64u(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
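// BSRQ/BSFQ leave their destination undefined when the input is zero (only
// the flags are set), so the rules below must handle a zero input
// explicitly. BitLen64 uses CMOVQEQ on the BSRQ flags to substitute -1 when
// x == 0, so the final ADDQconst [1] yields 0; the two identical BSRQ
// values are deduplicated later by CSE. Ctz32 (further down) takes the
// other approach: it ORs in 1<<32 so the 64-bit BSFQ input is never zero.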
func rewriteValueAMD64_OpBitLen32(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (BitLen32 x)
	// cond:
	// result: (BitLen64 (MOVLQZX <types.UInt64> x))
	for {
		x := v.Args[0]
		v.reset(OpBitLen64)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, types.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBswap32(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, types.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
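// The divides below return a <quotient, remainder> tuple, since the x86 DIV
// instruction computes both at once; Select0 picks the quotient here, and
// the corresponding Mod lowerings pick Select1. Div8/Div8u first sign- or
// zero-extend to 16 bits and use the 16-bit divide, avoiding the 8-bit form
// with its awkward AH/AL register pairing.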
func rewriteValueAMD64_OpDiv16(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
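// Comparisons lower to a flag-setting CMP followed by a SETcc that
// materializes the boolean. Floating-point comparisons use UCOMISS/UCOMISD;
// note that the "less" forms further down (e.g. Less32F, Leq32F) swap the
// operands and test greater / greater-or-equal instead, essentially because
// UCOMIS sets the flags like an unsigned compare, and that arrangement
// makes an ordered comparison involving a NaN come out false, as Go
// requires.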
func rewriteValueAMD64_OpEq16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpGeq16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
	b := v.Block
	_ = b
	// match: (IsInBounds idx len)
	// cond:
	// result: (SETB (CMPQ idx len))
	for {
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
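// IsNonNil tests a pointer against itself: TESTQ p p ANDs the register with
// itself, setting ZF exactly when p is zero, so SETNE yields the "non-nil"
// boolean without needing a constant operand. Note also that IsInBounds
// above compiles idx < len as an *unsigned* compare (SETB): a negative
// index, viewed unsigned, becomes huge and is rejected by the same single
// comparison.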
func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
	b := v.Block
	_ = b
	// match: (IsSliceInBounds idx len)
	// cond:
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
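// Go defines x << y as 0 once y reaches the operand width, but x86 shift
// instructions mask the count (mod 32 or 64). The Lsh rules below therefore
// AND the raw shift result with a mask built by SBBLcarrymask: CMPxconst y
// [32] sets the carry flag iff y < 32 (unsigned), and SBBLcarrymask turns
// that borrow into -1 (keep the result) or 0 (force the result to zero).
// For example, with y = 40: SHLL keeps only y&31 = 8 bits of shift, but the
// mask is 0, so the ANDL correctly produces 0.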
func rewriteValueAMD64_OpLoad(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
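// The left-shift rules below share one pattern. x86 shift instructions mask
// the count (mod 32 for SHLL, mod 64 for SHLQ), but Go defines a shift by
// width-or-more to produce 0. So the shifted value is ANDed with a mask:
// CMPxconst y [n] sets the carry flag iff y < n (unsigned), and SBBcarrymask
// turns that carry into all ones (count in range) or all zeros (count too
// big), zeroing the result for oversized counts.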
func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
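// The 64-bit shifts use the Q-sized ops: SHLQ masks its count mod 64, so the
// in-range bound in the comparison becomes 64 instead of 32.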
func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
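// The Mod rules lower to the hardware divide, which yields a
// (quotient, remainder) tuple; Select1 extracts the remainder. The 8-bit
// variants extend their operands and reuse the 16-bit divide.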
func rewriteValueAMD64_OpMod16(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
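// Move lowers memory copies by size. Up to 16 bytes is one or two plain
// load/store pairs (odd sizes such as 3, 5, 6, and 7 use a pair of moves,
// the 7-byte case overlapping); between 8 and 16 bytes, two overlapping
// MOVQs. Above 16 bytes with a ragged tail, the tail is handled first and
// the rest re-dispatched as a smaller Move. Multiples of 16 from 32 to 1024
// bytes jump into the Duff's-device copy routine: each 16-byte copy unit in
// that routine occupies 14 bytes of code, which is where the AuxInt of
// 14*(64-s/16) comes from as an entry offset running exactly s/16 units.
// Everything larger (or with Duff's device disabled) falls back to REPMOVSQ
// with a count of s/8 quadwords.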
func rewriteValueAMD64_OpMove(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	types := &b.Func.Config.Types
	_ = types
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond:
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
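// Mul16 and Mul8 have no dedicated narrow multiply rule: only the low bits
// of the product are needed, so a single 32-bit MULL serves the 8-, 16-, and
// 32-bit cases alike.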
func rewriteValueAMD64_OpMul16(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
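// Negation of a float is a sign-bit flip: PXOR with the bit pattern of -0.0
// (f2i(math.Copysign(0, -1)), only the sign bit set) implements it without
// touching the rest of the value, so NaNs and zeros behave correctly.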
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, types.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, types.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
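// Float inequality must report true when either operand is NaN. UCOMISS and
// UCOMISD signal an unordered result via the parity flag, and SETNEF folds
// that parity check into the not-equal test.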
func rewriteValueAMD64_OpNeq32F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
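// OffPtr becomes a constant add. ADDQconst carries only a 32-bit immediate,
// so larger offsets materialize the constant with MOVQconst first; the
// config.PtrSize == 4 case covers the 32-bit-pointer configuration.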
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	types := &b.Func.Config.Types
	_ = types
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
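// The 8- and 16-bit population counts zero-extend their operand to 32 bits
// and use POPCNTL; the upper bits are zero, so the count is unchanged.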
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <types.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, types.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <types.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, types.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRound32F(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
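// Signed right shifts cannot mask the result to zero: an oversized count has
// to produce the sign fill. These rules clamp the count instead: when
// y >= width the NOTed carry mask is all ones, ORing it into y gives a count
// the hardware masks down to 31 (or 63), which SAR treats as "shift in sign
// bits all the way".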
func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
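// AddTupleFirst32/64 marks a tuple whose first element should have val added
// to it (it arises from lowering atomic add). Select0 re-applies the
// addition to the tuple's first element; Select1 just forwards the tuple's
// untouched second element.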
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 tuple val))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[0]
		val := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 tuple val))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[0]
		val := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 tuple _))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[0]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 tuple _))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[0]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSignExt16to32(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSlicemask(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
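// The Slicemask lowering above relies on two's-complement sign smearing:
// for a slice length x >= 0, NEGQ sets the sign bit exactly when x != 0,
// and an arithmetic shift by 63 copies that bit into every position. A
// stand-alone sketch of the same computation (slicemaskSketch is a
// hypothetical helper for illustration, not part of the generated rules):
func slicemaskSketch(x int64) int64 {
	// For a nonnegative length, -x is negative precisely when x > 0, so
	// the arithmetic shift yields -1 (all ones) for any nonzero length
	// and 0 when x == 0.
	return (-x) >> 63
}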
func rewriteValueAMD64_OpSqrt(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpStore(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc16to8(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpXor16(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpZero(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	types := &b.Func.Config.Types
	_ = types
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond:
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond:
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond:
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
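	// Note: storeconst AuxInts pack a constant value and an offset into a
	// single integer via makeValAndOff, so [makeValAndOff(0,24)] reads as
	// "store constant 0 at offset 24". The chain below therefore clears
	// bytes 0-31 with four 8-byte stores; the odd sizes above (3, 5, 6, 7)
	// use the same trick with narrower, possibly overlapping stores.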
	for {
		if v.AuxInt != 32 {
			break
		}
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice
	// result: (Zero [s-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - 8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZeroExt16to32(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16(v *Value) bool {
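	// All six ZeroExt rewrites simply select a MOVxQZX pseudo-op; on amd64
	// a 32-bit move already zeroes the upper half of the destination
	// register, so later passes can often drop these moves entirely. The
	// sign-extension cases earlier in the file (MOVBQSX and friends)
	// follow the same template.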
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	types := &config.Types
	_ = types
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (UGE (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
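		// Note: FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT and FlagGT_UGT
		// stand for statically known flag states (paired signed/unsigned
		// comparison outcomes). Once the flags are known, the conditional
		// block degenerates to a BlockFirst with no control value, and
		// swapSuccessors is applied when the known outcome selects the
		// "no" edge, as in this rule.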
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
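			// (This matcher repeats the previous rule verbatim; the
			// generator appears to emit both orders of TESTB's commuted
			// arguments, and the two coincide when both operands are the
			// same SETcc value. The same doubling recurs below.)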
36406 v_0 := v.Args[0] 36407 if v_0.Op != OpAMD64SETG { 36408 break 36409 } 36410 cmp := v_0.Args[0] 36411 v_1 := v.Args[1] 36412 if v_1.Op != OpAMD64SETG { 36413 break 36414 } 36415 if cmp != v_1.Args[0] { 36416 break 36417 } 36418 yes := b.Succs[0] 36419 no := b.Succs[1] 36420 b.Kind = BlockAMD64GT 36421 b.SetControl(cmp) 36422 _ = yes 36423 _ = no 36424 return true 36425 } 36426 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 36427 // cond: 36428 // result: (GE cmp yes no) 36429 for { 36430 v := b.Control 36431 if v.Op != OpAMD64TESTB { 36432 break 36433 } 36434 v_0 := v.Args[0] 36435 if v_0.Op != OpAMD64SETGE { 36436 break 36437 } 36438 cmp := v_0.Args[0] 36439 v_1 := v.Args[1] 36440 if v_1.Op != OpAMD64SETGE { 36441 break 36442 } 36443 if cmp != v_1.Args[0] { 36444 break 36445 } 36446 yes := b.Succs[0] 36447 no := b.Succs[1] 36448 b.Kind = BlockAMD64GE 36449 b.SetControl(cmp) 36450 _ = yes 36451 _ = no 36452 return true 36453 } 36454 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 36455 // cond: 36456 // result: (GE cmp yes no) 36457 for { 36458 v := b.Control 36459 if v.Op != OpAMD64TESTB { 36460 break 36461 } 36462 v_0 := v.Args[0] 36463 if v_0.Op != OpAMD64SETGE { 36464 break 36465 } 36466 cmp := v_0.Args[0] 36467 v_1 := v.Args[1] 36468 if v_1.Op != OpAMD64SETGE { 36469 break 36470 } 36471 if cmp != v_1.Args[0] { 36472 break 36473 } 36474 yes := b.Succs[0] 36475 no := b.Succs[1] 36476 b.Kind = BlockAMD64GE 36477 b.SetControl(cmp) 36478 _ = yes 36479 _ = no 36480 return true 36481 } 36482 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 36483 // cond: 36484 // result: (EQ cmp yes no) 36485 for { 36486 v := b.Control 36487 if v.Op != OpAMD64TESTB { 36488 break 36489 } 36490 v_0 := v.Args[0] 36491 if v_0.Op != OpAMD64SETEQ { 36492 break 36493 } 36494 cmp := v_0.Args[0] 36495 v_1 := v.Args[1] 36496 if v_1.Op != OpAMD64SETEQ { 36497 break 36498 } 36499 if cmp != v_1.Args[0] { 36500 break 36501 } 36502 yes := b.Succs[0] 36503 no := b.Succs[1] 36504 b.Kind = BlockAMD64EQ 36505 b.SetControl(cmp) 36506 _ = yes 36507 _ = no 36508 return true 36509 } 36510 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 36511 // cond: 36512 // result: (EQ cmp yes no) 36513 for { 36514 v := b.Control 36515 if v.Op != OpAMD64TESTB { 36516 break 36517 } 36518 v_0 := v.Args[0] 36519 if v_0.Op != OpAMD64SETEQ { 36520 break 36521 } 36522 cmp := v_0.Args[0] 36523 v_1 := v.Args[1] 36524 if v_1.Op != OpAMD64SETEQ { 36525 break 36526 } 36527 if cmp != v_1.Args[0] { 36528 break 36529 } 36530 yes := b.Succs[0] 36531 no := b.Succs[1] 36532 b.Kind = BlockAMD64EQ 36533 b.SetControl(cmp) 36534 _ = yes 36535 _ = no 36536 return true 36537 } 36538 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 36539 // cond: 36540 // result: (NE cmp yes no) 36541 for { 36542 v := b.Control 36543 if v.Op != OpAMD64TESTB { 36544 break 36545 } 36546 v_0 := v.Args[0] 36547 if v_0.Op != OpAMD64SETNE { 36548 break 36549 } 36550 cmp := v_0.Args[0] 36551 v_1 := v.Args[1] 36552 if v_1.Op != OpAMD64SETNE { 36553 break 36554 } 36555 if cmp != v_1.Args[0] { 36556 break 36557 } 36558 yes := b.Succs[0] 36559 no := b.Succs[1] 36560 b.Kind = BlockAMD64NE 36561 b.SetControl(cmp) 36562 _ = yes 36563 _ = no 36564 return true 36565 } 36566 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 36567 // cond: 36568 // result: (NE cmp yes no) 36569 for { 36570 v := b.Control 36571 if v.Op != OpAMD64TESTB { 36572 break 36573 } 36574 v_0 := v.Args[0] 36575 if v_0.Op != OpAMD64SETNE { 36576 break 36577 } 36578 cmp := v_0.Args[0] 36579 v_1 
:= v.Args[1] 36580 if v_1.Op != OpAMD64SETNE { 36581 break 36582 } 36583 if cmp != v_1.Args[0] { 36584 break 36585 } 36586 yes := b.Succs[0] 36587 no := b.Succs[1] 36588 b.Kind = BlockAMD64NE 36589 b.SetControl(cmp) 36590 _ = yes 36591 _ = no 36592 return true 36593 } 36594 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 36595 // cond: 36596 // result: (ULT cmp yes no) 36597 for { 36598 v := b.Control 36599 if v.Op != OpAMD64TESTB { 36600 break 36601 } 36602 v_0 := v.Args[0] 36603 if v_0.Op != OpAMD64SETB { 36604 break 36605 } 36606 cmp := v_0.Args[0] 36607 v_1 := v.Args[1] 36608 if v_1.Op != OpAMD64SETB { 36609 break 36610 } 36611 if cmp != v_1.Args[0] { 36612 break 36613 } 36614 yes := b.Succs[0] 36615 no := b.Succs[1] 36616 b.Kind = BlockAMD64ULT 36617 b.SetControl(cmp) 36618 _ = yes 36619 _ = no 36620 return true 36621 } 36622 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 36623 // cond: 36624 // result: (ULT cmp yes no) 36625 for { 36626 v := b.Control 36627 if v.Op != OpAMD64TESTB { 36628 break 36629 } 36630 v_0 := v.Args[0] 36631 if v_0.Op != OpAMD64SETB { 36632 break 36633 } 36634 cmp := v_0.Args[0] 36635 v_1 := v.Args[1] 36636 if v_1.Op != OpAMD64SETB { 36637 break 36638 } 36639 if cmp != v_1.Args[0] { 36640 break 36641 } 36642 yes := b.Succs[0] 36643 no := b.Succs[1] 36644 b.Kind = BlockAMD64ULT 36645 b.SetControl(cmp) 36646 _ = yes 36647 _ = no 36648 return true 36649 } 36650 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 36651 // cond: 36652 // result: (ULE cmp yes no) 36653 for { 36654 v := b.Control 36655 if v.Op != OpAMD64TESTB { 36656 break 36657 } 36658 v_0 := v.Args[0] 36659 if v_0.Op != OpAMD64SETBE { 36660 break 36661 } 36662 cmp := v_0.Args[0] 36663 v_1 := v.Args[1] 36664 if v_1.Op != OpAMD64SETBE { 36665 break 36666 } 36667 if cmp != v_1.Args[0] { 36668 break 36669 } 36670 yes := b.Succs[0] 36671 no := b.Succs[1] 36672 b.Kind = BlockAMD64ULE 36673 b.SetControl(cmp) 36674 _ = yes 36675 _ = no 36676 return true 36677 } 36678 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 36679 // cond: 36680 // result: (ULE cmp yes no) 36681 for { 36682 v := b.Control 36683 if v.Op != OpAMD64TESTB { 36684 break 36685 } 36686 v_0 := v.Args[0] 36687 if v_0.Op != OpAMD64SETBE { 36688 break 36689 } 36690 cmp := v_0.Args[0] 36691 v_1 := v.Args[1] 36692 if v_1.Op != OpAMD64SETBE { 36693 break 36694 } 36695 if cmp != v_1.Args[0] { 36696 break 36697 } 36698 yes := b.Succs[0] 36699 no := b.Succs[1] 36700 b.Kind = BlockAMD64ULE 36701 b.SetControl(cmp) 36702 _ = yes 36703 _ = no 36704 return true 36705 } 36706 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 36707 // cond: 36708 // result: (UGT cmp yes no) 36709 for { 36710 v := b.Control 36711 if v.Op != OpAMD64TESTB { 36712 break 36713 } 36714 v_0 := v.Args[0] 36715 if v_0.Op != OpAMD64SETA { 36716 break 36717 } 36718 cmp := v_0.Args[0] 36719 v_1 := v.Args[1] 36720 if v_1.Op != OpAMD64SETA { 36721 break 36722 } 36723 if cmp != v_1.Args[0] { 36724 break 36725 } 36726 yes := b.Succs[0] 36727 no := b.Succs[1] 36728 b.Kind = BlockAMD64UGT 36729 b.SetControl(cmp) 36730 _ = yes 36731 _ = no 36732 return true 36733 } 36734 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 36735 // cond: 36736 // result: (UGT cmp yes no) 36737 for { 36738 v := b.Control 36739 if v.Op != OpAMD64TESTB { 36740 break 36741 } 36742 v_0 := v.Args[0] 36743 if v_0.Op != OpAMD64SETA { 36744 break 36745 } 36746 cmp := v_0.Args[0] 36747 v_1 := v.Args[1] 36748 if v_1.Op != OpAMD64SETA { 36749 break 36750 } 36751 if cmp != v_1.Args[0] { 36752 break 36753 
} 36754 yes := b.Succs[0] 36755 no := b.Succs[1] 36756 b.Kind = BlockAMD64UGT 36757 b.SetControl(cmp) 36758 _ = yes 36759 _ = no 36760 return true 36761 } 36762 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 36763 // cond: 36764 // result: (UGE cmp yes no) 36765 for { 36766 v := b.Control 36767 if v.Op != OpAMD64TESTB { 36768 break 36769 } 36770 v_0 := v.Args[0] 36771 if v_0.Op != OpAMD64SETAE { 36772 break 36773 } 36774 cmp := v_0.Args[0] 36775 v_1 := v.Args[1] 36776 if v_1.Op != OpAMD64SETAE { 36777 break 36778 } 36779 if cmp != v_1.Args[0] { 36780 break 36781 } 36782 yes := b.Succs[0] 36783 no := b.Succs[1] 36784 b.Kind = BlockAMD64UGE 36785 b.SetControl(cmp) 36786 _ = yes 36787 _ = no 36788 return true 36789 } 36790 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 36791 // cond: 36792 // result: (UGE cmp yes no) 36793 for { 36794 v := b.Control 36795 if v.Op != OpAMD64TESTB { 36796 break 36797 } 36798 v_0 := v.Args[0] 36799 if v_0.Op != OpAMD64SETAE { 36800 break 36801 } 36802 cmp := v_0.Args[0] 36803 v_1 := v.Args[1] 36804 if v_1.Op != OpAMD64SETAE { 36805 break 36806 } 36807 if cmp != v_1.Args[0] { 36808 break 36809 } 36810 yes := b.Succs[0] 36811 no := b.Succs[1] 36812 b.Kind = BlockAMD64UGE 36813 b.SetControl(cmp) 36814 _ = yes 36815 _ = no 36816 return true 36817 } 36818 // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) 36819 // cond: !config.nacl 36820 // result: (ULT (BTL x y)) 36821 for { 36822 v := b.Control 36823 if v.Op != OpAMD64TESTL { 36824 break 36825 } 36826 v_0 := v.Args[0] 36827 if v_0.Op != OpAMD64SHLL { 36828 break 36829 } 36830 v_0_0 := v_0.Args[0] 36831 if v_0_0.Op != OpAMD64MOVLconst { 36832 break 36833 } 36834 if v_0_0.AuxInt != 1 { 36835 break 36836 } 36837 x := v_0.Args[1] 36838 y := v.Args[1] 36839 if !(!config.nacl) { 36840 break 36841 } 36842 b.Kind = BlockAMD64ULT 36843 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 36844 v0.AddArg(x) 36845 v0.AddArg(y) 36846 b.SetControl(v0) 36847 return true 36848 } 36849 // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) 36850 // cond: !config.nacl 36851 // result: (ULT (BTL x y)) 36852 for { 36853 v := b.Control 36854 if v.Op != OpAMD64TESTL { 36855 break 36856 } 36857 y := v.Args[0] 36858 v_1 := v.Args[1] 36859 if v_1.Op != OpAMD64SHLL { 36860 break 36861 } 36862 v_1_0 := v_1.Args[0] 36863 if v_1_0.Op != OpAMD64MOVLconst { 36864 break 36865 } 36866 if v_1_0.AuxInt != 1 { 36867 break 36868 } 36869 x := v_1.Args[1] 36870 if !(!config.nacl) { 36871 break 36872 } 36873 b.Kind = BlockAMD64ULT 36874 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 36875 v0.AddArg(x) 36876 v0.AddArg(y) 36877 b.SetControl(v0) 36878 return true 36879 } 36880 // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 36881 // cond: !config.nacl 36882 // result: (ULT (BTQ x y)) 36883 for { 36884 v := b.Control 36885 if v.Op != OpAMD64TESTQ { 36886 break 36887 } 36888 v_0 := v.Args[0] 36889 if v_0.Op != OpAMD64SHLQ { 36890 break 36891 } 36892 v_0_0 := v_0.Args[0] 36893 if v_0_0.Op != OpAMD64MOVQconst { 36894 break 36895 } 36896 if v_0_0.AuxInt != 1 { 36897 break 36898 } 36899 x := v_0.Args[1] 36900 y := v.Args[1] 36901 if !(!config.nacl) { 36902 break 36903 } 36904 b.Kind = BlockAMD64ULT 36905 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 36906 v0.AddArg(x) 36907 v0.AddArg(y) 36908 b.SetControl(v0) 36909 return true 36910 } 36911 // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) 36912 // cond: !config.nacl 36913 // result: (ULT (BTQ x y)) 36914 for { 36915 v := b.Control 36916 if v.Op != OpAMD64TESTQ { 36917 break 36918 } 36919 y := v.Args[0] 
36920 v_1 := v.Args[1] 36921 if v_1.Op != OpAMD64SHLQ { 36922 break 36923 } 36924 v_1_0 := v_1.Args[0] 36925 if v_1_0.Op != OpAMD64MOVQconst { 36926 break 36927 } 36928 if v_1_0.AuxInt != 1 { 36929 break 36930 } 36931 x := v_1.Args[1] 36932 if !(!config.nacl) { 36933 break 36934 } 36935 b.Kind = BlockAMD64ULT 36936 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 36937 v0.AddArg(x) 36938 v0.AddArg(y) 36939 b.SetControl(v0) 36940 return true 36941 } 36942 // match: (NE (TESTLconst [c] x)) 36943 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 36944 // result: (ULT (BTLconst [log2(c)] x)) 36945 for { 36946 v := b.Control 36947 if v.Op != OpAMD64TESTLconst { 36948 break 36949 } 36950 c := v.AuxInt 36951 x := v.Args[0] 36952 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 36953 break 36954 } 36955 b.Kind = BlockAMD64ULT 36956 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags) 36957 v0.AuxInt = log2(c) 36958 v0.AddArg(x) 36959 b.SetControl(v0) 36960 return true 36961 } 36962 // match: (NE (TESTQconst [c] x)) 36963 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36964 // result: (ULT (BTQconst [log2(c)] x)) 36965 for { 36966 v := b.Control 36967 if v.Op != OpAMD64TESTQconst { 36968 break 36969 } 36970 c := v.AuxInt 36971 x := v.Args[0] 36972 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36973 break 36974 } 36975 b.Kind = BlockAMD64ULT 36976 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 36977 v0.AuxInt = log2(c) 36978 v0.AddArg(x) 36979 b.SetControl(v0) 36980 return true 36981 } 36982 // match: (NE (TESTQ (MOVQconst [c]) x)) 36983 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36984 // result: (ULT (BTQconst [log2(c)] x)) 36985 for { 36986 v := b.Control 36987 if v.Op != OpAMD64TESTQ { 36988 break 36989 } 36990 v_0 := v.Args[0] 36991 if v_0.Op != OpAMD64MOVQconst { 36992 break 36993 } 36994 c := v_0.AuxInt 36995 x := v.Args[1] 36996 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36997 break 36998 } 36999 b.Kind = BlockAMD64ULT 37000 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 37001 v0.AuxInt = log2(c) 37002 v0.AddArg(x) 37003 b.SetControl(v0) 37004 return true 37005 } 37006 // match: (NE (TESTQ x (MOVQconst [c]))) 37007 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 37008 // result: (ULT (BTQconst [log2(c)] x)) 37009 for { 37010 v := b.Control 37011 if v.Op != OpAMD64TESTQ { 37012 break 37013 } 37014 x := v.Args[0] 37015 v_1 := v.Args[1] 37016 if v_1.Op != OpAMD64MOVQconst { 37017 break 37018 } 37019 c := v_1.AuxInt 37020 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 37021 break 37022 } 37023 b.Kind = BlockAMD64ULT 37024 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 37025 v0.AuxInt = log2(c) 37026 v0.AddArg(x) 37027 b.SetControl(v0) 37028 return true 37029 } 37030 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 37031 // cond: 37032 // result: (UGT cmp yes no) 37033 for { 37034 v := b.Control 37035 if v.Op != OpAMD64TESTB { 37036 break 37037 } 37038 v_0 := v.Args[0] 37039 if v_0.Op != OpAMD64SETGF { 37040 break 37041 } 37042 cmp := v_0.Args[0] 37043 v_1 := v.Args[1] 37044 if v_1.Op != OpAMD64SETGF { 37045 break 37046 } 37047 if cmp != v_1.Args[0] { 37048 break 37049 } 37050 yes := b.Succs[0] 37051 no := b.Succs[1] 37052 b.Kind = BlockAMD64UGT 37053 b.SetControl(cmp) 37054 _ = yes 37055 _ = no 37056 return true 37057 } 37058 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 37059 // cond: 37060 // result: (UGT cmp yes no) 37061 for { 37062 v := b.Control 37063 if v.Op != OpAMD64TESTB { 37064 
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
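		// The eight TESTB rules above undo a materialized floating-point
		// condition: SET(GF|GEF|EQF|NEF) turn SSE comparison flags into a
		// boolean, and testing that boolean just to branch on it is
		// equivalent to branching on the original flags (UGT/UGE/EQF/NEF
		// respectively). Each rule appears twice, apparently because the
		// rule generator expands commutative ops such as TESTB into both
		// argument orders, and the two variants coincide when both
		// arguments match the same pattern; the second copy of each pair
		// can never fire.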
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
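	// The Flag* ops represent comparison results known at compile time, so
	// a conditional block controlled by one folds to an unconditional
	// BlockFirst with a nil control; when the known condition is false,
	// swapSuccessors is called first so that Succs[0] becomes the edge
	// actually taken. The unsigned cases below repeat the same two shapes:
	// InvertFlags (the operands were compared in swapped order) flips the
	// block to the mirrored condition, and Flag* controls fold the branch
	// away entirely.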
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	}
	return false
}