// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go

package ssa

import "math"

var _ = math.MinInt8 // in case not otherwise used
func rewriteValueAMD64(v *Value, config *Config) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v, config)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v, config)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v, config)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v, config)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v, config)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v, config)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v, config)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v, config)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v, config)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v, config)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v, config)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v, config)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v, config)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v, config)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v, config)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v, config)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v, config)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v, config)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v, config)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v, config)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v, config)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v, config)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v, config)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v, config)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v, config)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v, config)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v, config)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v, config)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v, config)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v, config)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v, config)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v, config)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v, config)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v, config)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v, config)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v, config)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v, config)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v, config)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v, config)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v, config)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v, config)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v, config)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v, config)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v, config)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v, config)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v, config)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v, config)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v, config)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v, config)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v, config)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v, config)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v, config)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v, config)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16(v, config)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32(v, config)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F(v, config)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64(v, config)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F(v, config)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8(v, config)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr(v, config)
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v, config)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16(v, config)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32(v, config)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64(v, config)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8(v, config)
	case OpAndB:
		return rewriteValueAMD64_OpAndB(v, config)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v, config)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v, config)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v, config)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v, config)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v, config)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u(v, config)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32(v, config)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64(v, config)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall(v, config)
	case OpCom16:
		return rewriteValueAMD64_OpCom16(v, config)
	case OpCom32:
		return rewriteValueAMD64_OpCom32(v, config)
	case OpCom64:
		return rewriteValueAMD64_OpCom64(v, config)
	case OpCom8:
		return rewriteValueAMD64_OpCom8(v, config)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v, config)
	case OpConst32:
		return rewriteValueAMD64_OpConst32(v, config)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F(v, config)
	case OpConst64:
		return rewriteValueAMD64_OpConst64(v, config)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F(v, config)
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v, config)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v, config)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v, config)
	case OpConvert:
		return rewriteValueAMD64_OpConvert(v, config)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v, config)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v, config)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32(v, config)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64(v, config)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F(v, config)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F(v, config)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F(v, config)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32(v, config)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F(v, config)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64(v, config)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F(v, config)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F(v, config)
	case OpDeferCall:
		return rewriteValueAMD64_OpDeferCall(v, config)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v, config)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v, config)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v, config)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F(v, config)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v, config)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v, config)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F(v, config)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v, config)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v, config)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v, config)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v, config)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v, config)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v, config)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v, config)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v, config)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v, config)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v, config)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v, config)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16(v, config)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U(v, config)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32(v, config)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F(v, config)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U(v, config)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64(v, config)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F(v, config)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U(v, config)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8(v, config)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U(v, config)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr(v, config)
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v, config)
	case OpGoCall:
		return rewriteValueAMD64_OpGoCall(v, config)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16(v, config)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U(v, config)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32(v, config)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F(v, config)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U(v, config)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64(v, config)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F(v, config)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U(v, config)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8(v, config)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U(v, config)
	case OpHmul16:
		return rewriteValueAMD64_OpHmul16(v, config)
	case OpHmul16u:
		return rewriteValueAMD64_OpHmul16u(v, config)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32(v, config)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u(v, config)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64(v, config)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u(v, config)
	case OpHmul8:
		return rewriteValueAMD64_OpHmul8(v, config)
	case OpHmul8u:
		return rewriteValueAMD64_OpHmul8u(v, config)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi(v, config)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall(v, config)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v, config)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v, config)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v, config)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v, config)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v, config)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v, config)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v, config)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v, config)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v, config)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v, config)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v, config)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v, config)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v, config)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v, config)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v, config)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v, config)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v, config)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v, config)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v, config)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v, config)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v, config)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v, config)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v, config)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v, config)
	case OpLrot16:
		return rewriteValueAMD64_OpLrot16(v, config)
	case OpLrot32:
		return rewriteValueAMD64_OpLrot32(v, config)
	case OpLrot64:
		return rewriteValueAMD64_OpLrot64(v, config)
	case OpLrot8:
		return rewriteValueAMD64_OpLrot8(v, config)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v, config)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v, config)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v, config)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v, config)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v, config)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v, config)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v, config)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v, config)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v, config)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v, config)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v, config)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v, config)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v, config)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v, config)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v, config)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v, config)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v, config)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v, config)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v, config)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v, config)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v, config)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v, config)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v, config)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v, config)
	case OpMove:
		return rewriteValueAMD64_OpMove(v, config)
	case OpMul16:
		return rewriteValueAMD64_OpMul16(v, config)
	case OpMul32:
		return rewriteValueAMD64_OpMul32(v, config)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F(v, config)
	case OpMul64:
		return rewriteValueAMD64_OpMul64(v, config)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F(v, config)
	case OpMul8:
		return rewriteValueAMD64_OpMul8(v, config)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16(v, config)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32(v, config)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v, config)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64(v, config)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v, config)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8(v, config)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v, config)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v, config)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v, config)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v, config)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v, config)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v, config)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v, config)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v, config)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck(v, config)
	case OpNot:
		return rewriteValueAMD64_OpNot(v, config)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v, config)
	case OpOr16:
		return rewriteValueAMD64_OpOr16(v, config)
	case OpOr32:
		return rewriteValueAMD64_OpOr32(v, config)
	case OpOr64:
		return rewriteValueAMD64_OpOr64(v, config)
	case OpOr8:
		return rewriteValueAMD64_OpOr8(v, config)
	case OpOrB:
		return rewriteValueAMD64_OpOrB(v, config)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v, config)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v, config)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v, config)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v, config)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v, config)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v, config)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v, config)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v, config)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v, config)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v, config)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v, config)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v, config)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v, config)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v, config)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v, config)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v, config)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v, config)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v, config)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v, config)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v, config)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v, config)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v, config)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v, config)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v, config)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v, config)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v, config)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v, config)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v, config)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v, config)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v, config)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v, config)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v, config)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32(v, config)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64(v, config)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64(v, config)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16(v, config)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32(v, config)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64(v, config)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt(v, config)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall(v, config)
	case OpStore:
		return rewriteValueAMD64_OpStore(v, config)
	case OpSub16:
		return rewriteValueAMD64_OpSub16(v, config)
	case OpSub32:
		return rewriteValueAMD64_OpSub32(v, config)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F(v, config)
	case OpSub64:
		return rewriteValueAMD64_OpSub64(v, config)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F(v, config)
	case OpSub8:
		return rewriteValueAMD64_OpSub8(v, config)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr(v, config)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8(v, config)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16(v, config)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8(v, config)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16(v, config)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32(v, config)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8(v, config)
	case OpXor16:
		return rewriteValueAMD64_OpXor16(v, config)
	case OpXor32:
		return rewriteValueAMD64_OpXor32(v, config)
	case OpXor64:
		return rewriteValueAMD64_OpXor64(v, config)
	case OpXor8:
		return rewriteValueAMD64_OpXor8(v, config)
	case OpZero:
		return rewriteValueAMD64_OpZero(v, config)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32(v, config)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64(v, config)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64(v, config)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16(v, config)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32(v, config)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64(v, config)
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
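// Every rewrite function in this file follows the same generated shape: one
// for-loop per rule, matching the pattern in the // match: comment, checking
// the // cond: guard, and rewriting v in place via v.reset on success. As a
// worked instance of the constant-folding rule above (an illustration, not a
// generated rule):
//
//	(ADDLconst [1] (MOVLconst [2147483647]))
//	  -> (MOVLconst [int64(int32(1+2147483647))])
//	  -> (MOVLconst [-2147483648])
//
// i.e. the int64(int32(...)) conversion makes the 32-bit addition wrap the
// same way the ADDL instruction would, before re-sign-extending into AuxInt.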
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQconst [c] y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} x) y)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
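// The SHLQconst rules above fuse a shift-and-add into a single scaled LEAQ.
// For example (an illustration, not part of the generated rules), a Go
// expression such as base + idx*8 reaches this pass as
//
//	(ADDQ base (SHLQconst [3] idx))
//
// and is rewritten to (LEAQ8 base idx), which the assembler can emit as one
// LEAQ (base)(idx*8) instruction instead of a shift followed by an add.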
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
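// The is32Bit(c+d) guards above matter because the AuxInt of ADDQconst and
// the LEAQ family must still fit in a signed 32-bit immediate/displacement
// when the instruction is encoded. Illustrative outcomes (not generated
// rules):
//
//	(ADDQconst [8] (ADDQconst [16] x))         -> (ADDQconst [24] x)
//	(ADDQconst [0x7FFFFFFF] (ADDQconst [1] x)) -> left alone, since
//	                                              0x80000000 is not a
//	                                              signed 32-bit value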
func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
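// The ANDLconst identities above are the usual boolean-algebra cases, with
// the guards written as int32(c)==0 and int32(c)==-1 because AuxInt stores
// the 32-bit constant sign-extended into an int64. Illustrative instances
// (not generated rules):
//
//	(ANDLconst [0] x)  -> (MOVLconst [0])   // x & 0 == 0
//	(ANDLconst [-1] x) -> x                 // x & 0xFFFFFFFF == x
//	(ANDL x x)         -> x                 // idempotence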
func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
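// The three mask rules above recognize AND with 0xFF, 0xFFFF and 0xFFFFFFFF
// as zero-extensions and replace the AND with the dedicated zero-extending
// move. An illustrative instance (not part of the generated rules):
//
//	(ANDQconst [0xFF] x) -> (MOVBQZX x)
//
// so masking out the low byte costs one register-to-register move instead
// of a 64-bit AND with an immediate.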
func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
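// Two details of the byte-compare rules above: constants are narrowed with
// int64(int8(c)) so the AuxInt carried by CMPBconst is the value the 8-bit
// comparison actually sees, and a compare against zero is turned into a
// TEST, e.g. (illustrative)
//
//	(CMPBconst x [0]) -> (TESTB x x)
//
// which sets the same flags without needing an immediate operand. When the
// constant is on the left, the operands are swapped and the result wrapped
// in InvertFlags so that later lowering can undo the swap.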
func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
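// When both CMPLconst operands are constants, the rules above fold the
// comparison all the way to a flag constant that records the signed and the
// unsigned ordering at once. An illustrative instance:
//
//	(CMPLconst (MOVLconst [-1]) [0]) -> (FlagLT_UGT)
//
// since int32(-1) < 0 in the signed order but uint32(-1) = 0xFFFFFFFF > 0
// in the unsigned order; SETx/branch lowering then picks whichever half of
// the flag constant it needs.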
func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
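// The MOVBQZX/MOVWQZX/MOVLQZX rules above exploit value-range knowledge: a
// zero-extended byte is at most 0xFF, so comparing it against any larger
// constant is decided statically. An illustrative instance:
//
//	(CMPQconst (MOVBQZX x) [256]) -> (FlagLT_ULT)
//
// Similarly, (SHRQconst _ [c]) is known to be below 1<<(64-c); the
// 0 <= n && 0 < c && ... guard checks that n is at least that bound before
// folding the compare to FlagLT_ULT.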
func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
v_0.Op != OpAMD64MOVLconst { 2278 break 2279 } 2280 x := v_0.AuxInt 2281 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 2282 break 2283 } 2284 v.reset(OpAMD64FlagLT_ULT) 2285 return true 2286 } 2287 // match: (CMPWconst (MOVLconst [x]) [y]) 2288 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 2289 // result: (FlagLT_UGT) 2290 for { 2291 y := v.AuxInt 2292 v_0 := v.Args[0] 2293 if v_0.Op != OpAMD64MOVLconst { 2294 break 2295 } 2296 x := v_0.AuxInt 2297 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 2298 break 2299 } 2300 v.reset(OpAMD64FlagLT_UGT) 2301 return true 2302 } 2303 // match: (CMPWconst (MOVLconst [x]) [y]) 2304 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 2305 // result: (FlagGT_ULT) 2306 for { 2307 y := v.AuxInt 2308 v_0 := v.Args[0] 2309 if v_0.Op != OpAMD64MOVLconst { 2310 break 2311 } 2312 x := v_0.AuxInt 2313 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 2314 break 2315 } 2316 v.reset(OpAMD64FlagGT_ULT) 2317 return true 2318 } 2319 // match: (CMPWconst (MOVLconst [x]) [y]) 2320 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 2321 // result: (FlagGT_UGT) 2322 for { 2323 y := v.AuxInt 2324 v_0 := v.Args[0] 2325 if v_0.Op != OpAMD64MOVLconst { 2326 break 2327 } 2328 x := v_0.AuxInt 2329 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 2330 break 2331 } 2332 v.reset(OpAMD64FlagGT_UGT) 2333 return true 2334 } 2335 // match: (CMPWconst (ANDLconst _ [m]) [n]) 2336 // cond: 0 <= int16(m) && int16(m) < int16(n) 2337 // result: (FlagLT_ULT) 2338 for { 2339 n := v.AuxInt 2340 v_0 := v.Args[0] 2341 if v_0.Op != OpAMD64ANDLconst { 2342 break 2343 } 2344 m := v_0.AuxInt 2345 if !(0 <= int16(m) && int16(m) < int16(n)) { 2346 break 2347 } 2348 v.reset(OpAMD64FlagLT_ULT) 2349 return true 2350 } 2351 // match: (CMPWconst (ANDL x y) [0]) 2352 // cond: 2353 // result: (TESTW x y) 2354 for { 2355 if v.AuxInt != 0 { 2356 break 2357 } 2358 v_0 := v.Args[0] 2359 if v_0.Op != OpAMD64ANDL { 2360 break 2361 } 2362 x := v_0.Args[0] 2363 y := v_0.Args[1] 2364 v.reset(OpAMD64TESTW) 2365 v.AddArg(x) 2366 v.AddArg(y) 2367 return true 2368 } 2369 // match: (CMPWconst (ANDLconst [c] x) [0]) 2370 // cond: 2371 // result: (TESTWconst [int64(int16(c))] x) 2372 for { 2373 if v.AuxInt != 0 { 2374 break 2375 } 2376 v_0 := v.Args[0] 2377 if v_0.Op != OpAMD64ANDLconst { 2378 break 2379 } 2380 c := v_0.AuxInt 2381 x := v_0.Args[0] 2382 v.reset(OpAMD64TESTWconst) 2383 v.AuxInt = int64(int16(c)) 2384 v.AddArg(x) 2385 return true 2386 } 2387 // match: (CMPWconst x [0]) 2388 // cond: 2389 // result: (TESTW x x) 2390 for { 2391 if v.AuxInt != 0 { 2392 break 2393 } 2394 x := v.Args[0] 2395 v.reset(OpAMD64TESTW) 2396 v.AddArg(x) 2397 v.AddArg(x) 2398 return true 2399 } 2400 return false 2401 } 2402 func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool { 2403 b := v.Block 2404 _ = b 2405 // match: (LEAL [c] {s} (ADDLconst [d] x)) 2406 // cond: is32Bit(c+d) 2407 // result: (LEAL [c+d] {s} x) 2408 for { 2409 c := v.AuxInt 2410 s := v.Aux 2411 v_0 := v.Args[0] 2412 if v_0.Op != OpAMD64ADDLconst { 2413 break 2414 } 2415 d := v_0.AuxInt 2416 x := v_0.Args[0] 2417 if !(is32Bit(c + d)) { 2418 break 2419 } 2420 v.reset(OpAMD64LEAL) 2421 v.AuxInt = c + d 2422 v.Aux = s 2423 v.AddArg(x) 2424 return true 2425 } 2426 return false 2427 } 2428 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { 2429 b := v.Block 2430 _ = b 2431 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 2432 // cond: is32Bit(c+d) 2433 // result: (LEAQ [c+d] {s} x) 2434 for { 2435 c := v.AuxInt 2436 s := v.Aux 
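// Editorial note (not produced by gen/*.go): this rule folds the constant of an inner
// ADDQconst into the LEAQ displacement, e.g. (LEAQ [8] {s} (ADDQconst [16] x)) becomes
// (LEAQ [24] {s} x). The is32Bit(c+d) guard is needed because AuxInt is an int64 while
// x86-64 addressing modes encode only a signed 32-bit displacement.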
2437 v_0 := v.Args[0] 2438 if v_0.Op != OpAMD64ADDQconst { 2439 break 2440 } 2441 d := v_0.AuxInt 2442 x := v_0.Args[0] 2443 if !(is32Bit(c + d)) { 2444 break 2445 } 2446 v.reset(OpAMD64LEAQ) 2447 v.AuxInt = c + d 2448 v.Aux = s 2449 v.AddArg(x) 2450 return true 2451 } 2452 // match: (LEAQ [c] {s} (ADDQ x y)) 2453 // cond: x.Op != OpSB && y.Op != OpSB 2454 // result: (LEAQ1 [c] {s} x y) 2455 for { 2456 c := v.AuxInt 2457 s := v.Aux 2458 v_0 := v.Args[0] 2459 if v_0.Op != OpAMD64ADDQ { 2460 break 2461 } 2462 x := v_0.Args[0] 2463 y := v_0.Args[1] 2464 if !(x.Op != OpSB && y.Op != OpSB) { 2465 break 2466 } 2467 v.reset(OpAMD64LEAQ1) 2468 v.AuxInt = c 2469 v.Aux = s 2470 v.AddArg(x) 2471 v.AddArg(y) 2472 return true 2473 } 2474 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 2475 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2476 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 2477 for { 2478 off1 := v.AuxInt 2479 sym1 := v.Aux 2480 v_0 := v.Args[0] 2481 if v_0.Op != OpAMD64LEAQ { 2482 break 2483 } 2484 off2 := v_0.AuxInt 2485 sym2 := v_0.Aux 2486 x := v_0.Args[0] 2487 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2488 break 2489 } 2490 v.reset(OpAMD64LEAQ) 2491 v.AuxInt = off1 + off2 2492 v.Aux = mergeSym(sym1, sym2) 2493 v.AddArg(x) 2494 return true 2495 } 2496 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 2497 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2498 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2499 for { 2500 off1 := v.AuxInt 2501 sym1 := v.Aux 2502 v_0 := v.Args[0] 2503 if v_0.Op != OpAMD64LEAQ1 { 2504 break 2505 } 2506 off2 := v_0.AuxInt 2507 sym2 := v_0.Aux 2508 x := v_0.Args[0] 2509 y := v_0.Args[1] 2510 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2511 break 2512 } 2513 v.reset(OpAMD64LEAQ1) 2514 v.AuxInt = off1 + off2 2515 v.Aux = mergeSym(sym1, sym2) 2516 v.AddArg(x) 2517 v.AddArg(y) 2518 return true 2519 } 2520 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 2521 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2522 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 2523 for { 2524 off1 := v.AuxInt 2525 sym1 := v.Aux 2526 v_0 := v.Args[0] 2527 if v_0.Op != OpAMD64LEAQ2 { 2528 break 2529 } 2530 off2 := v_0.AuxInt 2531 sym2 := v_0.Aux 2532 x := v_0.Args[0] 2533 y := v_0.Args[1] 2534 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2535 break 2536 } 2537 v.reset(OpAMD64LEAQ2) 2538 v.AuxInt = off1 + off2 2539 v.Aux = mergeSym(sym1, sym2) 2540 v.AddArg(x) 2541 v.AddArg(y) 2542 return true 2543 } 2544 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 2545 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2546 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 2547 for { 2548 off1 := v.AuxInt 2549 sym1 := v.Aux 2550 v_0 := v.Args[0] 2551 if v_0.Op != OpAMD64LEAQ4 { 2552 break 2553 } 2554 off2 := v_0.AuxInt 2555 sym2 := v_0.Aux 2556 x := v_0.Args[0] 2557 y := v_0.Args[1] 2558 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2559 break 2560 } 2561 v.reset(OpAMD64LEAQ4) 2562 v.AuxInt = off1 + off2 2563 v.Aux = mergeSym(sym1, sym2) 2564 v.AddArg(x) 2565 v.AddArg(y) 2566 return true 2567 } 2568 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 2569 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2570 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 2571 for { 2572 off1 := v.AuxInt 2573 sym1 := v.Aux 2574 v_0 := v.Args[0] 2575 if v_0.Op != OpAMD64LEAQ8 { 2576 break 2577 } 2578 off2 := v_0.AuxInt 2579 sym2 := v_0.Aux 2580 x := v_0.Args[0] 2581 y 
:= v_0.Args[1] 2582 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2583 break 2584 } 2585 v.reset(OpAMD64LEAQ8) 2586 v.AuxInt = off1 + off2 2587 v.Aux = mergeSym(sym1, sym2) 2588 v.AddArg(x) 2589 v.AddArg(y) 2590 return true 2591 } 2592 return false 2593 } 2594 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { 2595 b := v.Block 2596 _ = b 2597 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 2598 // cond: is32Bit(c+d) && x.Op != OpSB 2599 // result: (LEAQ1 [c+d] {s} x y) 2600 for { 2601 c := v.AuxInt 2602 s := v.Aux 2603 v_0 := v.Args[0] 2604 if v_0.Op != OpAMD64ADDQconst { 2605 break 2606 } 2607 d := v_0.AuxInt 2608 x := v_0.Args[0] 2609 y := v.Args[1] 2610 if !(is32Bit(c+d) && x.Op != OpSB) { 2611 break 2612 } 2613 v.reset(OpAMD64LEAQ1) 2614 v.AuxInt = c + d 2615 v.Aux = s 2616 v.AddArg(x) 2617 v.AddArg(y) 2618 return true 2619 } 2620 // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) 2621 // cond: is32Bit(c+d) && y.Op != OpSB 2622 // result: (LEAQ1 [c+d] {s} x y) 2623 for { 2624 c := v.AuxInt 2625 s := v.Aux 2626 x := v.Args[0] 2627 v_1 := v.Args[1] 2628 if v_1.Op != OpAMD64ADDQconst { 2629 break 2630 } 2631 d := v_1.AuxInt 2632 y := v_1.Args[0] 2633 if !(is32Bit(c+d) && y.Op != OpSB) { 2634 break 2635 } 2636 v.reset(OpAMD64LEAQ1) 2637 v.AuxInt = c + d 2638 v.Aux = s 2639 v.AddArg(x) 2640 v.AddArg(y) 2641 return true 2642 } 2643 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 2644 // cond: 2645 // result: (LEAQ2 [c] {s} x y) 2646 for { 2647 c := v.AuxInt 2648 s := v.Aux 2649 x := v.Args[0] 2650 v_1 := v.Args[1] 2651 if v_1.Op != OpAMD64SHLQconst { 2652 break 2653 } 2654 if v_1.AuxInt != 1 { 2655 break 2656 } 2657 y := v_1.Args[0] 2658 v.reset(OpAMD64LEAQ2) 2659 v.AuxInt = c 2660 v.Aux = s 2661 v.AddArg(x) 2662 v.AddArg(y) 2663 return true 2664 } 2665 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 2666 // cond: 2667 // result: (LEAQ2 [c] {s} y x) 2668 for { 2669 c := v.AuxInt 2670 s := v.Aux 2671 v_0 := v.Args[0] 2672 if v_0.Op != OpAMD64SHLQconst { 2673 break 2674 } 2675 if v_0.AuxInt != 1 { 2676 break 2677 } 2678 x := v_0.Args[0] 2679 y := v.Args[1] 2680 v.reset(OpAMD64LEAQ2) 2681 v.AuxInt = c 2682 v.Aux = s 2683 v.AddArg(y) 2684 v.AddArg(x) 2685 return true 2686 } 2687 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 2688 // cond: 2689 // result: (LEAQ4 [c] {s} x y) 2690 for { 2691 c := v.AuxInt 2692 s := v.Aux 2693 x := v.Args[0] 2694 v_1 := v.Args[1] 2695 if v_1.Op != OpAMD64SHLQconst { 2696 break 2697 } 2698 if v_1.AuxInt != 2 { 2699 break 2700 } 2701 y := v_1.Args[0] 2702 v.reset(OpAMD64LEAQ4) 2703 v.AuxInt = c 2704 v.Aux = s 2705 v.AddArg(x) 2706 v.AddArg(y) 2707 return true 2708 } 2709 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 2710 // cond: 2711 // result: (LEAQ4 [c] {s} y x) 2712 for { 2713 c := v.AuxInt 2714 s := v.Aux 2715 v_0 := v.Args[0] 2716 if v_0.Op != OpAMD64SHLQconst { 2717 break 2718 } 2719 if v_0.AuxInt != 2 { 2720 break 2721 } 2722 x := v_0.Args[0] 2723 y := v.Args[1] 2724 v.reset(OpAMD64LEAQ4) 2725 v.AuxInt = c 2726 v.Aux = s 2727 v.AddArg(y) 2728 v.AddArg(x) 2729 return true 2730 } 2731 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 2732 // cond: 2733 // result: (LEAQ8 [c] {s} x y) 2734 for { 2735 c := v.AuxInt 2736 s := v.Aux 2737 x := v.Args[0] 2738 v_1 := v.Args[1] 2739 if v_1.Op != OpAMD64SHLQconst { 2740 break 2741 } 2742 if v_1.AuxInt != 3 { 2743 break 2744 } 2745 y := v_1.Args[0] 2746 v.reset(OpAMD64LEAQ8) 2747 v.AuxInt = c 2748 v.Aux = s 2749 v.AddArg(x) 2750 v.AddArg(y) 2751 return true 2752 } 2753 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) 
y) 2754 // cond: 2755 // result: (LEAQ8 [c] {s} y x) 2756 for { 2757 c := v.AuxInt 2758 s := v.Aux 2759 v_0 := v.Args[0] 2760 if v_0.Op != OpAMD64SHLQconst { 2761 break 2762 } 2763 if v_0.AuxInt != 3 { 2764 break 2765 } 2766 x := v_0.Args[0] 2767 y := v.Args[1] 2768 v.reset(OpAMD64LEAQ8) 2769 v.AuxInt = c 2770 v.Aux = s 2771 v.AddArg(y) 2772 v.AddArg(x) 2773 return true 2774 } 2775 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 2776 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 2777 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2778 for { 2779 off1 := v.AuxInt 2780 sym1 := v.Aux 2781 v_0 := v.Args[0] 2782 if v_0.Op != OpAMD64LEAQ { 2783 break 2784 } 2785 off2 := v_0.AuxInt 2786 sym2 := v_0.Aux 2787 x := v_0.Args[0] 2788 y := v.Args[1] 2789 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 2790 break 2791 } 2792 v.reset(OpAMD64LEAQ1) 2793 v.AuxInt = off1 + off2 2794 v.Aux = mergeSym(sym1, sym2) 2795 v.AddArg(x) 2796 v.AddArg(y) 2797 return true 2798 } 2799 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 2800 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 2801 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2802 for { 2803 off1 := v.AuxInt 2804 sym1 := v.Aux 2805 x := v.Args[0] 2806 v_1 := v.Args[1] 2807 if v_1.Op != OpAMD64LEAQ { 2808 break 2809 } 2810 off2 := v_1.AuxInt 2811 sym2 := v_1.Aux 2812 y := v_1.Args[0] 2813 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 2814 break 2815 } 2816 v.reset(OpAMD64LEAQ1) 2817 v.AuxInt = off1 + off2 2818 v.Aux = mergeSym(sym1, sym2) 2819 v.AddArg(x) 2820 v.AddArg(y) 2821 return true 2822 } 2823 return false 2824 } 2825 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 2826 b := v.Block 2827 _ = b 2828 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 2829 // cond: is32Bit(c+d) && x.Op != OpSB 2830 // result: (LEAQ2 [c+d] {s} x y) 2831 for { 2832 c := v.AuxInt 2833 s := v.Aux 2834 v_0 := v.Args[0] 2835 if v_0.Op != OpAMD64ADDQconst { 2836 break 2837 } 2838 d := v_0.AuxInt 2839 x := v_0.Args[0] 2840 y := v.Args[1] 2841 if !(is32Bit(c+d) && x.Op != OpSB) { 2842 break 2843 } 2844 v.reset(OpAMD64LEAQ2) 2845 v.AuxInt = c + d 2846 v.Aux = s 2847 v.AddArg(x) 2848 v.AddArg(y) 2849 return true 2850 } 2851 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 2852 // cond: is32Bit(c+2*d) && y.Op != OpSB 2853 // result: (LEAQ2 [c+2*d] {s} x y) 2854 for { 2855 c := v.AuxInt 2856 s := v.Aux 2857 x := v.Args[0] 2858 v_1 := v.Args[1] 2859 if v_1.Op != OpAMD64ADDQconst { 2860 break 2861 } 2862 d := v_1.AuxInt 2863 y := v_1.Args[0] 2864 if !(is32Bit(c+2*d) && y.Op != OpSB) { 2865 break 2866 } 2867 v.reset(OpAMD64LEAQ2) 2868 v.AuxInt = c + 2*d 2869 v.Aux = s 2870 v.AddArg(x) 2871 v.AddArg(y) 2872 return true 2873 } 2874 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 2875 // cond: 2876 // result: (LEAQ4 [c] {s} x y) 2877 for { 2878 c := v.AuxInt 2879 s := v.Aux 2880 x := v.Args[0] 2881 v_1 := v.Args[1] 2882 if v_1.Op != OpAMD64SHLQconst { 2883 break 2884 } 2885 if v_1.AuxInt != 1 { 2886 break 2887 } 2888 y := v_1.Args[0] 2889 v.reset(OpAMD64LEAQ4) 2890 v.AuxInt = c 2891 v.Aux = s 2892 v.AddArg(x) 2893 v.AddArg(y) 2894 return true 2895 } 2896 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 2897 // cond: 2898 // result: (LEAQ8 [c] {s} x y) 2899 for { 2900 c := v.AuxInt 2901 s := v.Aux 2902 x := v.Args[0] 2903 v_1 := v.Args[1] 2904 if v_1.Op != OpAMD64SHLQconst { 2905 break 2906 } 2907 if v_1.AuxInt != 2 { 2908 break 2909 } 2910 y := 
v_1.Args[0] 2911 v.reset(OpAMD64LEAQ8) 2912 v.AuxInt = c 2913 v.Aux = s 2914 v.AddArg(x) 2915 v.AddArg(y) 2916 return true 2917 } 2918 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 2919 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 2920 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 2921 for { 2922 off1 := v.AuxInt 2923 sym1 := v.Aux 2924 v_0 := v.Args[0] 2925 if v_0.Op != OpAMD64LEAQ { 2926 break 2927 } 2928 off2 := v_0.AuxInt 2929 sym2 := v_0.Aux 2930 x := v_0.Args[0] 2931 y := v.Args[1] 2932 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 2933 break 2934 } 2935 v.reset(OpAMD64LEAQ2) 2936 v.AuxInt = off1 + off2 2937 v.Aux = mergeSym(sym1, sym2) 2938 v.AddArg(x) 2939 v.AddArg(y) 2940 return true 2941 } 2942 return false 2943 } 2944 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 2945 b := v.Block 2946 _ = b 2947 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 2948 // cond: is32Bit(c+d) && x.Op != OpSB 2949 // result: (LEAQ4 [c+d] {s} x y) 2950 for { 2951 c := v.AuxInt 2952 s := v.Aux 2953 v_0 := v.Args[0] 2954 if v_0.Op != OpAMD64ADDQconst { 2955 break 2956 } 2957 d := v_0.AuxInt 2958 x := v_0.Args[0] 2959 y := v.Args[1] 2960 if !(is32Bit(c+d) && x.Op != OpSB) { 2961 break 2962 } 2963 v.reset(OpAMD64LEAQ4) 2964 v.AuxInt = c + d 2965 v.Aux = s 2966 v.AddArg(x) 2967 v.AddArg(y) 2968 return true 2969 } 2970 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 2971 // cond: is32Bit(c+4*d) && y.Op != OpSB 2972 // result: (LEAQ4 [c+4*d] {s} x y) 2973 for { 2974 c := v.AuxInt 2975 s := v.Aux 2976 x := v.Args[0] 2977 v_1 := v.Args[1] 2978 if v_1.Op != OpAMD64ADDQconst { 2979 break 2980 } 2981 d := v_1.AuxInt 2982 y := v_1.Args[0] 2983 if !(is32Bit(c+4*d) && y.Op != OpSB) { 2984 break 2985 } 2986 v.reset(OpAMD64LEAQ4) 2987 v.AuxInt = c + 4*d 2988 v.Aux = s 2989 v.AddArg(x) 2990 v.AddArg(y) 2991 return true 2992 } 2993 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 2994 // cond: 2995 // result: (LEAQ8 [c] {s} x y) 2996 for { 2997 c := v.AuxInt 2998 s := v.Aux 2999 x := v.Args[0] 3000 v_1 := v.Args[1] 3001 if v_1.Op != OpAMD64SHLQconst { 3002 break 3003 } 3004 if v_1.AuxInt != 1 { 3005 break 3006 } 3007 y := v_1.Args[0] 3008 v.reset(OpAMD64LEAQ8) 3009 v.AuxInt = c 3010 v.Aux = s 3011 v.AddArg(x) 3012 v.AddArg(y) 3013 return true 3014 } 3015 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3016 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3017 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3018 for { 3019 off1 := v.AuxInt 3020 sym1 := v.Aux 3021 v_0 := v.Args[0] 3022 if v_0.Op != OpAMD64LEAQ { 3023 break 3024 } 3025 off2 := v_0.AuxInt 3026 sym2 := v_0.Aux 3027 x := v_0.Args[0] 3028 y := v.Args[1] 3029 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3030 break 3031 } 3032 v.reset(OpAMD64LEAQ4) 3033 v.AuxInt = off1 + off2 3034 v.Aux = mergeSym(sym1, sym2) 3035 v.AddArg(x) 3036 v.AddArg(y) 3037 return true 3038 } 3039 return false 3040 } 3041 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 3042 b := v.Block 3043 _ = b 3044 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 3045 // cond: is32Bit(c+d) && x.Op != OpSB 3046 // result: (LEAQ8 [c+d] {s} x y) 3047 for { 3048 c := v.AuxInt 3049 s := v.Aux 3050 v_0 := v.Args[0] 3051 if v_0.Op != OpAMD64ADDQconst { 3052 break 3053 } 3054 d := v_0.AuxInt 3055 x := v_0.Args[0] 3056 y := v.Args[1] 3057 if !(is32Bit(c+d) && x.Op != OpSB) { 3058 break 3059 } 3060 v.reset(OpAMD64LEAQ8) 3061 v.AuxInt = c + d 
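// Editorial note (not produced by gen/*.go): d is added to the displacement unscaled
// because the ADDQconst sits on the base operand of the base+8*index address; the
// companion rule just below folds a constant on the index operand y instead, so there
// the displacement grows by 8*d and the guard is is32Bit(c+8*d).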
3062 v.Aux = s 3063 v.AddArg(x) 3064 v.AddArg(y) 3065 return true 3066 } 3067 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 3068 // cond: is32Bit(c+8*d) && y.Op != OpSB 3069 // result: (LEAQ8 [c+8*d] {s} x y) 3070 for { 3071 c := v.AuxInt 3072 s := v.Aux 3073 x := v.Args[0] 3074 v_1 := v.Args[1] 3075 if v_1.Op != OpAMD64ADDQconst { 3076 break 3077 } 3078 d := v_1.AuxInt 3079 y := v_1.Args[0] 3080 if !(is32Bit(c+8*d) && y.Op != OpSB) { 3081 break 3082 } 3083 v.reset(OpAMD64LEAQ8) 3084 v.AuxInt = c + 8*d 3085 v.Aux = s 3086 v.AddArg(x) 3087 v.AddArg(y) 3088 return true 3089 } 3090 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3091 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3092 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3093 for { 3094 off1 := v.AuxInt 3095 sym1 := v.Aux 3096 v_0 := v.Args[0] 3097 if v_0.Op != OpAMD64LEAQ { 3098 break 3099 } 3100 off2 := v_0.AuxInt 3101 sym2 := v_0.Aux 3102 x := v_0.Args[0] 3103 y := v.Args[1] 3104 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3105 break 3106 } 3107 v.reset(OpAMD64LEAQ8) 3108 v.AuxInt = off1 + off2 3109 v.Aux = mergeSym(sym1, sym2) 3110 v.AddArg(x) 3111 v.AddArg(y) 3112 return true 3113 } 3114 return false 3115 } 3116 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 3117 b := v.Block 3118 _ = b 3119 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 3120 // cond: x.Uses == 1 && clobber(x) 3121 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3122 for { 3123 x := v.Args[0] 3124 if x.Op != OpAMD64MOVBload { 3125 break 3126 } 3127 off := x.AuxInt 3128 sym := x.Aux 3129 ptr := x.Args[0] 3130 mem := x.Args[1] 3131 if !(x.Uses == 1 && clobber(x)) { 3132 break 3133 } 3134 b = x.Block 3135 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3136 v.reset(OpCopy) 3137 v.AddArg(v0) 3138 v0.AuxInt = off 3139 v0.Aux = sym 3140 v0.AddArg(ptr) 3141 v0.AddArg(mem) 3142 return true 3143 } 3144 // match: (MOVBQSX (ANDLconst [c] x)) 3145 // cond: c & 0x80 == 0 3146 // result: (ANDLconst [c & 0x7f] x) 3147 for { 3148 v_0 := v.Args[0] 3149 if v_0.Op != OpAMD64ANDLconst { 3150 break 3151 } 3152 c := v_0.AuxInt 3153 x := v_0.Args[0] 3154 if !(c&0x80 == 0) { 3155 break 3156 } 3157 v.reset(OpAMD64ANDLconst) 3158 v.AuxInt = c & 0x7f 3159 v.AddArg(x) 3160 return true 3161 } 3162 return false 3163 } 3164 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 3165 b := v.Block 3166 _ = b 3167 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3168 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3169 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3170 for { 3171 off1 := v.AuxInt 3172 sym1 := v.Aux 3173 v_0 := v.Args[0] 3174 if v_0.Op != OpAMD64LEAQ { 3175 break 3176 } 3177 off2 := v_0.AuxInt 3178 sym2 := v_0.Aux 3179 base := v_0.Args[0] 3180 mem := v.Args[1] 3181 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3182 break 3183 } 3184 v.reset(OpAMD64MOVBQSXload) 3185 v.AuxInt = off1 + off2 3186 v.Aux = mergeSym(sym1, sym2) 3187 v.AddArg(base) 3188 v.AddArg(mem) 3189 return true 3190 } 3191 return false 3192 } 3193 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 3194 b := v.Block 3195 _ = b 3196 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 3197 // cond: x.Uses == 1 && clobber(x) 3198 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3199 for { 3200 x := v.Args[0] 3201 if x.Op != OpAMD64MOVBload { 3202 break 3203 } 3204 off := x.AuxInt 3205 sym := 
x.Aux 3206 ptr := x.Args[0] 3207 mem := x.Args[1] 3208 if !(x.Uses == 1 && clobber(x)) { 3209 break 3210 } 3211 b = x.Block 3212 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3213 v.reset(OpCopy) 3214 v.AddArg(v0) 3215 v0.AuxInt = off 3216 v0.Aux = sym 3217 v0.AddArg(ptr) 3218 v0.AddArg(mem) 3219 return true 3220 } 3221 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 3222 // cond: x.Uses == 1 && clobber(x) 3223 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 3224 for { 3225 x := v.Args[0] 3226 if x.Op != OpAMD64MOVBloadidx1 { 3227 break 3228 } 3229 off := x.AuxInt 3230 sym := x.Aux 3231 ptr := x.Args[0] 3232 idx := x.Args[1] 3233 mem := x.Args[2] 3234 if !(x.Uses == 1 && clobber(x)) { 3235 break 3236 } 3237 b = x.Block 3238 v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type) 3239 v.reset(OpCopy) 3240 v.AddArg(v0) 3241 v0.AuxInt = off 3242 v0.Aux = sym 3243 v0.AddArg(ptr) 3244 v0.AddArg(idx) 3245 v0.AddArg(mem) 3246 return true 3247 } 3248 // match: (MOVBQZX (ANDLconst [c] x)) 3249 // cond: 3250 // result: (ANDLconst [c & 0xff] x) 3251 for { 3252 v_0 := v.Args[0] 3253 if v_0.Op != OpAMD64ANDLconst { 3254 break 3255 } 3256 c := v_0.AuxInt 3257 x := v_0.Args[0] 3258 v.reset(OpAMD64ANDLconst) 3259 v.AuxInt = c & 0xff 3260 v.AddArg(x) 3261 return true 3262 } 3263 return false 3264 } 3265 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { 3266 b := v.Block 3267 _ = b 3268 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3269 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3270 // result: x 3271 for { 3272 off := v.AuxInt 3273 sym := v.Aux 3274 ptr := v.Args[0] 3275 v_1 := v.Args[1] 3276 if v_1.Op != OpAMD64MOVBstore { 3277 break 3278 } 3279 off2 := v_1.AuxInt 3280 sym2 := v_1.Aux 3281 ptr2 := v_1.Args[0] 3282 x := v_1.Args[1] 3283 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3284 break 3285 } 3286 v.reset(OpCopy) 3287 v.Type = x.Type 3288 v.AddArg(x) 3289 return true 3290 } 3291 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 3292 // cond: is32Bit(off1+off2) 3293 // result: (MOVBload [off1+off2] {sym} ptr mem) 3294 for { 3295 off1 := v.AuxInt 3296 sym := v.Aux 3297 v_0 := v.Args[0] 3298 if v_0.Op != OpAMD64ADDQconst { 3299 break 3300 } 3301 off2 := v_0.AuxInt 3302 ptr := v_0.Args[0] 3303 mem := v.Args[1] 3304 if !(is32Bit(off1 + off2)) { 3305 break 3306 } 3307 v.reset(OpAMD64MOVBload) 3308 v.AuxInt = off1 + off2 3309 v.Aux = sym 3310 v.AddArg(ptr) 3311 v.AddArg(mem) 3312 return true 3313 } 3314 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3315 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3316 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3317 for { 3318 off1 := v.AuxInt 3319 sym1 := v.Aux 3320 v_0 := v.Args[0] 3321 if v_0.Op != OpAMD64LEAQ { 3322 break 3323 } 3324 off2 := v_0.AuxInt 3325 sym2 := v_0.Aux 3326 base := v_0.Args[0] 3327 mem := v.Args[1] 3328 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3329 break 3330 } 3331 v.reset(OpAMD64MOVBload) 3332 v.AuxInt = off1 + off2 3333 v.Aux = mergeSym(sym1, sym2) 3334 v.AddArg(base) 3335 v.AddArg(mem) 3336 return true 3337 } 3338 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 3339 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3340 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 3341 for { 3342 off1 := v.AuxInt 3343 sym1 := v.Aux 3344 v_0 := v.Args[0] 3345 if v_0.Op != OpAMD64LEAQ1 { 3346 break 3347 } 3348 off2 := 
v_0.AuxInt 3349 sym2 := v_0.Aux 3350 ptr := v_0.Args[0] 3351 idx := v_0.Args[1] 3352 mem := v.Args[1] 3353 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3354 break 3355 } 3356 v.reset(OpAMD64MOVBloadidx1) 3357 v.AuxInt = off1 + off2 3358 v.Aux = mergeSym(sym1, sym2) 3359 v.AddArg(ptr) 3360 v.AddArg(idx) 3361 v.AddArg(mem) 3362 return true 3363 } 3364 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 3365 // cond: ptr.Op != OpSB 3366 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 3367 for { 3368 off := v.AuxInt 3369 sym := v.Aux 3370 v_0 := v.Args[0] 3371 if v_0.Op != OpAMD64ADDQ { 3372 break 3373 } 3374 ptr := v_0.Args[0] 3375 idx := v_0.Args[1] 3376 mem := v.Args[1] 3377 if !(ptr.Op != OpSB) { 3378 break 3379 } 3380 v.reset(OpAMD64MOVBloadidx1) 3381 v.AuxInt = off 3382 v.Aux = sym 3383 v.AddArg(ptr) 3384 v.AddArg(idx) 3385 v.AddArg(mem) 3386 return true 3387 } 3388 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 3389 // cond: canMergeSym(sym1, sym2) 3390 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3391 for { 3392 off1 := v.AuxInt 3393 sym1 := v.Aux 3394 v_0 := v.Args[0] 3395 if v_0.Op != OpAMD64LEAL { 3396 break 3397 } 3398 off2 := v_0.AuxInt 3399 sym2 := v_0.Aux 3400 base := v_0.Args[0] 3401 mem := v.Args[1] 3402 if !(canMergeSym(sym1, sym2)) { 3403 break 3404 } 3405 v.reset(OpAMD64MOVBload) 3406 v.AuxInt = off1 + off2 3407 v.Aux = mergeSym(sym1, sym2) 3408 v.AddArg(base) 3409 v.AddArg(mem) 3410 return true 3411 } 3412 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 3413 // cond: is32Bit(off1+off2) 3414 // result: (MOVBload [off1+off2] {sym} ptr mem) 3415 for { 3416 off1 := v.AuxInt 3417 sym := v.Aux 3418 v_0 := v.Args[0] 3419 if v_0.Op != OpAMD64ADDLconst { 3420 break 3421 } 3422 off2 := v_0.AuxInt 3423 ptr := v_0.Args[0] 3424 mem := v.Args[1] 3425 if !(is32Bit(off1 + off2)) { 3426 break 3427 } 3428 v.reset(OpAMD64MOVBload) 3429 v.AuxInt = off1 + off2 3430 v.Aux = sym 3431 v.AddArg(ptr) 3432 v.AddArg(mem) 3433 return true 3434 } 3435 return false 3436 } 3437 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 3438 b := v.Block 3439 _ = b 3440 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 3441 // cond: 3442 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3443 for { 3444 c := v.AuxInt 3445 sym := v.Aux 3446 v_0 := v.Args[0] 3447 if v_0.Op != OpAMD64ADDQconst { 3448 break 3449 } 3450 d := v_0.AuxInt 3451 ptr := v_0.Args[0] 3452 idx := v.Args[1] 3453 mem := v.Args[2] 3454 v.reset(OpAMD64MOVBloadidx1) 3455 v.AuxInt = c + d 3456 v.Aux = sym 3457 v.AddArg(ptr) 3458 v.AddArg(idx) 3459 v.AddArg(mem) 3460 return true 3461 } 3462 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 3463 // cond: 3464 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3465 for { 3466 c := v.AuxInt 3467 sym := v.Aux 3468 ptr := v.Args[0] 3469 v_1 := v.Args[1] 3470 if v_1.Op != OpAMD64ADDQconst { 3471 break 3472 } 3473 d := v_1.AuxInt 3474 idx := v_1.Args[0] 3475 mem := v.Args[2] 3476 v.reset(OpAMD64MOVBloadidx1) 3477 v.AuxInt = c + d 3478 v.Aux = sym 3479 v.AddArg(ptr) 3480 v.AddArg(idx) 3481 v.AddArg(mem) 3482 return true 3483 } 3484 return false 3485 } 3486 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 3487 b := v.Block 3488 _ = b 3489 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 3490 // cond: 3491 // result: (MOVBstore [off] {sym} ptr x mem) 3492 for { 3493 off := v.AuxInt 3494 sym := v.Aux 3495 ptr := v.Args[0] 3496 v_1 := v.Args[1] 3497 if v_1.Op != 
OpAMD64MOVBQSX { 3498 break 3499 } 3500 x := v_1.Args[0] 3501 mem := v.Args[2] 3502 v.reset(OpAMD64MOVBstore) 3503 v.AuxInt = off 3504 v.Aux = sym 3505 v.AddArg(ptr) 3506 v.AddArg(x) 3507 v.AddArg(mem) 3508 return true 3509 } 3510 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 3511 // cond: 3512 // result: (MOVBstore [off] {sym} ptr x mem) 3513 for { 3514 off := v.AuxInt 3515 sym := v.Aux 3516 ptr := v.Args[0] 3517 v_1 := v.Args[1] 3518 if v_1.Op != OpAMD64MOVBQZX { 3519 break 3520 } 3521 x := v_1.Args[0] 3522 mem := v.Args[2] 3523 v.reset(OpAMD64MOVBstore) 3524 v.AuxInt = off 3525 v.Aux = sym 3526 v.AddArg(ptr) 3527 v.AddArg(x) 3528 v.AddArg(mem) 3529 return true 3530 } 3531 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 3532 // cond: is32Bit(off1+off2) 3533 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 3534 for { 3535 off1 := v.AuxInt 3536 sym := v.Aux 3537 v_0 := v.Args[0] 3538 if v_0.Op != OpAMD64ADDQconst { 3539 break 3540 } 3541 off2 := v_0.AuxInt 3542 ptr := v_0.Args[0] 3543 val := v.Args[1] 3544 mem := v.Args[2] 3545 if !(is32Bit(off1 + off2)) { 3546 break 3547 } 3548 v.reset(OpAMD64MOVBstore) 3549 v.AuxInt = off1 + off2 3550 v.Aux = sym 3551 v.AddArg(ptr) 3552 v.AddArg(val) 3553 v.AddArg(mem) 3554 return true 3555 } 3556 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 3557 // cond: validOff(off) 3558 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 3559 for { 3560 off := v.AuxInt 3561 sym := v.Aux 3562 ptr := v.Args[0] 3563 v_1 := v.Args[1] 3564 if v_1.Op != OpAMD64MOVLconst { 3565 break 3566 } 3567 c := v_1.AuxInt 3568 mem := v.Args[2] 3569 if !(validOff(off)) { 3570 break 3571 } 3572 v.reset(OpAMD64MOVBstoreconst) 3573 v.AuxInt = makeValAndOff(int64(int8(c)), off) 3574 v.Aux = sym 3575 v.AddArg(ptr) 3576 v.AddArg(mem) 3577 return true 3578 } 3579 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 3580 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3581 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 3582 for { 3583 off1 := v.AuxInt 3584 sym1 := v.Aux 3585 v_0 := v.Args[0] 3586 if v_0.Op != OpAMD64LEAQ { 3587 break 3588 } 3589 off2 := v_0.AuxInt 3590 sym2 := v_0.Aux 3591 base := v_0.Args[0] 3592 val := v.Args[1] 3593 mem := v.Args[2] 3594 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3595 break 3596 } 3597 v.reset(OpAMD64MOVBstore) 3598 v.AuxInt = off1 + off2 3599 v.Aux = mergeSym(sym1, sym2) 3600 v.AddArg(base) 3601 v.AddArg(val) 3602 v.AddArg(mem) 3603 return true 3604 } 3605 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 3606 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3607 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 3608 for { 3609 off1 := v.AuxInt 3610 sym1 := v.Aux 3611 v_0 := v.Args[0] 3612 if v_0.Op != OpAMD64LEAQ1 { 3613 break 3614 } 3615 off2 := v_0.AuxInt 3616 sym2 := v_0.Aux 3617 ptr := v_0.Args[0] 3618 idx := v_0.Args[1] 3619 val := v.Args[1] 3620 mem := v.Args[2] 3621 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3622 break 3623 } 3624 v.reset(OpAMD64MOVBstoreidx1) 3625 v.AuxInt = off1 + off2 3626 v.Aux = mergeSym(sym1, sym2) 3627 v.AddArg(ptr) 3628 v.AddArg(idx) 3629 v.AddArg(val) 3630 v.AddArg(mem) 3631 return true 3632 } 3633 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 3634 // cond: ptr.Op != OpSB 3635 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 3636 for { 3637 off := v.AuxInt 3638 sym := v.Aux 3639 v_0 := v.Args[0] 3640 if v_0.Op != 
OpAMD64ADDQ { 3641 break 3642 } 3643 ptr := v_0.Args[0] 3644 idx := v_0.Args[1] 3645 val := v.Args[1] 3646 mem := v.Args[2] 3647 if !(ptr.Op != OpSB) { 3648 break 3649 } 3650 v.reset(OpAMD64MOVBstoreidx1) 3651 v.AuxInt = off 3652 v.Aux = sym 3653 v.AddArg(ptr) 3654 v.AddArg(idx) 3655 v.AddArg(val) 3656 v.AddArg(mem) 3657 return true 3658 } 3659 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 3660 // cond: x.Uses == 1 && clobber(x) 3661 // result: (MOVWstore [i-1] {s} p w mem) 3662 for { 3663 i := v.AuxInt 3664 s := v.Aux 3665 p := v.Args[0] 3666 v_1 := v.Args[1] 3667 if v_1.Op != OpAMD64SHRQconst { 3668 break 3669 } 3670 if v_1.AuxInt != 8 { 3671 break 3672 } 3673 w := v_1.Args[0] 3674 x := v.Args[2] 3675 if x.Op != OpAMD64MOVBstore { 3676 break 3677 } 3678 if x.AuxInt != i-1 { 3679 break 3680 } 3681 if x.Aux != s { 3682 break 3683 } 3684 if p != x.Args[0] { 3685 break 3686 } 3687 if w != x.Args[1] { 3688 break 3689 } 3690 mem := x.Args[2] 3691 if !(x.Uses == 1 && clobber(x)) { 3692 break 3693 } 3694 v.reset(OpAMD64MOVWstore) 3695 v.AuxInt = i - 1 3696 v.Aux = s 3697 v.AddArg(p) 3698 v.AddArg(w) 3699 v.AddArg(mem) 3700 return true 3701 } 3702 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 3703 // cond: x.Uses == 1 && clobber(x) 3704 // result: (MOVWstore [i-1] {s} p w0 mem) 3705 for { 3706 i := v.AuxInt 3707 s := v.Aux 3708 p := v.Args[0] 3709 v_1 := v.Args[1] 3710 if v_1.Op != OpAMD64SHRQconst { 3711 break 3712 } 3713 j := v_1.AuxInt 3714 w := v_1.Args[0] 3715 x := v.Args[2] 3716 if x.Op != OpAMD64MOVBstore { 3717 break 3718 } 3719 if x.AuxInt != i-1 { 3720 break 3721 } 3722 if x.Aux != s { 3723 break 3724 } 3725 if p != x.Args[0] { 3726 break 3727 } 3728 w0 := x.Args[1] 3729 if w0.Op != OpAMD64SHRQconst { 3730 break 3731 } 3732 if w0.AuxInt != j-8 { 3733 break 3734 } 3735 if w != w0.Args[0] { 3736 break 3737 } 3738 mem := x.Args[2] 3739 if !(x.Uses == 1 && clobber(x)) { 3740 break 3741 } 3742 v.reset(OpAMD64MOVWstore) 3743 v.AuxInt = i - 1 3744 v.Aux = s 3745 v.AddArg(p) 3746 v.AddArg(w0) 3747 v.AddArg(mem) 3748 return true 3749 } 3750 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 3751 // cond: canMergeSym(sym1, sym2) 3752 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 3753 for { 3754 off1 := v.AuxInt 3755 sym1 := v.Aux 3756 v_0 := v.Args[0] 3757 if v_0.Op != OpAMD64LEAL { 3758 break 3759 } 3760 off2 := v_0.AuxInt 3761 sym2 := v_0.Aux 3762 base := v_0.Args[0] 3763 val := v.Args[1] 3764 mem := v.Args[2] 3765 if !(canMergeSym(sym1, sym2)) { 3766 break 3767 } 3768 v.reset(OpAMD64MOVBstore) 3769 v.AuxInt = off1 + off2 3770 v.Aux = mergeSym(sym1, sym2) 3771 v.AddArg(base) 3772 v.AddArg(val) 3773 v.AddArg(mem) 3774 return true 3775 } 3776 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 3777 // cond: is32Bit(off1+off2) 3778 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 3779 for { 3780 off1 := v.AuxInt 3781 sym := v.Aux 3782 v_0 := v.Args[0] 3783 if v_0.Op != OpAMD64ADDLconst { 3784 break 3785 } 3786 off2 := v_0.AuxInt 3787 ptr := v_0.Args[0] 3788 val := v.Args[1] 3789 mem := v.Args[2] 3790 if !(is32Bit(off1 + off2)) { 3791 break 3792 } 3793 v.reset(OpAMD64MOVBstore) 3794 v.AuxInt = off1 + off2 3795 v.Aux = sym 3796 v.AddArg(ptr) 3797 v.AddArg(val) 3798 v.AddArg(mem) 3799 return true 3800 } 3801 return false 3802 } 3803 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 3804 b := v.Block 3805 _ = b 3806 // 
match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 3807 // cond: ValAndOff(sc).canAdd(off) 3808 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 3809 for { 3810 sc := v.AuxInt 3811 s := v.Aux 3812 v_0 := v.Args[0] 3813 if v_0.Op != OpAMD64ADDQconst { 3814 break 3815 } 3816 off := v_0.AuxInt 3817 ptr := v_0.Args[0] 3818 mem := v.Args[1] 3819 if !(ValAndOff(sc).canAdd(off)) { 3820 break 3821 } 3822 v.reset(OpAMD64MOVBstoreconst) 3823 v.AuxInt = ValAndOff(sc).add(off) 3824 v.Aux = s 3825 v.AddArg(ptr) 3826 v.AddArg(mem) 3827 return true 3828 } 3829 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 3830 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 3831 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 3832 for { 3833 sc := v.AuxInt 3834 sym1 := v.Aux 3835 v_0 := v.Args[0] 3836 if v_0.Op != OpAMD64LEAQ { 3837 break 3838 } 3839 off := v_0.AuxInt 3840 sym2 := v_0.Aux 3841 ptr := v_0.Args[0] 3842 mem := v.Args[1] 3843 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 3844 break 3845 } 3846 v.reset(OpAMD64MOVBstoreconst) 3847 v.AuxInt = ValAndOff(sc).add(off) 3848 v.Aux = mergeSym(sym1, sym2) 3849 v.AddArg(ptr) 3850 v.AddArg(mem) 3851 return true 3852 } 3853 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 3854 // cond: canMergeSym(sym1, sym2) 3855 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 3856 for { 3857 x := v.AuxInt 3858 sym1 := v.Aux 3859 v_0 := v.Args[0] 3860 if v_0.Op != OpAMD64LEAQ1 { 3861 break 3862 } 3863 off := v_0.AuxInt 3864 sym2 := v_0.Aux 3865 ptr := v_0.Args[0] 3866 idx := v_0.Args[1] 3867 mem := v.Args[1] 3868 if !(canMergeSym(sym1, sym2)) { 3869 break 3870 } 3871 v.reset(OpAMD64MOVBstoreconstidx1) 3872 v.AuxInt = ValAndOff(x).add(off) 3873 v.Aux = mergeSym(sym1, sym2) 3874 v.AddArg(ptr) 3875 v.AddArg(idx) 3876 v.AddArg(mem) 3877 return true 3878 } 3879 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 3880 // cond: 3881 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 3882 for { 3883 x := v.AuxInt 3884 sym := v.Aux 3885 v_0 := v.Args[0] 3886 if v_0.Op != OpAMD64ADDQ { 3887 break 3888 } 3889 ptr := v_0.Args[0] 3890 idx := v_0.Args[1] 3891 mem := v.Args[1] 3892 v.reset(OpAMD64MOVBstoreconstidx1) 3893 v.AuxInt = x 3894 v.Aux = sym 3895 v.AddArg(ptr) 3896 v.AddArg(idx) 3897 v.AddArg(mem) 3898 return true 3899 } 3900 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 3901 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 3902 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 3903 for { 3904 c := v.AuxInt 3905 s := v.Aux 3906 p := v.Args[0] 3907 x := v.Args[1] 3908 if x.Op != OpAMD64MOVBstoreconst { 3909 break 3910 } 3911 a := x.AuxInt 3912 if x.Aux != s { 3913 break 3914 } 3915 if p != x.Args[0] { 3916 break 3917 } 3918 mem := x.Args[1] 3919 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 3920 break 3921 } 3922 v.reset(OpAMD64MOVWstoreconst) 3923 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 3924 v.Aux = s 3925 v.AddArg(p) 3926 v.AddArg(mem) 3927 return true 3928 } 3929 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 3930 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 3931 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 3932 for { 3933 sc 
:= v.AuxInt 3934 sym1 := v.Aux 3935 v_0 := v.Args[0] 3936 if v_0.Op != OpAMD64LEAL { 3937 break 3938 } 3939 off := v_0.AuxInt 3940 sym2 := v_0.Aux 3941 ptr := v_0.Args[0] 3942 mem := v.Args[1] 3943 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 3944 break 3945 } 3946 v.reset(OpAMD64MOVBstoreconst) 3947 v.AuxInt = ValAndOff(sc).add(off) 3948 v.Aux = mergeSym(sym1, sym2) 3949 v.AddArg(ptr) 3950 v.AddArg(mem) 3951 return true 3952 } 3953 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 3954 // cond: ValAndOff(sc).canAdd(off) 3955 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 3956 for { 3957 sc := v.AuxInt 3958 s := v.Aux 3959 v_0 := v.Args[0] 3960 if v_0.Op != OpAMD64ADDLconst { 3961 break 3962 } 3963 off := v_0.AuxInt 3964 ptr := v_0.Args[0] 3965 mem := v.Args[1] 3966 if !(ValAndOff(sc).canAdd(off)) { 3967 break 3968 } 3969 v.reset(OpAMD64MOVBstoreconst) 3970 v.AuxInt = ValAndOff(sc).add(off) 3971 v.Aux = s 3972 v.AddArg(ptr) 3973 v.AddArg(mem) 3974 return true 3975 } 3976 return false 3977 } 3978 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 3979 b := v.Block 3980 _ = b 3981 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 3982 // cond: 3983 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 3984 for { 3985 x := v.AuxInt 3986 sym := v.Aux 3987 v_0 := v.Args[0] 3988 if v_0.Op != OpAMD64ADDQconst { 3989 break 3990 } 3991 c := v_0.AuxInt 3992 ptr := v_0.Args[0] 3993 idx := v.Args[1] 3994 mem := v.Args[2] 3995 v.reset(OpAMD64MOVBstoreconstidx1) 3996 v.AuxInt = ValAndOff(x).add(c) 3997 v.Aux = sym 3998 v.AddArg(ptr) 3999 v.AddArg(idx) 4000 v.AddArg(mem) 4001 return true 4002 } 4003 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 4004 // cond: 4005 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4006 for { 4007 x := v.AuxInt 4008 sym := v.Aux 4009 ptr := v.Args[0] 4010 v_1 := v.Args[1] 4011 if v_1.Op != OpAMD64ADDQconst { 4012 break 4013 } 4014 c := v_1.AuxInt 4015 idx := v_1.Args[0] 4016 mem := v.Args[2] 4017 v.reset(OpAMD64MOVBstoreconstidx1) 4018 v.AuxInt = ValAndOff(x).add(c) 4019 v.Aux = sym 4020 v.AddArg(ptr) 4021 v.AddArg(idx) 4022 v.AddArg(mem) 4023 return true 4024 } 4025 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 4026 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4027 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 4028 for { 4029 c := v.AuxInt 4030 s := v.Aux 4031 p := v.Args[0] 4032 i := v.Args[1] 4033 x := v.Args[2] 4034 if x.Op != OpAMD64MOVBstoreconstidx1 { 4035 break 4036 } 4037 a := x.AuxInt 4038 if x.Aux != s { 4039 break 4040 } 4041 if p != x.Args[0] { 4042 break 4043 } 4044 if i != x.Args[1] { 4045 break 4046 } 4047 mem := x.Args[2] 4048 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4049 break 4050 } 4051 v.reset(OpAMD64MOVWstoreconstidx1) 4052 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4053 v.Aux = s 4054 v.AddArg(p) 4055 v.AddArg(i) 4056 v.AddArg(mem) 4057 return true 4058 } 4059 return false 4060 } 4061 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 4062 b := v.Block 4063 _ = b 4064 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 4065 // cond: 4066 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4067 for { 4068 c := 
v.AuxInt 4069 sym := v.Aux 4070 v_0 := v.Args[0] 4071 if v_0.Op != OpAMD64ADDQconst { 4072 break 4073 } 4074 d := v_0.AuxInt 4075 ptr := v_0.Args[0] 4076 idx := v.Args[1] 4077 val := v.Args[2] 4078 mem := v.Args[3] 4079 v.reset(OpAMD64MOVBstoreidx1) 4080 v.AuxInt = c + d 4081 v.Aux = sym 4082 v.AddArg(ptr) 4083 v.AddArg(idx) 4084 v.AddArg(val) 4085 v.AddArg(mem) 4086 return true 4087 } 4088 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 4089 // cond: 4090 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4091 for { 4092 c := v.AuxInt 4093 sym := v.Aux 4094 ptr := v.Args[0] 4095 v_1 := v.Args[1] 4096 if v_1.Op != OpAMD64ADDQconst { 4097 break 4098 } 4099 d := v_1.AuxInt 4100 idx := v_1.Args[0] 4101 val := v.Args[2] 4102 mem := v.Args[3] 4103 v.reset(OpAMD64MOVBstoreidx1) 4104 v.AuxInt = c + d 4105 v.Aux = sym 4106 v.AddArg(ptr) 4107 v.AddArg(idx) 4108 v.AddArg(val) 4109 v.AddArg(mem) 4110 return true 4111 } 4112 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 4113 // cond: x.Uses == 1 && clobber(x) 4114 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 4115 for { 4116 i := v.AuxInt 4117 s := v.Aux 4118 p := v.Args[0] 4119 idx := v.Args[1] 4120 v_2 := v.Args[2] 4121 if v_2.Op != OpAMD64SHRQconst { 4122 break 4123 } 4124 if v_2.AuxInt != 8 { 4125 break 4126 } 4127 w := v_2.Args[0] 4128 x := v.Args[3] 4129 if x.Op != OpAMD64MOVBstoreidx1 { 4130 break 4131 } 4132 if x.AuxInt != i-1 { 4133 break 4134 } 4135 if x.Aux != s { 4136 break 4137 } 4138 if p != x.Args[0] { 4139 break 4140 } 4141 if idx != x.Args[1] { 4142 break 4143 } 4144 if w != x.Args[2] { 4145 break 4146 } 4147 mem := x.Args[3] 4148 if !(x.Uses == 1 && clobber(x)) { 4149 break 4150 } 4151 v.reset(OpAMD64MOVWstoreidx1) 4152 v.AuxInt = i - 1 4153 v.Aux = s 4154 v.AddArg(p) 4155 v.AddArg(idx) 4156 v.AddArg(w) 4157 v.AddArg(mem) 4158 return true 4159 } 4160 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 4161 // cond: x.Uses == 1 && clobber(x) 4162 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 4163 for { 4164 i := v.AuxInt 4165 s := v.Aux 4166 p := v.Args[0] 4167 idx := v.Args[1] 4168 v_2 := v.Args[2] 4169 if v_2.Op != OpAMD64SHRQconst { 4170 break 4171 } 4172 j := v_2.AuxInt 4173 w := v_2.Args[0] 4174 x := v.Args[3] 4175 if x.Op != OpAMD64MOVBstoreidx1 { 4176 break 4177 } 4178 if x.AuxInt != i-1 { 4179 break 4180 } 4181 if x.Aux != s { 4182 break 4183 } 4184 if p != x.Args[0] { 4185 break 4186 } 4187 if idx != x.Args[1] { 4188 break 4189 } 4190 w0 := x.Args[2] 4191 if w0.Op != OpAMD64SHRQconst { 4192 break 4193 } 4194 if w0.AuxInt != j-8 { 4195 break 4196 } 4197 if w != w0.Args[0] { 4198 break 4199 } 4200 mem := x.Args[3] 4201 if !(x.Uses == 1 && clobber(x)) { 4202 break 4203 } 4204 v.reset(OpAMD64MOVWstoreidx1) 4205 v.AuxInt = i - 1 4206 v.Aux = s 4207 v.AddArg(p) 4208 v.AddArg(idx) 4209 v.AddArg(w0) 4210 v.AddArg(mem) 4211 return true 4212 } 4213 return false 4214 } 4215 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 4216 b := v.Block 4217 _ = b 4218 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 4219 // cond: x.Uses == 1 && clobber(x) 4220 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4221 for { 4222 x := v.Args[0] 4223 if x.Op != OpAMD64MOVLload { 4224 break 4225 } 4226 off := x.AuxInt 4227 sym := x.Aux 4228 ptr := x.Args[0] 4229 mem := x.Args[1] 4230 if !(x.Uses == 1 && clobber(x)) { 4231 break 4232 } 4233 b = x.Block 4234 v0 := 
b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 4235 v.reset(OpCopy) 4236 v.AddArg(v0) 4237 v0.AuxInt = off 4238 v0.Aux = sym 4239 v0.AddArg(ptr) 4240 v0.AddArg(mem) 4241 return true 4242 } 4243 // match: (MOVLQSX (ANDLconst [c] x)) 4244 // cond: c & 0x80000000 == 0 4245 // result: (ANDLconst [c & 0x7fffffff] x) 4246 for { 4247 v_0 := v.Args[0] 4248 if v_0.Op != OpAMD64ANDLconst { 4249 break 4250 } 4251 c := v_0.AuxInt 4252 x := v_0.Args[0] 4253 if !(c&0x80000000 == 0) { 4254 break 4255 } 4256 v.reset(OpAMD64ANDLconst) 4257 v.AuxInt = c & 0x7fffffff 4258 v.AddArg(x) 4259 return true 4260 } 4261 return false 4262 } 4263 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 4264 b := v.Block 4265 _ = b 4266 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4267 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4268 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4269 for { 4270 off1 := v.AuxInt 4271 sym1 := v.Aux 4272 v_0 := v.Args[0] 4273 if v_0.Op != OpAMD64LEAQ { 4274 break 4275 } 4276 off2 := v_0.AuxInt 4277 sym2 := v_0.Aux 4278 base := v_0.Args[0] 4279 mem := v.Args[1] 4280 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4281 break 4282 } 4283 v.reset(OpAMD64MOVLQSXload) 4284 v.AuxInt = off1 + off2 4285 v.Aux = mergeSym(sym1, sym2) 4286 v.AddArg(base) 4287 v.AddArg(mem) 4288 return true 4289 } 4290 return false 4291 } 4292 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 4293 b := v.Block 4294 _ = b 4295 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 4296 // cond: x.Uses == 1 && clobber(x) 4297 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 4298 for { 4299 x := v.Args[0] 4300 if x.Op != OpAMD64MOVLload { 4301 break 4302 } 4303 off := x.AuxInt 4304 sym := x.Aux 4305 ptr := x.Args[0] 4306 mem := x.Args[1] 4307 if !(x.Uses == 1 && clobber(x)) { 4308 break 4309 } 4310 b = x.Block 4311 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 4312 v.reset(OpCopy) 4313 v.AddArg(v0) 4314 v0.AuxInt = off 4315 v0.Aux = sym 4316 v0.AddArg(ptr) 4317 v0.AddArg(mem) 4318 return true 4319 } 4320 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 4321 // cond: x.Uses == 1 && clobber(x) 4322 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 4323 for { 4324 x := v.Args[0] 4325 if x.Op != OpAMD64MOVLloadidx1 { 4326 break 4327 } 4328 off := x.AuxInt 4329 sym := x.Aux 4330 ptr := x.Args[0] 4331 idx := x.Args[1] 4332 mem := x.Args[2] 4333 if !(x.Uses == 1 && clobber(x)) { 4334 break 4335 } 4336 b = x.Block 4337 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 4338 v.reset(OpCopy) 4339 v.AddArg(v0) 4340 v0.AuxInt = off 4341 v0.Aux = sym 4342 v0.AddArg(ptr) 4343 v0.AddArg(idx) 4344 v0.AddArg(mem) 4345 return true 4346 } 4347 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 4348 // cond: x.Uses == 1 && clobber(x) 4349 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 4350 for { 4351 x := v.Args[0] 4352 if x.Op != OpAMD64MOVLloadidx4 { 4353 break 4354 } 4355 off := x.AuxInt 4356 sym := x.Aux 4357 ptr := x.Args[0] 4358 idx := x.Args[1] 4359 mem := x.Args[2] 4360 if !(x.Uses == 1 && clobber(x)) { 4361 break 4362 } 4363 b = x.Block 4364 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type) 4365 v.reset(OpCopy) 4366 v.AddArg(v0) 4367 v0.AuxInt = off 4368 v0.Aux = sym 4369 v0.AddArg(ptr) 4370 v0.AddArg(idx) 4371 v0.AddArg(mem) 4372 return true 4373 } 4374 // match: (MOVLQZX (ANDLconst [c] x)) 4375 // cond: 4376 // result: (ANDLconst [c] x) 4377 for { 
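// Editorial note (not produced by gen/*.go): unlike the MOVBQZX rule earlier, which
// re-masks with c & 0xff, no mask is needed here: on AMD64 a 32-bit ANDL already
// zeroes bits 32-63 of its destination register, so the zero-extension is a no-op and
// the ANDLconst can be reused unchanged.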
4378 v_0 := v.Args[0] 4379 if v_0.Op != OpAMD64ANDLconst { 4380 break 4381 } 4382 c := v_0.AuxInt 4383 x := v_0.Args[0] 4384 v.reset(OpAMD64ANDLconst) 4385 v.AuxInt = c 4386 v.AddArg(x) 4387 return true 4388 } 4389 return false 4390 } 4391 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool { 4392 b := v.Block 4393 _ = b 4394 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 4395 // cond: is32Bit(off1+off2) 4396 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 4397 for { 4398 off1 := v.AuxInt 4399 sym := v.Aux 4400 v_0 := v.Args[0] 4401 if v_0.Op != OpAMD64ADDQconst { 4402 break 4403 } 4404 off2 := v_0.AuxInt 4405 ptr := v_0.Args[0] 4406 mem := v.Args[1] 4407 if !(is32Bit(off1 + off2)) { 4408 break 4409 } 4410 v.reset(OpAMD64MOVLatomicload) 4411 v.AuxInt = off1 + off2 4412 v.Aux = sym 4413 v.AddArg(ptr) 4414 v.AddArg(mem) 4415 return true 4416 } 4417 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 4418 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4419 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 4420 for { 4421 off1 := v.AuxInt 4422 sym1 := v.Aux 4423 v_0 := v.Args[0] 4424 if v_0.Op != OpAMD64LEAQ { 4425 break 4426 } 4427 off2 := v_0.AuxInt 4428 sym2 := v_0.Aux 4429 ptr := v_0.Args[0] 4430 mem := v.Args[1] 4431 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4432 break 4433 } 4434 v.reset(OpAMD64MOVLatomicload) 4435 v.AuxInt = off1 + off2 4436 v.Aux = mergeSym(sym1, sym2) 4437 v.AddArg(ptr) 4438 v.AddArg(mem) 4439 return true 4440 } 4441 return false 4442 } 4443 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 4444 b := v.Block 4445 _ = b 4446 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 4447 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4448 // result: x 4449 for { 4450 off := v.AuxInt 4451 sym := v.Aux 4452 ptr := v.Args[0] 4453 v_1 := v.Args[1] 4454 if v_1.Op != OpAMD64MOVLstore { 4455 break 4456 } 4457 off2 := v_1.AuxInt 4458 sym2 := v_1.Aux 4459 ptr2 := v_1.Args[0] 4460 x := v_1.Args[1] 4461 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4462 break 4463 } 4464 v.reset(OpCopy) 4465 v.Type = x.Type 4466 v.AddArg(x) 4467 return true 4468 } 4469 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 4470 // cond: is32Bit(off1+off2) 4471 // result: (MOVLload [off1+off2] {sym} ptr mem) 4472 for { 4473 off1 := v.AuxInt 4474 sym := v.Aux 4475 v_0 := v.Args[0] 4476 if v_0.Op != OpAMD64ADDQconst { 4477 break 4478 } 4479 off2 := v_0.AuxInt 4480 ptr := v_0.Args[0] 4481 mem := v.Args[1] 4482 if !(is32Bit(off1 + off2)) { 4483 break 4484 } 4485 v.reset(OpAMD64MOVLload) 4486 v.AuxInt = off1 + off2 4487 v.Aux = sym 4488 v.AddArg(ptr) 4489 v.AddArg(mem) 4490 return true 4491 } 4492 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4493 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4494 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4495 for { 4496 off1 := v.AuxInt 4497 sym1 := v.Aux 4498 v_0 := v.Args[0] 4499 if v_0.Op != OpAMD64LEAQ { 4500 break 4501 } 4502 off2 := v_0.AuxInt 4503 sym2 := v_0.Aux 4504 base := v_0.Args[0] 4505 mem := v.Args[1] 4506 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4507 break 4508 } 4509 v.reset(OpAMD64MOVLload) 4510 v.AuxInt = off1 + off2 4511 v.Aux = mergeSym(sym1, sym2) 4512 v.AddArg(base) 4513 v.AddArg(mem) 4514 return true 4515 } 4516 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 4517 // cond: 
is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4518 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4519 for { 4520 off1 := v.AuxInt 4521 sym1 := v.Aux 4522 v_0 := v.Args[0] 4523 if v_0.Op != OpAMD64LEAQ1 { 4524 break 4525 } 4526 off2 := v_0.AuxInt 4527 sym2 := v_0.Aux 4528 ptr := v_0.Args[0] 4529 idx := v_0.Args[1] 4530 mem := v.Args[1] 4531 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4532 break 4533 } 4534 v.reset(OpAMD64MOVLloadidx1) 4535 v.AuxInt = off1 + off2 4536 v.Aux = mergeSym(sym1, sym2) 4537 v.AddArg(ptr) 4538 v.AddArg(idx) 4539 v.AddArg(mem) 4540 return true 4541 } 4542 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 4543 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4544 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4545 for { 4546 off1 := v.AuxInt 4547 sym1 := v.Aux 4548 v_0 := v.Args[0] 4549 if v_0.Op != OpAMD64LEAQ4 { 4550 break 4551 } 4552 off2 := v_0.AuxInt 4553 sym2 := v_0.Aux 4554 ptr := v_0.Args[0] 4555 idx := v_0.Args[1] 4556 mem := v.Args[1] 4557 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4558 break 4559 } 4560 v.reset(OpAMD64MOVLloadidx4) 4561 v.AuxInt = off1 + off2 4562 v.Aux = mergeSym(sym1, sym2) 4563 v.AddArg(ptr) 4564 v.AddArg(idx) 4565 v.AddArg(mem) 4566 return true 4567 } 4568 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 4569 // cond: ptr.Op != OpSB 4570 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 4571 for { 4572 off := v.AuxInt 4573 sym := v.Aux 4574 v_0 := v.Args[0] 4575 if v_0.Op != OpAMD64ADDQ { 4576 break 4577 } 4578 ptr := v_0.Args[0] 4579 idx := v_0.Args[1] 4580 mem := v.Args[1] 4581 if !(ptr.Op != OpSB) { 4582 break 4583 } 4584 v.reset(OpAMD64MOVLloadidx1) 4585 v.AuxInt = off 4586 v.Aux = sym 4587 v.AddArg(ptr) 4588 v.AddArg(idx) 4589 v.AddArg(mem) 4590 return true 4591 } 4592 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 4593 // cond: canMergeSym(sym1, sym2) 4594 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4595 for { 4596 off1 := v.AuxInt 4597 sym1 := v.Aux 4598 v_0 := v.Args[0] 4599 if v_0.Op != OpAMD64LEAL { 4600 break 4601 } 4602 off2 := v_0.AuxInt 4603 sym2 := v_0.Aux 4604 base := v_0.Args[0] 4605 mem := v.Args[1] 4606 if !(canMergeSym(sym1, sym2)) { 4607 break 4608 } 4609 v.reset(OpAMD64MOVLload) 4610 v.AuxInt = off1 + off2 4611 v.Aux = mergeSym(sym1, sym2) 4612 v.AddArg(base) 4613 v.AddArg(mem) 4614 return true 4615 } 4616 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 4617 // cond: is32Bit(off1+off2) 4618 // result: (MOVLload [off1+off2] {sym} ptr mem) 4619 for { 4620 off1 := v.AuxInt 4621 sym := v.Aux 4622 v_0 := v.Args[0] 4623 if v_0.Op != OpAMD64ADDLconst { 4624 break 4625 } 4626 off2 := v_0.AuxInt 4627 ptr := v_0.Args[0] 4628 mem := v.Args[1] 4629 if !(is32Bit(off1 + off2)) { 4630 break 4631 } 4632 v.reset(OpAMD64MOVLload) 4633 v.AuxInt = off1 + off2 4634 v.Aux = sym 4635 v.AddArg(ptr) 4636 v.AddArg(mem) 4637 return true 4638 } 4639 return false 4640 } 4641 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 4642 b := v.Block 4643 _ = b 4644 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 4645 // cond: 4646 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 4647 for { 4648 c := v.AuxInt 4649 sym := v.Aux 4650 ptr := v.Args[0] 4651 v_1 := v.Args[1] 4652 if v_1.Op != OpAMD64SHLQconst { 4653 break 4654 } 4655 if v_1.AuxInt != 2 { 4656 break 4657 } 4658 idx := v_1.Args[0] 4659 mem := v.Args[2] 4660 v.reset(OpAMD64MOVLloadidx4) 4661 v.AuxInt = c 
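// Editorial note (not produced by gen/*.go): SHLQconst [2] multiplies the index by 4,
// so base+1*(idx<<2) is re-expressed as base+4*idx, moving the multiply into the scale
// factor that the idx4 addressing mode encodes for free.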
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
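		// All guards for this rule have passed, so the rewrite is committed:
		// v.reset below reuses v in place, turning
		// (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) into
		// (MOVLstoreidx1 [off] {sym} ptr idx val mem). As an illustrative
		// sketch (hypothetical operands, not from the rules file), a store
		// through p+i becomes a single indexed store; the ptr.Op != OpSB
		// guard keeps the static-base pseudo-register out of the index slot.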
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
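	// ValAndOff packs a 32-bit constant value and a 32-bit offset into a
	// single AuxInt; canAdd reports whether the offset still fits after the
	// addition. As a hypothetical example, a ValAndOff holding value 7 at
	// offset 8, folded through (ADDQconst [4] ptr), becomes value 7 at
	// offset 12 from ptr.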
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
v.AddArg(idx) 7409 v.AddArg(val) 7410 v.AddArg(mem) 7411 return true 7412 } 7413 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7414 // cond: 7415 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 7416 for { 7417 c := v.AuxInt 7418 sym := v.Aux 7419 ptr := v.Args[0] 7420 v_1 := v.Args[1] 7421 if v_1.Op != OpAMD64ADDQconst { 7422 break 7423 } 7424 d := v_1.AuxInt 7425 idx := v_1.Args[0] 7426 val := v.Args[2] 7427 mem := v.Args[3] 7428 v.reset(OpAMD64MOVSDstoreidx8) 7429 v.AuxInt = c + 8*d 7430 v.Aux = sym 7431 v.AddArg(ptr) 7432 v.AddArg(idx) 7433 v.AddArg(val) 7434 v.AddArg(mem) 7435 return true 7436 } 7437 return false 7438 } 7439 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { 7440 b := v.Block 7441 _ = b 7442 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 7443 // cond: is32Bit(off1+off2) 7444 // result: (MOVSSload [off1+off2] {sym} ptr mem) 7445 for { 7446 off1 := v.AuxInt 7447 sym := v.Aux 7448 v_0 := v.Args[0] 7449 if v_0.Op != OpAMD64ADDQconst { 7450 break 7451 } 7452 off2 := v_0.AuxInt 7453 ptr := v_0.Args[0] 7454 mem := v.Args[1] 7455 if !(is32Bit(off1 + off2)) { 7456 break 7457 } 7458 v.reset(OpAMD64MOVSSload) 7459 v.AuxInt = off1 + off2 7460 v.Aux = sym 7461 v.AddArg(ptr) 7462 v.AddArg(mem) 7463 return true 7464 } 7465 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7466 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7467 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7468 for { 7469 off1 := v.AuxInt 7470 sym1 := v.Aux 7471 v_0 := v.Args[0] 7472 if v_0.Op != OpAMD64LEAQ { 7473 break 7474 } 7475 off2 := v_0.AuxInt 7476 sym2 := v_0.Aux 7477 base := v_0.Args[0] 7478 mem := v.Args[1] 7479 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7480 break 7481 } 7482 v.reset(OpAMD64MOVSSload) 7483 v.AuxInt = off1 + off2 7484 v.Aux = mergeSym(sym1, sym2) 7485 v.AddArg(base) 7486 v.AddArg(mem) 7487 return true 7488 } 7489 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7490 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7491 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7492 for { 7493 off1 := v.AuxInt 7494 sym1 := v.Aux 7495 v_0 := v.Args[0] 7496 if v_0.Op != OpAMD64LEAQ1 { 7497 break 7498 } 7499 off2 := v_0.AuxInt 7500 sym2 := v_0.Aux 7501 ptr := v_0.Args[0] 7502 idx := v_0.Args[1] 7503 mem := v.Args[1] 7504 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7505 break 7506 } 7507 v.reset(OpAMD64MOVSSloadidx1) 7508 v.AuxInt = off1 + off2 7509 v.Aux = mergeSym(sym1, sym2) 7510 v.AddArg(ptr) 7511 v.AddArg(idx) 7512 v.AddArg(mem) 7513 return true 7514 } 7515 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7516 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7517 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7518 for { 7519 off1 := v.AuxInt 7520 sym1 := v.Aux 7521 v_0 := v.Args[0] 7522 if v_0.Op != OpAMD64LEAQ4 { 7523 break 7524 } 7525 off2 := v_0.AuxInt 7526 sym2 := v_0.Aux 7527 ptr := v_0.Args[0] 7528 idx := v_0.Args[1] 7529 mem := v.Args[1] 7530 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7531 break 7532 } 7533 v.reset(OpAMD64MOVSSloadidx4) 7534 v.AuxInt = off1 + off2 7535 v.Aux = mergeSym(sym1, sym2) 7536 v.AddArg(ptr) 7537 v.AddArg(idx) 7538 v.AddArg(mem) 7539 return true 7540 } 7541 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 7542 // cond: ptr.Op != OpSB 7543 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 7544 for { 7545 off := 
v.AuxInt 7546 sym := v.Aux 7547 v_0 := v.Args[0] 7548 if v_0.Op != OpAMD64ADDQ { 7549 break 7550 } 7551 ptr := v_0.Args[0] 7552 idx := v_0.Args[1] 7553 mem := v.Args[1] 7554 if !(ptr.Op != OpSB) { 7555 break 7556 } 7557 v.reset(OpAMD64MOVSSloadidx1) 7558 v.AuxInt = off 7559 v.Aux = sym 7560 v.AddArg(ptr) 7561 v.AddArg(idx) 7562 v.AddArg(mem) 7563 return true 7564 } 7565 return false 7566 } 7567 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool { 7568 b := v.Block 7569 _ = b 7570 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7571 // cond: 7572 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 7573 for { 7574 c := v.AuxInt 7575 sym := v.Aux 7576 v_0 := v.Args[0] 7577 if v_0.Op != OpAMD64ADDQconst { 7578 break 7579 } 7580 d := v_0.AuxInt 7581 ptr := v_0.Args[0] 7582 idx := v.Args[1] 7583 mem := v.Args[2] 7584 v.reset(OpAMD64MOVSSloadidx1) 7585 v.AuxInt = c + d 7586 v.Aux = sym 7587 v.AddArg(ptr) 7588 v.AddArg(idx) 7589 v.AddArg(mem) 7590 return true 7591 } 7592 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7593 // cond: 7594 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 7595 for { 7596 c := v.AuxInt 7597 sym := v.Aux 7598 ptr := v.Args[0] 7599 v_1 := v.Args[1] 7600 if v_1.Op != OpAMD64ADDQconst { 7601 break 7602 } 7603 d := v_1.AuxInt 7604 idx := v_1.Args[0] 7605 mem := v.Args[2] 7606 v.reset(OpAMD64MOVSSloadidx1) 7607 v.AuxInt = c + d 7608 v.Aux = sym 7609 v.AddArg(ptr) 7610 v.AddArg(idx) 7611 v.AddArg(mem) 7612 return true 7613 } 7614 return false 7615 } 7616 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { 7617 b := v.Block 7618 _ = b 7619 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 7620 // cond: 7621 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 7622 for { 7623 c := v.AuxInt 7624 sym := v.Aux 7625 v_0 := v.Args[0] 7626 if v_0.Op != OpAMD64ADDQconst { 7627 break 7628 } 7629 d := v_0.AuxInt 7630 ptr := v_0.Args[0] 7631 idx := v.Args[1] 7632 mem := v.Args[2] 7633 v.reset(OpAMD64MOVSSloadidx4) 7634 v.AuxInt = c + d 7635 v.Aux = sym 7636 v.AddArg(ptr) 7637 v.AddArg(idx) 7638 v.AddArg(mem) 7639 return true 7640 } 7641 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 7642 // cond: 7643 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 7644 for { 7645 c := v.AuxInt 7646 sym := v.Aux 7647 ptr := v.Args[0] 7648 v_1 := v.Args[1] 7649 if v_1.Op != OpAMD64ADDQconst { 7650 break 7651 } 7652 d := v_1.AuxInt 7653 idx := v_1.Args[0] 7654 mem := v.Args[2] 7655 v.reset(OpAMD64MOVSSloadidx4) 7656 v.AuxInt = c + 4*d 7657 v.Aux = sym 7658 v.AddArg(ptr) 7659 v.AddArg(idx) 7660 v.AddArg(mem) 7661 return true 7662 } 7663 return false 7664 } 7665 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { 7666 b := v.Block 7667 _ = b 7668 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7669 // cond: is32Bit(off1+off2) 7670 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 7671 for { 7672 off1 := v.AuxInt 7673 sym := v.Aux 7674 v_0 := v.Args[0] 7675 if v_0.Op != OpAMD64ADDQconst { 7676 break 7677 } 7678 off2 := v_0.AuxInt 7679 ptr := v_0.Args[0] 7680 val := v.Args[1] 7681 mem := v.Args[2] 7682 if !(is32Bit(off1 + off2)) { 7683 break 7684 } 7685 v.reset(OpAMD64MOVSSstore) 7686 v.AuxInt = off1 + off2 7687 v.Aux = sym 7688 v.AddArg(ptr) 7689 v.AddArg(val) 7690 v.AddArg(mem) 7691 return true 7692 } 7693 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7694 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7695 // result: 
(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7696 for { 7697 off1 := v.AuxInt 7698 sym1 := v.Aux 7699 v_0 := v.Args[0] 7700 if v_0.Op != OpAMD64LEAQ { 7701 break 7702 } 7703 off2 := v_0.AuxInt 7704 sym2 := v_0.Aux 7705 base := v_0.Args[0] 7706 val := v.Args[1] 7707 mem := v.Args[2] 7708 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7709 break 7710 } 7711 v.reset(OpAMD64MOVSSstore) 7712 v.AuxInt = off1 + off2 7713 v.Aux = mergeSym(sym1, sym2) 7714 v.AddArg(base) 7715 v.AddArg(val) 7716 v.AddArg(mem) 7717 return true 7718 } 7719 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7720 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7721 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7722 for { 7723 off1 := v.AuxInt 7724 sym1 := v.Aux 7725 v_0 := v.Args[0] 7726 if v_0.Op != OpAMD64LEAQ1 { 7727 break 7728 } 7729 off2 := v_0.AuxInt 7730 sym2 := v_0.Aux 7731 ptr := v_0.Args[0] 7732 idx := v_0.Args[1] 7733 val := v.Args[1] 7734 mem := v.Args[2] 7735 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7736 break 7737 } 7738 v.reset(OpAMD64MOVSSstoreidx1) 7739 v.AuxInt = off1 + off2 7740 v.Aux = mergeSym(sym1, sym2) 7741 v.AddArg(ptr) 7742 v.AddArg(idx) 7743 v.AddArg(val) 7744 v.AddArg(mem) 7745 return true 7746 } 7747 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 7748 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7749 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7750 for { 7751 off1 := v.AuxInt 7752 sym1 := v.Aux 7753 v_0 := v.Args[0] 7754 if v_0.Op != OpAMD64LEAQ4 { 7755 break 7756 } 7757 off2 := v_0.AuxInt 7758 sym2 := v_0.Aux 7759 ptr := v_0.Args[0] 7760 idx := v_0.Args[1] 7761 val := v.Args[1] 7762 mem := v.Args[2] 7763 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7764 break 7765 } 7766 v.reset(OpAMD64MOVSSstoreidx4) 7767 v.AuxInt = off1 + off2 7768 v.Aux = mergeSym(sym1, sym2) 7769 v.AddArg(ptr) 7770 v.AddArg(idx) 7771 v.AddArg(val) 7772 v.AddArg(mem) 7773 return true 7774 } 7775 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 7776 // cond: ptr.Op != OpSB 7777 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 7778 for { 7779 off := v.AuxInt 7780 sym := v.Aux 7781 v_0 := v.Args[0] 7782 if v_0.Op != OpAMD64ADDQ { 7783 break 7784 } 7785 ptr := v_0.Args[0] 7786 idx := v_0.Args[1] 7787 val := v.Args[1] 7788 mem := v.Args[2] 7789 if !(ptr.Op != OpSB) { 7790 break 7791 } 7792 v.reset(OpAMD64MOVSSstoreidx1) 7793 v.AuxInt = off 7794 v.Aux = sym 7795 v.AddArg(ptr) 7796 v.AddArg(idx) 7797 v.AddArg(val) 7798 v.AddArg(mem) 7799 return true 7800 } 7801 return false 7802 } 7803 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool { 7804 b := v.Block 7805 _ = b 7806 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7807 // cond: 7808 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 7809 for { 7810 c := v.AuxInt 7811 sym := v.Aux 7812 v_0 := v.Args[0] 7813 if v_0.Op != OpAMD64ADDQconst { 7814 break 7815 } 7816 d := v_0.AuxInt 7817 ptr := v_0.Args[0] 7818 idx := v.Args[1] 7819 val := v.Args[2] 7820 mem := v.Args[3] 7821 v.reset(OpAMD64MOVSSstoreidx1) 7822 v.AuxInt = c + d 7823 v.Aux = sym 7824 v.AddArg(ptr) 7825 v.AddArg(idx) 7826 v.AddArg(val) 7827 v.AddArg(mem) 7828 return true 7829 } 7830 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7831 // cond: 7832 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 7833 for { 7834 c := v.AuxInt 7835 sym := v.Aux 
7836 ptr := v.Args[0] 7837 v_1 := v.Args[1] 7838 if v_1.Op != OpAMD64ADDQconst { 7839 break 7840 } 7841 d := v_1.AuxInt 7842 idx := v_1.Args[0] 7843 val := v.Args[2] 7844 mem := v.Args[3] 7845 v.reset(OpAMD64MOVSSstoreidx1) 7846 v.AuxInt = c + d 7847 v.Aux = sym 7848 v.AddArg(ptr) 7849 v.AddArg(idx) 7850 v.AddArg(val) 7851 v.AddArg(mem) 7852 return true 7853 } 7854 return false 7855 } 7856 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { 7857 b := v.Block 7858 _ = b 7859 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7860 // cond: 7861 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 7862 for { 7863 c := v.AuxInt 7864 sym := v.Aux 7865 v_0 := v.Args[0] 7866 if v_0.Op != OpAMD64ADDQconst { 7867 break 7868 } 7869 d := v_0.AuxInt 7870 ptr := v_0.Args[0] 7871 idx := v.Args[1] 7872 val := v.Args[2] 7873 mem := v.Args[3] 7874 v.reset(OpAMD64MOVSSstoreidx4) 7875 v.AuxInt = c + d 7876 v.Aux = sym 7877 v.AddArg(ptr) 7878 v.AddArg(idx) 7879 v.AddArg(val) 7880 v.AddArg(mem) 7881 return true 7882 } 7883 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7884 // cond: 7885 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 7886 for { 7887 c := v.AuxInt 7888 sym := v.Aux 7889 ptr := v.Args[0] 7890 v_1 := v.Args[1] 7891 if v_1.Op != OpAMD64ADDQconst { 7892 break 7893 } 7894 d := v_1.AuxInt 7895 idx := v_1.Args[0] 7896 val := v.Args[2] 7897 mem := v.Args[3] 7898 v.reset(OpAMD64MOVSSstoreidx4) 7899 v.AuxInt = c + 4*d 7900 v.Aux = sym 7901 v.AddArg(ptr) 7902 v.AddArg(idx) 7903 v.AddArg(val) 7904 v.AddArg(mem) 7905 return true 7906 } 7907 return false 7908 } 7909 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { 7910 b := v.Block 7911 _ = b 7912 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 7913 // cond: x.Uses == 1 && clobber(x) 7914 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 7915 for { 7916 x := v.Args[0] 7917 if x.Op != OpAMD64MOVWload { 7918 break 7919 } 7920 off := x.AuxInt 7921 sym := x.Aux 7922 ptr := x.Args[0] 7923 mem := x.Args[1] 7924 if !(x.Uses == 1 && clobber(x)) { 7925 break 7926 } 7927 b = x.Block 7928 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 7929 v.reset(OpCopy) 7930 v.AddArg(v0) 7931 v0.AuxInt = off 7932 v0.Aux = sym 7933 v0.AddArg(ptr) 7934 v0.AddArg(mem) 7935 return true 7936 } 7937 // match: (MOVWQSX (ANDLconst [c] x)) 7938 // cond: c & 0x8000 == 0 7939 // result: (ANDLconst [c & 0x7fff] x) 7940 for { 7941 v_0 := v.Args[0] 7942 if v_0.Op != OpAMD64ANDLconst { 7943 break 7944 } 7945 c := v_0.AuxInt 7946 x := v_0.Args[0] 7947 if !(c&0x8000 == 0) { 7948 break 7949 } 7950 v.reset(OpAMD64ANDLconst) 7951 v.AuxInt = c & 0x7fff 7952 v.AddArg(x) 7953 return true 7954 } 7955 return false 7956 } 7957 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool { 7958 b := v.Block 7959 _ = b 7960 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7961 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7962 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7963 for { 7964 off1 := v.AuxInt 7965 sym1 := v.Aux 7966 v_0 := v.Args[0] 7967 if v_0.Op != OpAMD64LEAQ { 7968 break 7969 } 7970 off2 := v_0.AuxInt 7971 sym2 := v_0.Aux 7972 base := v_0.Args[0] 7973 mem := v.Args[1] 7974 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7975 break 7976 } 7977 v.reset(OpAMD64MOVWQSXload) 7978 v.AuxInt = off1 + off2 7979 v.Aux = mergeSym(sym1, sym2) 7980 v.AddArg(base) 7981 v.AddArg(mem) 7982 return true 7983 } 7984 return 
false 7985 } 7986 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { 7987 b := v.Block 7988 _ = b 7989 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 7990 // cond: x.Uses == 1 && clobber(x) 7991 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 7992 for { 7993 x := v.Args[0] 7994 if x.Op != OpAMD64MOVWload { 7995 break 7996 } 7997 off := x.AuxInt 7998 sym := x.Aux 7999 ptr := x.Args[0] 8000 mem := x.Args[1] 8001 if !(x.Uses == 1 && clobber(x)) { 8002 break 8003 } 8004 b = x.Block 8005 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type) 8006 v.reset(OpCopy) 8007 v.AddArg(v0) 8008 v0.AuxInt = off 8009 v0.Aux = sym 8010 v0.AddArg(ptr) 8011 v0.AddArg(mem) 8012 return true 8013 } 8014 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 8015 // cond: x.Uses == 1 && clobber(x) 8016 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 8017 for { 8018 x := v.Args[0] 8019 if x.Op != OpAMD64MOVWloadidx1 { 8020 break 8021 } 8022 off := x.AuxInt 8023 sym := x.Aux 8024 ptr := x.Args[0] 8025 idx := x.Args[1] 8026 mem := x.Args[2] 8027 if !(x.Uses == 1 && clobber(x)) { 8028 break 8029 } 8030 b = x.Block 8031 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 8032 v.reset(OpCopy) 8033 v.AddArg(v0) 8034 v0.AuxInt = off 8035 v0.Aux = sym 8036 v0.AddArg(ptr) 8037 v0.AddArg(idx) 8038 v0.AddArg(mem) 8039 return true 8040 } 8041 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 8042 // cond: x.Uses == 1 && clobber(x) 8043 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 8044 for { 8045 x := v.Args[0] 8046 if x.Op != OpAMD64MOVWloadidx2 { 8047 break 8048 } 8049 off := x.AuxInt 8050 sym := x.Aux 8051 ptr := x.Args[0] 8052 idx := x.Args[1] 8053 mem := x.Args[2] 8054 if !(x.Uses == 1 && clobber(x)) { 8055 break 8056 } 8057 b = x.Block 8058 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type) 8059 v.reset(OpCopy) 8060 v.AddArg(v0) 8061 v0.AuxInt = off 8062 v0.Aux = sym 8063 v0.AddArg(ptr) 8064 v0.AddArg(idx) 8065 v0.AddArg(mem) 8066 return true 8067 } 8068 // match: (MOVWQZX (ANDLconst [c] x)) 8069 // cond: 8070 // result: (ANDLconst [c & 0xffff] x) 8071 for { 8072 v_0 := v.Args[0] 8073 if v_0.Op != OpAMD64ANDLconst { 8074 break 8075 } 8076 c := v_0.AuxInt 8077 x := v_0.Args[0] 8078 v.reset(OpAMD64ANDLconst) 8079 v.AuxInt = c & 0xffff 8080 v.AddArg(x) 8081 return true 8082 } 8083 return false 8084 } 8085 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { 8086 b := v.Block 8087 _ = b 8088 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 8089 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8090 // result: x 8091 for { 8092 off := v.AuxInt 8093 sym := v.Aux 8094 ptr := v.Args[0] 8095 v_1 := v.Args[1] 8096 if v_1.Op != OpAMD64MOVWstore { 8097 break 8098 } 8099 off2 := v_1.AuxInt 8100 sym2 := v_1.Aux 8101 ptr2 := v_1.Args[0] 8102 x := v_1.Args[1] 8103 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8104 break 8105 } 8106 v.reset(OpCopy) 8107 v.Type = x.Type 8108 v.AddArg(x) 8109 return true 8110 } 8111 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 8112 // cond: is32Bit(off1+off2) 8113 // result: (MOVWload [off1+off2] {sym} ptr mem) 8114 for { 8115 off1 := v.AuxInt 8116 sym := v.Aux 8117 v_0 := v.Args[0] 8118 if v_0.Op != OpAMD64ADDQconst { 8119 break 8120 } 8121 off2 := v_0.AuxInt 8122 ptr := v_0.Args[0] 8123 mem := v.Args[1] 8124 if !(is32Bit(off1 + off2)) { 8125 break 8126 } 8127 v.reset(OpAMD64MOVWload) 8128 v.AuxInt = off1 + off2 8129 v.Aux = 
sym 8130 v.AddArg(ptr) 8131 v.AddArg(mem) 8132 return true 8133 } 8134 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8135 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8136 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8137 for { 8138 off1 := v.AuxInt 8139 sym1 := v.Aux 8140 v_0 := v.Args[0] 8141 if v_0.Op != OpAMD64LEAQ { 8142 break 8143 } 8144 off2 := v_0.AuxInt 8145 sym2 := v_0.Aux 8146 base := v_0.Args[0] 8147 mem := v.Args[1] 8148 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8149 break 8150 } 8151 v.reset(OpAMD64MOVWload) 8152 v.AuxInt = off1 + off2 8153 v.Aux = mergeSym(sym1, sym2) 8154 v.AddArg(base) 8155 v.AddArg(mem) 8156 return true 8157 } 8158 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8159 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8160 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8161 for { 8162 off1 := v.AuxInt 8163 sym1 := v.Aux 8164 v_0 := v.Args[0] 8165 if v_0.Op != OpAMD64LEAQ1 { 8166 break 8167 } 8168 off2 := v_0.AuxInt 8169 sym2 := v_0.Aux 8170 ptr := v_0.Args[0] 8171 idx := v_0.Args[1] 8172 mem := v.Args[1] 8173 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8174 break 8175 } 8176 v.reset(OpAMD64MOVWloadidx1) 8177 v.AuxInt = off1 + off2 8178 v.Aux = mergeSym(sym1, sym2) 8179 v.AddArg(ptr) 8180 v.AddArg(idx) 8181 v.AddArg(mem) 8182 return true 8183 } 8184 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 8185 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8186 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8187 for { 8188 off1 := v.AuxInt 8189 sym1 := v.Aux 8190 v_0 := v.Args[0] 8191 if v_0.Op != OpAMD64LEAQ2 { 8192 break 8193 } 8194 off2 := v_0.AuxInt 8195 sym2 := v_0.Aux 8196 ptr := v_0.Args[0] 8197 idx := v_0.Args[1] 8198 mem := v.Args[1] 8199 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8200 break 8201 } 8202 v.reset(OpAMD64MOVWloadidx2) 8203 v.AuxInt = off1 + off2 8204 v.Aux = mergeSym(sym1, sym2) 8205 v.AddArg(ptr) 8206 v.AddArg(idx) 8207 v.AddArg(mem) 8208 return true 8209 } 8210 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 8211 // cond: ptr.Op != OpSB 8212 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 8213 for { 8214 off := v.AuxInt 8215 sym := v.Aux 8216 v_0 := v.Args[0] 8217 if v_0.Op != OpAMD64ADDQ { 8218 break 8219 } 8220 ptr := v_0.Args[0] 8221 idx := v_0.Args[1] 8222 mem := v.Args[1] 8223 if !(ptr.Op != OpSB) { 8224 break 8225 } 8226 v.reset(OpAMD64MOVWloadidx1) 8227 v.AuxInt = off 8228 v.Aux = sym 8229 v.AddArg(ptr) 8230 v.AddArg(idx) 8231 v.AddArg(mem) 8232 return true 8233 } 8234 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 8235 // cond: canMergeSym(sym1, sym2) 8236 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8237 for { 8238 off1 := v.AuxInt 8239 sym1 := v.Aux 8240 v_0 := v.Args[0] 8241 if v_0.Op != OpAMD64LEAL { 8242 break 8243 } 8244 off2 := v_0.AuxInt 8245 sym2 := v_0.Aux 8246 base := v_0.Args[0] 8247 mem := v.Args[1] 8248 if !(canMergeSym(sym1, sym2)) { 8249 break 8250 } 8251 v.reset(OpAMD64MOVWload) 8252 v.AuxInt = off1 + off2 8253 v.Aux = mergeSym(sym1, sym2) 8254 v.AddArg(base) 8255 v.AddArg(mem) 8256 return true 8257 } 8258 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 8259 // cond: is32Bit(off1+off2) 8260 // result: (MOVWload [off1+off2] {sym} ptr mem) 8261 for { 8262 off1 := v.AuxInt 8263 sym := v.Aux 8264 v_0 := v.Args[0] 8265 if v_0.Op != OpAMD64ADDLconst { 8266 break 8267 } 8268 off2 := 
v_0.AuxInt 8269 ptr := v_0.Args[0] 8270 mem := v.Args[1] 8271 if !(is32Bit(off1 + off2)) { 8272 break 8273 } 8274 v.reset(OpAMD64MOVWload) 8275 v.AuxInt = off1 + off2 8276 v.Aux = sym 8277 v.AddArg(ptr) 8278 v.AddArg(mem) 8279 return true 8280 } 8281 return false 8282 } 8283 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool { 8284 b := v.Block 8285 _ = b 8286 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 8287 // cond: 8288 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 8289 for { 8290 c := v.AuxInt 8291 sym := v.Aux 8292 ptr := v.Args[0] 8293 v_1 := v.Args[1] 8294 if v_1.Op != OpAMD64SHLQconst { 8295 break 8296 } 8297 if v_1.AuxInt != 1 { 8298 break 8299 } 8300 idx := v_1.Args[0] 8301 mem := v.Args[2] 8302 v.reset(OpAMD64MOVWloadidx2) 8303 v.AuxInt = c 8304 v.Aux = sym 8305 v.AddArg(ptr) 8306 v.AddArg(idx) 8307 v.AddArg(mem) 8308 return true 8309 } 8310 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8311 // cond: 8312 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 8313 for { 8314 c := v.AuxInt 8315 sym := v.Aux 8316 v_0 := v.Args[0] 8317 if v_0.Op != OpAMD64ADDQconst { 8318 break 8319 } 8320 d := v_0.AuxInt 8321 ptr := v_0.Args[0] 8322 idx := v.Args[1] 8323 mem := v.Args[2] 8324 v.reset(OpAMD64MOVWloadidx1) 8325 v.AuxInt = c + d 8326 v.Aux = sym 8327 v.AddArg(ptr) 8328 v.AddArg(idx) 8329 v.AddArg(mem) 8330 return true 8331 } 8332 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8333 // cond: 8334 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 8335 for { 8336 c := v.AuxInt 8337 sym := v.Aux 8338 ptr := v.Args[0] 8339 v_1 := v.Args[1] 8340 if v_1.Op != OpAMD64ADDQconst { 8341 break 8342 } 8343 d := v_1.AuxInt 8344 idx := v_1.Args[0] 8345 mem := v.Args[2] 8346 v.reset(OpAMD64MOVWloadidx1) 8347 v.AuxInt = c + d 8348 v.Aux = sym 8349 v.AddArg(ptr) 8350 v.AddArg(idx) 8351 v.AddArg(mem) 8352 return true 8353 } 8354 return false 8355 } 8356 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { 8357 b := v.Block 8358 _ = b 8359 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 8360 // cond: 8361 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 8362 for { 8363 c := v.AuxInt 8364 sym := v.Aux 8365 v_0 := v.Args[0] 8366 if v_0.Op != OpAMD64ADDQconst { 8367 break 8368 } 8369 d := v_0.AuxInt 8370 ptr := v_0.Args[0] 8371 idx := v.Args[1] 8372 mem := v.Args[2] 8373 v.reset(OpAMD64MOVWloadidx2) 8374 v.AuxInt = c + d 8375 v.Aux = sym 8376 v.AddArg(ptr) 8377 v.AddArg(idx) 8378 v.AddArg(mem) 8379 return true 8380 } 8381 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 8382 // cond: 8383 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 8384 for { 8385 c := v.AuxInt 8386 sym := v.Aux 8387 ptr := v.Args[0] 8388 v_1 := v.Args[1] 8389 if v_1.Op != OpAMD64ADDQconst { 8390 break 8391 } 8392 d := v_1.AuxInt 8393 idx := v_1.Args[0] 8394 mem := v.Args[2] 8395 v.reset(OpAMD64MOVWloadidx2) 8396 v.AuxInt = c + 2*d 8397 v.Aux = sym 8398 v.AddArg(ptr) 8399 v.AddArg(idx) 8400 v.AddArg(mem) 8401 return true 8402 } 8403 return false 8404 } 8405 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { 8406 b := v.Block 8407 _ = b 8408 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 8409 // cond: 8410 // result: (MOVWstore [off] {sym} ptr x mem) 8411 for { 8412 off := v.AuxInt 8413 sym := v.Aux 8414 ptr := v.Args[0] 8415 v_1 := v.Args[1] 8416 if v_1.Op != OpAMD64MOVWQSX { 8417 break 8418 } 8419 x := v_1.Args[0] 8420 mem := v.Args[2] 8421 v.reset(OpAMD64MOVWstore) 8422 v.AuxInt = 
off 8423 v.Aux = sym 8424 v.AddArg(ptr) 8425 v.AddArg(x) 8426 v.AddArg(mem) 8427 return true 8428 } 8429 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 8430 // cond: 8431 // result: (MOVWstore [off] {sym} ptr x mem) 8432 for { 8433 off := v.AuxInt 8434 sym := v.Aux 8435 ptr := v.Args[0] 8436 v_1 := v.Args[1] 8437 if v_1.Op != OpAMD64MOVWQZX { 8438 break 8439 } 8440 x := v_1.Args[0] 8441 mem := v.Args[2] 8442 v.reset(OpAMD64MOVWstore) 8443 v.AuxInt = off 8444 v.Aux = sym 8445 v.AddArg(ptr) 8446 v.AddArg(x) 8447 v.AddArg(mem) 8448 return true 8449 } 8450 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8451 // cond: is32Bit(off1+off2) 8452 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 8453 for { 8454 off1 := v.AuxInt 8455 sym := v.Aux 8456 v_0 := v.Args[0] 8457 if v_0.Op != OpAMD64ADDQconst { 8458 break 8459 } 8460 off2 := v_0.AuxInt 8461 ptr := v_0.Args[0] 8462 val := v.Args[1] 8463 mem := v.Args[2] 8464 if !(is32Bit(off1 + off2)) { 8465 break 8466 } 8467 v.reset(OpAMD64MOVWstore) 8468 v.AuxInt = off1 + off2 8469 v.Aux = sym 8470 v.AddArg(ptr) 8471 v.AddArg(val) 8472 v.AddArg(mem) 8473 return true 8474 } 8475 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 8476 // cond: validOff(off) 8477 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 8478 for { 8479 off := v.AuxInt 8480 sym := v.Aux 8481 ptr := v.Args[0] 8482 v_1 := v.Args[1] 8483 if v_1.Op != OpAMD64MOVLconst { 8484 break 8485 } 8486 c := v_1.AuxInt 8487 mem := v.Args[2] 8488 if !(validOff(off)) { 8489 break 8490 } 8491 v.reset(OpAMD64MOVWstoreconst) 8492 v.AuxInt = makeValAndOff(int64(int16(c)), off) 8493 v.Aux = sym 8494 v.AddArg(ptr) 8495 v.AddArg(mem) 8496 return true 8497 } 8498 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8499 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8500 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8501 for { 8502 off1 := v.AuxInt 8503 sym1 := v.Aux 8504 v_0 := v.Args[0] 8505 if v_0.Op != OpAMD64LEAQ { 8506 break 8507 } 8508 off2 := v_0.AuxInt 8509 sym2 := v_0.Aux 8510 base := v_0.Args[0] 8511 val := v.Args[1] 8512 mem := v.Args[2] 8513 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8514 break 8515 } 8516 v.reset(OpAMD64MOVWstore) 8517 v.AuxInt = off1 + off2 8518 v.Aux = mergeSym(sym1, sym2) 8519 v.AddArg(base) 8520 v.AddArg(val) 8521 v.AddArg(mem) 8522 return true 8523 } 8524 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8525 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8526 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8527 for { 8528 off1 := v.AuxInt 8529 sym1 := v.Aux 8530 v_0 := v.Args[0] 8531 if v_0.Op != OpAMD64LEAQ1 { 8532 break 8533 } 8534 off2 := v_0.AuxInt 8535 sym2 := v_0.Aux 8536 ptr := v_0.Args[0] 8537 idx := v_0.Args[1] 8538 val := v.Args[1] 8539 mem := v.Args[2] 8540 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8541 break 8542 } 8543 v.reset(OpAMD64MOVWstoreidx1) 8544 v.AuxInt = off1 + off2 8545 v.Aux = mergeSym(sym1, sym2) 8546 v.AddArg(ptr) 8547 v.AddArg(idx) 8548 v.AddArg(val) 8549 v.AddArg(mem) 8550 return true 8551 } 8552 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 8553 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8554 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8555 for { 8556 off1 := v.AuxInt 8557 sym1 := v.Aux 8558 v_0 := v.Args[0] 8559 if v_0.Op != OpAMD64LEAQ2 { 8560 break 8561 } 8562 off2 := 
v_0.AuxInt 8563 sym2 := v_0.Aux 8564 ptr := v_0.Args[0] 8565 idx := v_0.Args[1] 8566 val := v.Args[1] 8567 mem := v.Args[2] 8568 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8569 break 8570 } 8571 v.reset(OpAMD64MOVWstoreidx2) 8572 v.AuxInt = off1 + off2 8573 v.Aux = mergeSym(sym1, sym2) 8574 v.AddArg(ptr) 8575 v.AddArg(idx) 8576 v.AddArg(val) 8577 v.AddArg(mem) 8578 return true 8579 } 8580 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 8581 // cond: ptr.Op != OpSB 8582 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 8583 for { 8584 off := v.AuxInt 8585 sym := v.Aux 8586 v_0 := v.Args[0] 8587 if v_0.Op != OpAMD64ADDQ { 8588 break 8589 } 8590 ptr := v_0.Args[0] 8591 idx := v_0.Args[1] 8592 val := v.Args[1] 8593 mem := v.Args[2] 8594 if !(ptr.Op != OpSB) { 8595 break 8596 } 8597 v.reset(OpAMD64MOVWstoreidx1) 8598 v.AuxInt = off 8599 v.Aux = sym 8600 v.AddArg(ptr) 8601 v.AddArg(idx) 8602 v.AddArg(val) 8603 v.AddArg(mem) 8604 return true 8605 } 8606 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 8607 // cond: x.Uses == 1 && clobber(x) 8608 // result: (MOVLstore [i-2] {s} p w mem) 8609 for { 8610 i := v.AuxInt 8611 s := v.Aux 8612 p := v.Args[0] 8613 v_1 := v.Args[1] 8614 if v_1.Op != OpAMD64SHRQconst { 8615 break 8616 } 8617 if v_1.AuxInt != 16 { 8618 break 8619 } 8620 w := v_1.Args[0] 8621 x := v.Args[2] 8622 if x.Op != OpAMD64MOVWstore { 8623 break 8624 } 8625 if x.AuxInt != i-2 { 8626 break 8627 } 8628 if x.Aux != s { 8629 break 8630 } 8631 if p != x.Args[0] { 8632 break 8633 } 8634 if w != x.Args[1] { 8635 break 8636 } 8637 mem := x.Args[2] 8638 if !(x.Uses == 1 && clobber(x)) { 8639 break 8640 } 8641 v.reset(OpAMD64MOVLstore) 8642 v.AuxInt = i - 2 8643 v.Aux = s 8644 v.AddArg(p) 8645 v.AddArg(w) 8646 v.AddArg(mem) 8647 return true 8648 } 8649 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 8650 // cond: x.Uses == 1 && clobber(x) 8651 // result: (MOVLstore [i-2] {s} p w0 mem) 8652 for { 8653 i := v.AuxInt 8654 s := v.Aux 8655 p := v.Args[0] 8656 v_1 := v.Args[1] 8657 if v_1.Op != OpAMD64SHRQconst { 8658 break 8659 } 8660 j := v_1.AuxInt 8661 w := v_1.Args[0] 8662 x := v.Args[2] 8663 if x.Op != OpAMD64MOVWstore { 8664 break 8665 } 8666 if x.AuxInt != i-2 { 8667 break 8668 } 8669 if x.Aux != s { 8670 break 8671 } 8672 if p != x.Args[0] { 8673 break 8674 } 8675 w0 := x.Args[1] 8676 if w0.Op != OpAMD64SHRQconst { 8677 break 8678 } 8679 if w0.AuxInt != j-16 { 8680 break 8681 } 8682 if w != w0.Args[0] { 8683 break 8684 } 8685 mem := x.Args[2] 8686 if !(x.Uses == 1 && clobber(x)) { 8687 break 8688 } 8689 v.reset(OpAMD64MOVLstore) 8690 v.AuxInt = i - 2 8691 v.Aux = s 8692 v.AddArg(p) 8693 v.AddArg(w0) 8694 v.AddArg(mem) 8695 return true 8696 } 8697 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 8698 // cond: canMergeSym(sym1, sym2) 8699 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8700 for { 8701 off1 := v.AuxInt 8702 sym1 := v.Aux 8703 v_0 := v.Args[0] 8704 if v_0.Op != OpAMD64LEAL { 8705 break 8706 } 8707 off2 := v_0.AuxInt 8708 sym2 := v_0.Aux 8709 base := v_0.Args[0] 8710 val := v.Args[1] 8711 mem := v.Args[2] 8712 if !(canMergeSym(sym1, sym2)) { 8713 break 8714 } 8715 v.reset(OpAMD64MOVWstore) 8716 v.AuxInt = off1 + off2 8717 v.Aux = mergeSym(sym1, sym2) 8718 v.AddArg(base) 8719 v.AddArg(val) 8720 v.AddArg(mem) 8721 return true 8722 } 8723 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 8724 // cond: 
is32Bit(off1+off2) 8725 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 8726 for { 8727 off1 := v.AuxInt 8728 sym := v.Aux 8729 v_0 := v.Args[0] 8730 if v_0.Op != OpAMD64ADDLconst { 8731 break 8732 } 8733 off2 := v_0.AuxInt 8734 ptr := v_0.Args[0] 8735 val := v.Args[1] 8736 mem := v.Args[2] 8737 if !(is32Bit(off1 + off2)) { 8738 break 8739 } 8740 v.reset(OpAMD64MOVWstore) 8741 v.AuxInt = off1 + off2 8742 v.Aux = sym 8743 v.AddArg(ptr) 8744 v.AddArg(val) 8745 v.AddArg(mem) 8746 return true 8747 } 8748 return false 8749 } 8750 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { 8751 b := v.Block 8752 _ = b 8753 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 8754 // cond: ValAndOff(sc).canAdd(off) 8755 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8756 for { 8757 sc := v.AuxInt 8758 s := v.Aux 8759 v_0 := v.Args[0] 8760 if v_0.Op != OpAMD64ADDQconst { 8761 break 8762 } 8763 off := v_0.AuxInt 8764 ptr := v_0.Args[0] 8765 mem := v.Args[1] 8766 if !(ValAndOff(sc).canAdd(off)) { 8767 break 8768 } 8769 v.reset(OpAMD64MOVWstoreconst) 8770 v.AuxInt = ValAndOff(sc).add(off) 8771 v.Aux = s 8772 v.AddArg(ptr) 8773 v.AddArg(mem) 8774 return true 8775 } 8776 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 8777 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8778 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8779 for { 8780 sc := v.AuxInt 8781 sym1 := v.Aux 8782 v_0 := v.Args[0] 8783 if v_0.Op != OpAMD64LEAQ { 8784 break 8785 } 8786 off := v_0.AuxInt 8787 sym2 := v_0.Aux 8788 ptr := v_0.Args[0] 8789 mem := v.Args[1] 8790 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8791 break 8792 } 8793 v.reset(OpAMD64MOVWstoreconst) 8794 v.AuxInt = ValAndOff(sc).add(off) 8795 v.Aux = mergeSym(sym1, sym2) 8796 v.AddArg(ptr) 8797 v.AddArg(mem) 8798 return true 8799 } 8800 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 8801 // cond: canMergeSym(sym1, sym2) 8802 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8803 for { 8804 x := v.AuxInt 8805 sym1 := v.Aux 8806 v_0 := v.Args[0] 8807 if v_0.Op != OpAMD64LEAQ1 { 8808 break 8809 } 8810 off := v_0.AuxInt 8811 sym2 := v_0.Aux 8812 ptr := v_0.Args[0] 8813 idx := v_0.Args[1] 8814 mem := v.Args[1] 8815 if !(canMergeSym(sym1, sym2)) { 8816 break 8817 } 8818 v.reset(OpAMD64MOVWstoreconstidx1) 8819 v.AuxInt = ValAndOff(x).add(off) 8820 v.Aux = mergeSym(sym1, sym2) 8821 v.AddArg(ptr) 8822 v.AddArg(idx) 8823 v.AddArg(mem) 8824 return true 8825 } 8826 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 8827 // cond: canMergeSym(sym1, sym2) 8828 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8829 for { 8830 x := v.AuxInt 8831 sym1 := v.Aux 8832 v_0 := v.Args[0] 8833 if v_0.Op != OpAMD64LEAQ2 { 8834 break 8835 } 8836 off := v_0.AuxInt 8837 sym2 := v_0.Aux 8838 ptr := v_0.Args[0] 8839 idx := v_0.Args[1] 8840 mem := v.Args[1] 8841 if !(canMergeSym(sym1, sym2)) { 8842 break 8843 } 8844 v.reset(OpAMD64MOVWstoreconstidx2) 8845 v.AuxInt = ValAndOff(x).add(off) 8846 v.Aux = mergeSym(sym1, sym2) 8847 v.AddArg(ptr) 8848 v.AddArg(idx) 8849 v.AddArg(mem) 8850 return true 8851 } 8852 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 8853 // cond: 8854 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 8855 for { 8856 x := v.AuxInt 8857 sym := v.Aux 8858 v_0 := v.Args[0] 8859 if v_0.Op != OpAMD64ADDQ { 8860 break 8861 
} 8862 ptr := v_0.Args[0] 8863 idx := v_0.Args[1] 8864 mem := v.Args[1] 8865 v.reset(OpAMD64MOVWstoreconstidx1) 8866 v.AuxInt = x 8867 v.Aux = sym 8868 v.AddArg(ptr) 8869 v.AddArg(idx) 8870 v.AddArg(mem) 8871 return true 8872 } 8873 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 8874 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 8875 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 8876 for { 8877 c := v.AuxInt 8878 s := v.Aux 8879 p := v.Args[0] 8880 x := v.Args[1] 8881 if x.Op != OpAMD64MOVWstoreconst { 8882 break 8883 } 8884 a := x.AuxInt 8885 if x.Aux != s { 8886 break 8887 } 8888 if p != x.Args[0] { 8889 break 8890 } 8891 mem := x.Args[1] 8892 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 8893 break 8894 } 8895 v.reset(OpAMD64MOVLstoreconst) 8896 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 8897 v.Aux = s 8898 v.AddArg(p) 8899 v.AddArg(mem) 8900 return true 8901 } 8902 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 8903 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8904 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8905 for { 8906 sc := v.AuxInt 8907 sym1 := v.Aux 8908 v_0 := v.Args[0] 8909 if v_0.Op != OpAMD64LEAL { 8910 break 8911 } 8912 off := v_0.AuxInt 8913 sym2 := v_0.Aux 8914 ptr := v_0.Args[0] 8915 mem := v.Args[1] 8916 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8917 break 8918 } 8919 v.reset(OpAMD64MOVWstoreconst) 8920 v.AuxInt = ValAndOff(sc).add(off) 8921 v.Aux = mergeSym(sym1, sym2) 8922 v.AddArg(ptr) 8923 v.AddArg(mem) 8924 return true 8925 } 8926 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 8927 // cond: ValAndOff(sc).canAdd(off) 8928 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8929 for { 8930 sc := v.AuxInt 8931 s := v.Aux 8932 v_0 := v.Args[0] 8933 if v_0.Op != OpAMD64ADDLconst { 8934 break 8935 } 8936 off := v_0.AuxInt 8937 ptr := v_0.Args[0] 8938 mem := v.Args[1] 8939 if !(ValAndOff(sc).canAdd(off)) { 8940 break 8941 } 8942 v.reset(OpAMD64MOVWstoreconst) 8943 v.AuxInt = ValAndOff(sc).add(off) 8944 v.Aux = s 8945 v.AddArg(ptr) 8946 v.AddArg(mem) 8947 return true 8948 } 8949 return false 8950 } 8951 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { 8952 b := v.Block 8953 _ = b 8954 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 8955 // cond: 8956 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 8957 for { 8958 c := v.AuxInt 8959 sym := v.Aux 8960 ptr := v.Args[0] 8961 v_1 := v.Args[1] 8962 if v_1.Op != OpAMD64SHLQconst { 8963 break 8964 } 8965 if v_1.AuxInt != 1 { 8966 break 8967 } 8968 idx := v_1.Args[0] 8969 mem := v.Args[2] 8970 v.reset(OpAMD64MOVWstoreconstidx2) 8971 v.AuxInt = c 8972 v.Aux = sym 8973 v.AddArg(ptr) 8974 v.AddArg(idx) 8975 v.AddArg(mem) 8976 return true 8977 } 8978 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 8979 // cond: 8980 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8981 for { 8982 x := v.AuxInt 8983 sym := v.Aux 8984 v_0 := v.Args[0] 8985 if v_0.Op != OpAMD64ADDQconst { 8986 break 8987 } 8988 c := v_0.AuxInt 8989 ptr := v_0.Args[0] 8990 idx := v.Args[1] 8991 mem := v.Args[2] 8992 v.reset(OpAMD64MOVWstoreconstidx1) 8993 v.AuxInt = ValAndOff(x).add(c) 8994 v.Aux = sym 8995 v.AddArg(ptr) 8996 
v.AddArg(idx) 8997 v.AddArg(mem) 8998 return true 8999 } 9000 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 9001 // cond: 9002 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9003 for { 9004 x := v.AuxInt 9005 sym := v.Aux 9006 ptr := v.Args[0] 9007 v_1 := v.Args[1] 9008 if v_1.Op != OpAMD64ADDQconst { 9009 break 9010 } 9011 c := v_1.AuxInt 9012 idx := v_1.Args[0] 9013 mem := v.Args[2] 9014 v.reset(OpAMD64MOVWstoreconstidx1) 9015 v.AuxInt = ValAndOff(x).add(c) 9016 v.Aux = sym 9017 v.AddArg(ptr) 9018 v.AddArg(idx) 9019 v.AddArg(mem) 9020 return true 9021 } 9022 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 9023 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9024 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 9025 for { 9026 c := v.AuxInt 9027 s := v.Aux 9028 p := v.Args[0] 9029 i := v.Args[1] 9030 x := v.Args[2] 9031 if x.Op != OpAMD64MOVWstoreconstidx1 { 9032 break 9033 } 9034 a := x.AuxInt 9035 if x.Aux != s { 9036 break 9037 } 9038 if p != x.Args[0] { 9039 break 9040 } 9041 if i != x.Args[1] { 9042 break 9043 } 9044 mem := x.Args[2] 9045 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9046 break 9047 } 9048 v.reset(OpAMD64MOVLstoreconstidx1) 9049 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9050 v.Aux = s 9051 v.AddArg(p) 9052 v.AddArg(i) 9053 v.AddArg(mem) 9054 return true 9055 } 9056 return false 9057 } 9058 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { 9059 b := v.Block 9060 _ = b 9061 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 9062 // cond: 9063 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9064 for { 9065 x := v.AuxInt 9066 sym := v.Aux 9067 v_0 := v.Args[0] 9068 if v_0.Op != OpAMD64ADDQconst { 9069 break 9070 } 9071 c := v_0.AuxInt 9072 ptr := v_0.Args[0] 9073 idx := v.Args[1] 9074 mem := v.Args[2] 9075 v.reset(OpAMD64MOVWstoreconstidx2) 9076 v.AuxInt = ValAndOff(x).add(c) 9077 v.Aux = sym 9078 v.AddArg(ptr) 9079 v.AddArg(idx) 9080 v.AddArg(mem) 9081 return true 9082 } 9083 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 9084 // cond: 9085 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 9086 for { 9087 x := v.AuxInt 9088 sym := v.Aux 9089 ptr := v.Args[0] 9090 v_1 := v.Args[1] 9091 if v_1.Op != OpAMD64ADDQconst { 9092 break 9093 } 9094 c := v_1.AuxInt 9095 idx := v_1.Args[0] 9096 mem := v.Args[2] 9097 v.reset(OpAMD64MOVWstoreconstidx2) 9098 v.AuxInt = ValAndOff(x).add(2 * c) 9099 v.Aux = sym 9100 v.AddArg(ptr) 9101 v.AddArg(idx) 9102 v.AddArg(mem) 9103 return true 9104 } 9105 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 9106 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9107 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 9108 for { 9109 c := v.AuxInt 9110 s := v.Aux 9111 p := v.Args[0] 9112 i := v.Args[1] 9113 x := v.Args[2] 9114 if x.Op != OpAMD64MOVWstoreconstidx2 { 9115 break 9116 } 9117 a := x.AuxInt 9118 if x.Aux != s { 9119 break 9120 } 9121 if p != x.Args[0] { 9122 break 9123 } 9124 if i != x.Args[1] { 9125 break 9126 } 9127 mem := x.Args[2] 9128 if !(x.Uses == 1 && 
ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9129 break 9130 } 9131 v.reset(OpAMD64MOVLstoreconstidx1) 9132 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9133 v.Aux = s 9134 v.AddArg(p) 9135 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) 9136 v0.AuxInt = 1 9137 v0.AddArg(i) 9138 v.AddArg(v0) 9139 v.AddArg(mem) 9140 return true 9141 } 9142 return false 9143 } 9144 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { 9145 b := v.Block 9146 _ = b 9147 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 9148 // cond: 9149 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 9150 for { 9151 c := v.AuxInt 9152 sym := v.Aux 9153 ptr := v.Args[0] 9154 v_1 := v.Args[1] 9155 if v_1.Op != OpAMD64SHLQconst { 9156 break 9157 } 9158 if v_1.AuxInt != 1 { 9159 break 9160 } 9161 idx := v_1.Args[0] 9162 val := v.Args[2] 9163 mem := v.Args[3] 9164 v.reset(OpAMD64MOVWstoreidx2) 9165 v.AuxInt = c 9166 v.Aux = sym 9167 v.AddArg(ptr) 9168 v.AddArg(idx) 9169 v.AddArg(val) 9170 v.AddArg(mem) 9171 return true 9172 } 9173 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9174 // cond: 9175 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 9176 for { 9177 c := v.AuxInt 9178 sym := v.Aux 9179 v_0 := v.Args[0] 9180 if v_0.Op != OpAMD64ADDQconst { 9181 break 9182 } 9183 d := v_0.AuxInt 9184 ptr := v_0.Args[0] 9185 idx := v.Args[1] 9186 val := v.Args[2] 9187 mem := v.Args[3] 9188 v.reset(OpAMD64MOVWstoreidx1) 9189 v.AuxInt = c + d 9190 v.Aux = sym 9191 v.AddArg(ptr) 9192 v.AddArg(idx) 9193 v.AddArg(val) 9194 v.AddArg(mem) 9195 return true 9196 } 9197 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9198 // cond: 9199 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 9200 for { 9201 c := v.AuxInt 9202 sym := v.Aux 9203 ptr := v.Args[0] 9204 v_1 := v.Args[1] 9205 if v_1.Op != OpAMD64ADDQconst { 9206 break 9207 } 9208 d := v_1.AuxInt 9209 idx := v_1.Args[0] 9210 val := v.Args[2] 9211 mem := v.Args[3] 9212 v.reset(OpAMD64MOVWstoreidx1) 9213 v.AuxInt = c + d 9214 v.Aux = sym 9215 v.AddArg(ptr) 9216 v.AddArg(idx) 9217 v.AddArg(val) 9218 v.AddArg(mem) 9219 return true 9220 } 9221 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 9222 // cond: x.Uses == 1 && clobber(x) 9223 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 9224 for { 9225 i := v.AuxInt 9226 s := v.Aux 9227 p := v.Args[0] 9228 idx := v.Args[1] 9229 v_2 := v.Args[2] 9230 if v_2.Op != OpAMD64SHRQconst { 9231 break 9232 } 9233 if v_2.AuxInt != 16 { 9234 break 9235 } 9236 w := v_2.Args[0] 9237 x := v.Args[3] 9238 if x.Op != OpAMD64MOVWstoreidx1 { 9239 break 9240 } 9241 if x.AuxInt != i-2 { 9242 break 9243 } 9244 if x.Aux != s { 9245 break 9246 } 9247 if p != x.Args[0] { 9248 break 9249 } 9250 if idx != x.Args[1] { 9251 break 9252 } 9253 if w != x.Args[2] { 9254 break 9255 } 9256 mem := x.Args[3] 9257 if !(x.Uses == 1 && clobber(x)) { 9258 break 9259 } 9260 v.reset(OpAMD64MOVLstoreidx1) 9261 v.AuxInt = i - 2 9262 v.Aux = s 9263 v.AddArg(p) 9264 v.AddArg(idx) 9265 v.AddArg(w) 9266 v.AddArg(mem) 9267 return true 9268 } 9269 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 9270 // cond: x.Uses == 1 && clobber(x) 9271 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 9272 for { 9273 i := v.AuxInt 9274 s := v.Aux 9275 p := v.Args[0] 9276 idx := v.Args[1] 9277 v_2 := v.Args[2] 9278 if v_2.Op != 
OpAMD64SHRQconst { 9279 break 9280 } 9281 j := v_2.AuxInt 9282 w := v_2.Args[0] 9283 x := v.Args[3] 9284 if x.Op != OpAMD64MOVWstoreidx1 { 9285 break 9286 } 9287 if x.AuxInt != i-2 { 9288 break 9289 } 9290 if x.Aux != s { 9291 break 9292 } 9293 if p != x.Args[0] { 9294 break 9295 } 9296 if idx != x.Args[1] { 9297 break 9298 } 9299 w0 := x.Args[2] 9300 if w0.Op != OpAMD64SHRQconst { 9301 break 9302 } 9303 if w0.AuxInt != j-16 { 9304 break 9305 } 9306 if w != w0.Args[0] { 9307 break 9308 } 9309 mem := x.Args[3] 9310 if !(x.Uses == 1 && clobber(x)) { 9311 break 9312 } 9313 v.reset(OpAMD64MOVLstoreidx1) 9314 v.AuxInt = i - 2 9315 v.Aux = s 9316 v.AddArg(p) 9317 v.AddArg(idx) 9318 v.AddArg(w0) 9319 v.AddArg(mem) 9320 return true 9321 } 9322 return false 9323 } 9324 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { 9325 b := v.Block 9326 _ = b 9327 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9328 // cond: 9329 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 9330 for { 9331 c := v.AuxInt 9332 sym := v.Aux 9333 v_0 := v.Args[0] 9334 if v_0.Op != OpAMD64ADDQconst { 9335 break 9336 } 9337 d := v_0.AuxInt 9338 ptr := v_0.Args[0] 9339 idx := v.Args[1] 9340 val := v.Args[2] 9341 mem := v.Args[3] 9342 v.reset(OpAMD64MOVWstoreidx2) 9343 v.AuxInt = c + d 9344 v.Aux = sym 9345 v.AddArg(ptr) 9346 v.AddArg(idx) 9347 v.AddArg(val) 9348 v.AddArg(mem) 9349 return true 9350 } 9351 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9352 // cond: 9353 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 9354 for { 9355 c := v.AuxInt 9356 sym := v.Aux 9357 ptr := v.Args[0] 9358 v_1 := v.Args[1] 9359 if v_1.Op != OpAMD64ADDQconst { 9360 break 9361 } 9362 d := v_1.AuxInt 9363 idx := v_1.Args[0] 9364 val := v.Args[2] 9365 mem := v.Args[3] 9366 v.reset(OpAMD64MOVWstoreidx2) 9367 v.AuxInt = c + 2*d 9368 v.Aux = sym 9369 v.AddArg(ptr) 9370 v.AddArg(idx) 9371 v.AddArg(val) 9372 v.AddArg(mem) 9373 return true 9374 } 9375 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 9376 // cond: x.Uses == 1 && clobber(x) 9377 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 9378 for { 9379 i := v.AuxInt 9380 s := v.Aux 9381 p := v.Args[0] 9382 idx := v.Args[1] 9383 v_2 := v.Args[2] 9384 if v_2.Op != OpAMD64SHRQconst { 9385 break 9386 } 9387 if v_2.AuxInt != 16 { 9388 break 9389 } 9390 w := v_2.Args[0] 9391 x := v.Args[3] 9392 if x.Op != OpAMD64MOVWstoreidx2 { 9393 break 9394 } 9395 if x.AuxInt != i-2 { 9396 break 9397 } 9398 if x.Aux != s { 9399 break 9400 } 9401 if p != x.Args[0] { 9402 break 9403 } 9404 if idx != x.Args[1] { 9405 break 9406 } 9407 if w != x.Args[2] { 9408 break 9409 } 9410 mem := x.Args[3] 9411 if !(x.Uses == 1 && clobber(x)) { 9412 break 9413 } 9414 v.reset(OpAMD64MOVLstoreidx1) 9415 v.AuxInt = i - 2 9416 v.Aux = s 9417 v.AddArg(p) 9418 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 9419 v0.AuxInt = 1 9420 v0.AddArg(idx) 9421 v.AddArg(v0) 9422 v.AddArg(w) 9423 v.AddArg(mem) 9424 return true 9425 } 9426 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 9427 // cond: x.Uses == 1 && clobber(x) 9428 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 9429 for { 9430 i := v.AuxInt 9431 s := v.Aux 9432 p := v.Args[0] 9433 idx := v.Args[1] 9434 v_2 := v.Args[2] 9435 if v_2.Op != OpAMD64SHRQconst { 9436 break 9437 } 9438 j := v_2.AuxInt 9439 w := v_2.Args[0] 9440 x := 
v.Args[3] 9441 if x.Op != OpAMD64MOVWstoreidx2 { 9442 break 9443 } 9444 if x.AuxInt != i-2 { 9445 break 9446 } 9447 if x.Aux != s { 9448 break 9449 } 9450 if p != x.Args[0] { 9451 break 9452 } 9453 if idx != x.Args[1] { 9454 break 9455 } 9456 w0 := x.Args[2] 9457 if w0.Op != OpAMD64SHRQconst { 9458 break 9459 } 9460 if w0.AuxInt != j-16 { 9461 break 9462 } 9463 if w != w0.Args[0] { 9464 break 9465 } 9466 mem := x.Args[3] 9467 if !(x.Uses == 1 && clobber(x)) { 9468 break 9469 } 9470 v.reset(OpAMD64MOVLstoreidx1) 9471 v.AuxInt = i - 2 9472 v.Aux = s 9473 v.AddArg(p) 9474 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 9475 v0.AuxInt = 1 9476 v0.AddArg(idx) 9477 v.AddArg(v0) 9478 v.AddArg(w0) 9479 v.AddArg(mem) 9480 return true 9481 } 9482 return false 9483 } 9484 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 9485 b := v.Block 9486 _ = b 9487 // match: (MULL x (MOVLconst [c])) 9488 // cond: 9489 // result: (MULLconst [c] x) 9490 for { 9491 x := v.Args[0] 9492 v_1 := v.Args[1] 9493 if v_1.Op != OpAMD64MOVLconst { 9494 break 9495 } 9496 c := v_1.AuxInt 9497 v.reset(OpAMD64MULLconst) 9498 v.AuxInt = c 9499 v.AddArg(x) 9500 return true 9501 } 9502 // match: (MULL (MOVLconst [c]) x) 9503 // cond: 9504 // result: (MULLconst [c] x) 9505 for { 9506 v_0 := v.Args[0] 9507 if v_0.Op != OpAMD64MOVLconst { 9508 break 9509 } 9510 c := v_0.AuxInt 9511 x := v.Args[1] 9512 v.reset(OpAMD64MULLconst) 9513 v.AuxInt = c 9514 v.AddArg(x) 9515 return true 9516 } 9517 return false 9518 } 9519 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 9520 b := v.Block 9521 _ = b 9522 // match: (MULLconst [c] (MULLconst [d] x)) 9523 // cond: 9524 // result: (MULLconst [int64(int32(c * d))] x) 9525 for { 9526 c := v.AuxInt 9527 v_0 := v.Args[0] 9528 if v_0.Op != OpAMD64MULLconst { 9529 break 9530 } 9531 d := v_0.AuxInt 9532 x := v_0.Args[0] 9533 v.reset(OpAMD64MULLconst) 9534 v.AuxInt = int64(int32(c * d)) 9535 v.AddArg(x) 9536 return true 9537 } 9538 // match: (MULLconst [c] (MOVLconst [d])) 9539 // cond: 9540 // result: (MOVLconst [int64(int32(c*d))]) 9541 for { 9542 c := v.AuxInt 9543 v_0 := v.Args[0] 9544 if v_0.Op != OpAMD64MOVLconst { 9545 break 9546 } 9547 d := v_0.AuxInt 9548 v.reset(OpAMD64MOVLconst) 9549 v.AuxInt = int64(int32(c * d)) 9550 return true 9551 } 9552 return false 9553 } 9554 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 9555 b := v.Block 9556 _ = b 9557 // match: (MULQ x (MOVQconst [c])) 9558 // cond: is32Bit(c) 9559 // result: (MULQconst [c] x) 9560 for { 9561 x := v.Args[0] 9562 v_1 := v.Args[1] 9563 if v_1.Op != OpAMD64MOVQconst { 9564 break 9565 } 9566 c := v_1.AuxInt 9567 if !(is32Bit(c)) { 9568 break 9569 } 9570 v.reset(OpAMD64MULQconst) 9571 v.AuxInt = c 9572 v.AddArg(x) 9573 return true 9574 } 9575 // match: (MULQ (MOVQconst [c]) x) 9576 // cond: is32Bit(c) 9577 // result: (MULQconst [c] x) 9578 for { 9579 v_0 := v.Args[0] 9580 if v_0.Op != OpAMD64MOVQconst { 9581 break 9582 } 9583 c := v_0.AuxInt 9584 x := v.Args[1] 9585 if !(is32Bit(c)) { 9586 break 9587 } 9588 v.reset(OpAMD64MULQconst) 9589 v.AuxInt = c 9590 v.AddArg(x) 9591 return true 9592 } 9593 return false 9594 } 9595 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 9596 b := v.Block 9597 _ = b 9598 // match: (MULQconst [c] (MULQconst [d] x)) 9599 // cond: is32Bit(c*d) 9600 // result: (MULQconst [c * d] x) 9601 for { 9602 c := v.AuxInt 9603 v_0 := v.Args[0] 9604 if v_0.Op != OpAMD64MULQconst { 9605 break 9606 } 9607 d := v_0.AuxInt 9608 x := 
v_0.Args[0] 9609 if !(is32Bit(c * d)) { 9610 break 9611 } 9612 v.reset(OpAMD64MULQconst) 9613 v.AuxInt = c * d 9614 v.AddArg(x) 9615 return true 9616 } 9617 // match: (MULQconst [-1] x) 9618 // cond: 9619 // result: (NEGQ x) 9620 for { 9621 if v.AuxInt != -1 { 9622 break 9623 } 9624 x := v.Args[0] 9625 v.reset(OpAMD64NEGQ) 9626 v.AddArg(x) 9627 return true 9628 } 9629 // match: (MULQconst [0] _) 9630 // cond: 9631 // result: (MOVQconst [0]) 9632 for { 9633 if v.AuxInt != 0 { 9634 break 9635 } 9636 v.reset(OpAMD64MOVQconst) 9637 v.AuxInt = 0 9638 return true 9639 } 9640 // match: (MULQconst [1] x) 9641 // cond: 9642 // result: x 9643 for { 9644 if v.AuxInt != 1 { 9645 break 9646 } 9647 x := v.Args[0] 9648 v.reset(OpCopy) 9649 v.Type = x.Type 9650 v.AddArg(x) 9651 return true 9652 } 9653 // match: (MULQconst [3] x) 9654 // cond: 9655 // result: (LEAQ2 x x) 9656 for { 9657 if v.AuxInt != 3 { 9658 break 9659 } 9660 x := v.Args[0] 9661 v.reset(OpAMD64LEAQ2) 9662 v.AddArg(x) 9663 v.AddArg(x) 9664 return true 9665 } 9666 // match: (MULQconst [5] x) 9667 // cond: 9668 // result: (LEAQ4 x x) 9669 for { 9670 if v.AuxInt != 5 { 9671 break 9672 } 9673 x := v.Args[0] 9674 v.reset(OpAMD64LEAQ4) 9675 v.AddArg(x) 9676 v.AddArg(x) 9677 return true 9678 } 9679 // match: (MULQconst [7] x) 9680 // cond: 9681 // result: (LEAQ8 (NEGQ <v.Type> x) x) 9682 for { 9683 if v.AuxInt != 7 { 9684 break 9685 } 9686 x := v.Args[0] 9687 v.reset(OpAMD64LEAQ8) 9688 v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type) 9689 v0.AddArg(x) 9690 v.AddArg(v0) 9691 v.AddArg(x) 9692 return true 9693 } 9694 // match: (MULQconst [9] x) 9695 // cond: 9696 // result: (LEAQ8 x x) 9697 for { 9698 if v.AuxInt != 9 { 9699 break 9700 } 9701 x := v.Args[0] 9702 v.reset(OpAMD64LEAQ8) 9703 v.AddArg(x) 9704 v.AddArg(x) 9705 return true 9706 } 9707 // match: (MULQconst [11] x) 9708 // cond: 9709 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 9710 for { 9711 if v.AuxInt != 11 { 9712 break 9713 } 9714 x := v.Args[0] 9715 v.reset(OpAMD64LEAQ2) 9716 v.AddArg(x) 9717 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 9718 v0.AddArg(x) 9719 v0.AddArg(x) 9720 v.AddArg(v0) 9721 return true 9722 } 9723 // match: (MULQconst [13] x) 9724 // cond: 9725 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 9726 for { 9727 if v.AuxInt != 13 { 9728 break 9729 } 9730 x := v.Args[0] 9731 v.reset(OpAMD64LEAQ4) 9732 v.AddArg(x) 9733 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 9734 v0.AddArg(x) 9735 v0.AddArg(x) 9736 v.AddArg(v0) 9737 return true 9738 } 9739 // match: (MULQconst [21] x) 9740 // cond: 9741 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 9742 for { 9743 if v.AuxInt != 21 { 9744 break 9745 } 9746 x := v.Args[0] 9747 v.reset(OpAMD64LEAQ4) 9748 v.AddArg(x) 9749 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 9750 v0.AddArg(x) 9751 v0.AddArg(x) 9752 v.AddArg(v0) 9753 return true 9754 } 9755 // match: (MULQconst [25] x) 9756 // cond: 9757 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 9758 for { 9759 if v.AuxInt != 25 { 9760 break 9761 } 9762 x := v.Args[0] 9763 v.reset(OpAMD64LEAQ8) 9764 v.AddArg(x) 9765 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 9766 v0.AddArg(x) 9767 v0.AddArg(x) 9768 v.AddArg(v0) 9769 return true 9770 } 9771 // match: (MULQconst [37] x) 9772 // cond: 9773 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 9774 for { 9775 if v.AuxInt != 37 { 9776 break 9777 } 9778 x := v.Args[0] 9779 v.reset(OpAMD64LEAQ4) 9780 v.AddArg(x) 9781 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 9782 v0.AddArg(x) 9783 v0.AddArg(x) 9784 v.AddArg(v0) 9785 return true 9786 } 9787 // match: 
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c)
	// result: (SHLQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
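// The ORL rules below go beyond constant folding: the load-combining
// patterns recognize an OR tree of byte loads from consecutive offsets,
// each shifted into place by 8, 16 or 24 bits, and replace the tree with a
// single wider little-endian load. The cond clauses require each partial
// value to be single-use (x0.Uses == 1, s0.Uses == 1, ...), require
// mergePoint(b, x0, x1, ...) to find a block where all the loads are
// available, and use clobber to mark the old values dead. Roughly, the
// 4-byte pattern corresponds to this Go source shape (illustrative only,
// not part of the generated code):
//
//	u := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
//
// which the rewrite turns into one 32-bit load.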
func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
	// result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpAMD64ORL {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpAMD64ORL {
			break
		}
		x0 := o1.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s0 := o1.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		s1 := o0.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpAMD64MOVBload {
			break
		}
		if x2.AuxInt != i+2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		s2 := v.Args[1]
		if s2.Op != OpAMD64SHLLconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpAMD64MOVBload {
			break
		}
		if x3.AuxInt != i+3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if mem != x3.Args[1] {
			break
		}
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
	// result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpAMD64ORL {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpAMD64ORL {
			break
		}
		x0 := o1.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s0 := o1.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		s1 := o0.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x2.AuxInt != i+2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		if mem != x2.Args[2] {
			break
		}
		s2 := v.Args[1]
		if s2.Op != OpAMD64SHLLconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x3.AuxInt != i+3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		if mem != x3.Args[2] {
			break
		}
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
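// ORQ gets the analogous treatment at 64 bits: an OR tree of eight byte
// loads at offsets i..i+7, shifted by 8 through 56 bits, collapses into a
// single MOVQload (or MOVQloadidx1 for the indexed variant), again only
// when every intermediate OR, shift, and load is single-use and all the
// loads reach a common merge point. Illustratively (not generated code):
//
//	u := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
//		uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
//
// becomes one 64-bit little-endian load.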
func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpAMD64ORQ {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpAMD64ORQ {
			break
		}
		o2 := o1.Args[0]
		if o2.Op != OpAMD64ORQ {
			break
		}
		o3 := o2.Args[0]
		if o3.Op != OpAMD64ORQ {
			break
		}
		o4 := o3.Args[0]
		if o4.Op != OpAMD64ORQ {
			break
		}
		o5 := o4.Args[0]
		if o5.Op != OpAMD64ORQ {
			break
		}
		x0 := o5.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s0 := o5.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		s1 := o4.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpAMD64MOVBload {
			break
		}
		if x2.AuxInt != i+2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		s2 := o3.Args[1]
		if s2.Op != OpAMD64SHLQconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpAMD64MOVBload {
			break
		}
		if x3.AuxInt != i+3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if mem != x3.Args[1] {
			break
		}
		s3 := o2.Args[1]
		if s3.Op != OpAMD64SHLQconst {
			break
		}
		if s3.AuxInt != 32 {
			break
		}
		x4 := s3.Args[0]
		if x4.Op != OpAMD64MOVBload {
			break
		}
		if x4.AuxInt != i+4 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if mem != x4.Args[1] {
			break
		}
		s4 := o1.Args[1]
		if s4.Op != OpAMD64SHLQconst {
			break
		}
		if s4.AuxInt != 40 {
			break
		}
		x5 := s4.Args[0]
		if x5.Op != OpAMD64MOVBload {
			break
		}
		if x5.AuxInt != i+5 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if mem != x5.Args[1] {
			break
		}
		s5 := o0.Args[1]
		if s5.Op != OpAMD64SHLQconst {
			break
		}
		if s5.AuxInt != 48 {
			break
		}
		x6 := s5.Args[0]
		if x6.Op != OpAMD64MOVBload {
			break
		}
		if x6.AuxInt != i+6 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if mem != x6.Args[1] {
			break
		}
		s6 := v.Args[1]
		if s6.Op != OpAMD64SHLQconst {
			break
		}
		if s6.AuxInt != 56 {
			break
		}
		x7 := s6.Args[0]
		if x7.Op != OpAMD64MOVBload {
			break
		}
		if x7.AuxInt != i+7 {
			break
		}
		if x7.Aux != s {
			break
		}
		if p != x7.Args[0] {
			break
		}
		if mem != x7.Args[1] {
			break
		}
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpAMD64ORQ {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpAMD64ORQ {
			break
		}
		o2 := o1.Args[0]
		if o2.Op != OpAMD64ORQ {
			break
		}
		o3 := o2.Args[0]
		if o3.Op != OpAMD64ORQ {
			break
		}
		o4 := o3.Args[0]
		if o4.Op != OpAMD64ORQ {
			break
		}
		o5 := o4.Args[0]
		if o5.Op != OpAMD64ORQ {
			break
		}
		x0 := o5.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s0 := o5.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		s1 := o4.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x2.AuxInt != i+2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		if mem != x2.Args[2] {
			break
		}
		s2 := o3.Args[1]
		if s2.Op != OpAMD64SHLQconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x3.AuxInt != i+3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		if mem != x3.Args[2] {
			break
		}
		s3 := o2.Args[1]
		if s3.Op != OpAMD64SHLQconst {
			break
		}
		if s3.AuxInt != 32 {
			break
		}
		x4 := s3.Args[0]
		if x4.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x4.AuxInt != i+4 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		if mem != x4.Args[2] {
			break
		}
		s4 := o1.Args[1]
		if s4.Op != OpAMD64SHLQconst {
			break
		}
		if s4.AuxInt != 40 {
			break
		}
		x5 := s4.Args[0]
		if x5.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x5.AuxInt != i+5 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		if mem != x5.Args[2] {
			break
		}
		s5 := o0.Args[1]
		if s5.Op != OpAMD64SHLQconst {
			break
		}
		if s5.AuxInt != 48 {
			break
		}
		x6 := s5.Args[0]
		if x6.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x6.AuxInt != i+6 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		if mem != x6.Args[2] {
			break
		}
		s6 := v.Args[1]
		if s6.Op != OpAMD64SHLQconst {
			break
		}
		if s6.AuxInt != 56 {
			break
		}
		x7 := s6.Args[0]
		if x7.Op != OpAMD64MOVBloadidx1 {
			break
		}
		if x7.AuxInt != i+7 {
			break
		}
		if x7.Aux != s {
			break
		}
		if p != x7.Args[0] {
			break
		}
		if idx != x7.Args[1] {
			break
		}
		if mem != x7.Args[2] {
			break
		}
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
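// The ROL*const rules compose constant rotates: rotating twice adds the
// counts modulo the operand width, hence the masks &7, &15, &31 and &63 for
// the 8-, 16-, 32- and 64-bit forms. For example,
// (ROLLconst [20] (ROLLconst [25] x)) folds to (ROLLconst [(20+25)&31] x),
// i.e. (ROLLconst [13] x), and a rotate by 0 reduces to a copy of x.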
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
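// The shift rules below fold a variable shift whose count is a constant
// into the *const form, masking the count the way the hardware does: x86
// shift instructions use only the low 5 bits of the count in 8/16/32-bit
// widths and the low 6 bits in 64-bit width, which is why even SARB and
// SARW mask with c&31 while the Q forms mask with c&63. For the same
// reason, a count already wrapped in (ANDLconst [31] y) or
// (ANDQconst [63] y) is redundant and the AND is dropped.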
func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ANDLconst [31] y))
	// cond:
	// result: (SARL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1.AuxInt != 31 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ANDQconst [63] y))
	// cond:
	// result: (SARQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1.AuxInt != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
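// SBBLcarrymask and SBBQcarrymask produce an all-zeros or all-ones mask
// from the carry flag (the lowered form of subtracting a register from
// itself with borrow). With known flags the mask is a constant: the
// unsigned-less-than cases (FlagLT_ULT, FlagGT_ULT) set the carry and
// yield -1; the others yield 0.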
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
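// The SET* rules below fold known flag values to constant 0 or 1 and push
// InvertFlags through by swapping the condition: SETA <-> SETB,
// SETAE <-> SETBE, SETG <-> SETL, SETGE <-> SETLE, while SETEQ and SETNE
// are symmetric and absorb the inversion unchanged. For example,
// (SETA (FlagLT_UGT)) is (MOVLconst [1]) because that flag state encodes
// "signed less than, unsigned greater than".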
func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
v_1.AuxInt 12370 v.reset(OpAMD64SHLQconst) 12371 v.AuxInt = c & 63 12372 v.AddArg(x) 12373 return true 12374 } 12375 // match: (SHLQ x (MOVLconst [c])) 12376 // cond: 12377 // result: (SHLQconst [c&63] x) 12378 for { 12379 x := v.Args[0] 12380 v_1 := v.Args[1] 12381 if v_1.Op != OpAMD64MOVLconst { 12382 break 12383 } 12384 c := v_1.AuxInt 12385 v.reset(OpAMD64SHLQconst) 12386 v.AuxInt = c & 63 12387 v.AddArg(x) 12388 return true 12389 } 12390 // match: (SHLQ x (ANDQconst [63] y)) 12391 // cond: 12392 // result: (SHLQ x y) 12393 for { 12394 x := v.Args[0] 12395 v_1 := v.Args[1] 12396 if v_1.Op != OpAMD64ANDQconst { 12397 break 12398 } 12399 if v_1.AuxInt != 63 { 12400 break 12401 } 12402 y := v_1.Args[0] 12403 v.reset(OpAMD64SHLQ) 12404 v.AddArg(x) 12405 v.AddArg(y) 12406 return true 12407 } 12408 return false 12409 } 12410 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 12411 b := v.Block 12412 _ = b 12413 // match: (SHRB x (MOVQconst [c])) 12414 // cond: 12415 // result: (SHRBconst [c&31] x) 12416 for { 12417 x := v.Args[0] 12418 v_1 := v.Args[1] 12419 if v_1.Op != OpAMD64MOVQconst { 12420 break 12421 } 12422 c := v_1.AuxInt 12423 v.reset(OpAMD64SHRBconst) 12424 v.AuxInt = c & 31 12425 v.AddArg(x) 12426 return true 12427 } 12428 // match: (SHRB x (MOVLconst [c])) 12429 // cond: 12430 // result: (SHRBconst [c&31] x) 12431 for { 12432 x := v.Args[0] 12433 v_1 := v.Args[1] 12434 if v_1.Op != OpAMD64MOVLconst { 12435 break 12436 } 12437 c := v_1.AuxInt 12438 v.reset(OpAMD64SHRBconst) 12439 v.AuxInt = c & 31 12440 v.AddArg(x) 12441 return true 12442 } 12443 return false 12444 } 12445 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 12446 b := v.Block 12447 _ = b 12448 // match: (SHRL x (MOVQconst [c])) 12449 // cond: 12450 // result: (SHRLconst [c&31] x) 12451 for { 12452 x := v.Args[0] 12453 v_1 := v.Args[1] 12454 if v_1.Op != OpAMD64MOVQconst { 12455 break 12456 } 12457 c := v_1.AuxInt 12458 v.reset(OpAMD64SHRLconst) 12459 v.AuxInt = c & 31 12460 v.AddArg(x) 12461 return true 12462 } 12463 // match: (SHRL x (MOVLconst [c])) 12464 // cond: 12465 // result: (SHRLconst [c&31] x) 12466 for { 12467 x := v.Args[0] 12468 v_1 := v.Args[1] 12469 if v_1.Op != OpAMD64MOVLconst { 12470 break 12471 } 12472 c := v_1.AuxInt 12473 v.reset(OpAMD64SHRLconst) 12474 v.AuxInt = c & 31 12475 v.AddArg(x) 12476 return true 12477 } 12478 // match: (SHRL x (ANDLconst [31] y)) 12479 // cond: 12480 // result: (SHRL x y) 12481 for { 12482 x := v.Args[0] 12483 v_1 := v.Args[1] 12484 if v_1.Op != OpAMD64ANDLconst { 12485 break 12486 } 12487 if v_1.AuxInt != 31 { 12488 break 12489 } 12490 y := v_1.Args[0] 12491 v.reset(OpAMD64SHRL) 12492 v.AddArg(x) 12493 v.AddArg(y) 12494 return true 12495 } 12496 return false 12497 } 12498 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 12499 b := v.Block 12500 _ = b 12501 // match: (SHRQ x (MOVQconst [c])) 12502 // cond: 12503 // result: (SHRQconst [c&63] x) 12504 for { 12505 x := v.Args[0] 12506 v_1 := v.Args[1] 12507 if v_1.Op != OpAMD64MOVQconst { 12508 break 12509 } 12510 c := v_1.AuxInt 12511 v.reset(OpAMD64SHRQconst) 12512 v.AuxInt = c & 63 12513 v.AddArg(x) 12514 return true 12515 } 12516 // match: (SHRQ x (MOVLconst [c])) 12517 // cond: 12518 // result: (SHRQconst [c&63] x) 12519 for { 12520 x := v.Args[0] 12521 v_1 := v.Args[1] 12522 if v_1.Op != OpAMD64MOVLconst { 12523 break 12524 } 12525 c := v_1.AuxInt 12526 v.reset(OpAMD64SHRQconst) 12527 v.AuxInt = c & 63 12528 v.AddArg(x) 12529 return true 12530 } 12531 // match: 
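// NOTE (editorial annotation, not part of the generated rules): the shift
// lowerings in this stretch fold a constant count by masking it, c&31 for
// 32-bit-and-narrower shifts and c&63 for 64-bit shifts, which matches how
// x86 variable-count shift instructions truncate their count register; the
// (ANDLconst [31] y) / (ANDQconst [63] y) rules drop a redundant explicit
// mask for the same reason. A worked instance with a hypothetical count:
//
//	(SHLQ x (MOVQconst [70])) -> (SHLQconst [70&63] x) = (SHLQconst [6] x)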
func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ANDQconst [63] y))
	// cond:
	// result: (SHRQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1.AuxInt != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SHRW x (MOVQconst [c]))
	// cond:
	// result: (SHRWconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond:
	// result: (SHRWconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
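// NOTE (editorial annotation, not part of the generated rules): the XCHGL
// rules above, and the XCHGQ rules below, fold an ADDQconst or LEAQ address
// computation into the exchange's memory operand. The is32Bit guard checks
// that the combined displacement still fits the signed 32-bit offset field
// of an x86 addressing mode. A sketch with hypothetical offsets:
//
//	(XCHGL [8] {s} val (ADDQconst [16] ptr) mem) -> (XCHGL [24] {s} val ptr mem)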
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (XORL x (MOVLconst [c]))
	// cond:
	// result: (XORLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond:
	// result: (XORLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
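// NOTE (editorial annotation, not part of the generated rules): the XORL
// rules above are the SSA form of the classic "xor a register with itself"
// zeroing idiom, matched by comparing the two Args for pointer equality, plus
// constant folding through XORLconst. Chained, the rules fold a fully
// constant xor; a hypothetical run:
//
//	(XORL (MOVLconst [5]) (MOVLconst [3]))
//	  -> (XORLconst [3] (MOVLconst [5]))   // first XORL rule, c = 3
//	  -> (MOVLconst [6])                   // XORLconst fold: 3 ^ 5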
func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (XORQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
	for {
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Ctz32 <t> x)
	// cond:
	// result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v0 := b.NewValue0(v.Line, OpSelect0, t)
		v1 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Line, OpAMD64MOVLconst, t)
		v2.AuxInt = 32
		v.AddArg(v2)
		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
		v4 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Line, OpSelect0, t)
		v1 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
		v4 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (DeferCall [argwid] mem)
	// cond:
	// result: (CALLdefer [argwid] mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[0]
		v.reset(OpAMD64CALLdefer)
		v.AuxInt = argwid
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
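// NOTE (editorial annotation, not part of the generated rules): the integer
// division lowerings here share one shape: the machine DIV* op is given a
// tuple type holding both quotient and remainder, and Div* keeps Select0 of
// it. The corresponding Mod* rules elsewhere in this file take Select1 of the
// same op, so one hardware divide can serve both results:
//
//	(Div32 x y) -> (Select0 (DIVL x y))   // quotient half of the tuple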
func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
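// NOTE (editorial annotation, not part of the generated rules): in the
// comparison lowerings, the operand width is carried by the CMP variant
// (CMPB/CMPW/CMPL/CMPQ) while signedness is carried by the SET condition:
// signed >= uses SETGE, unsigned >= uses SETAE ("above or equal"), mirroring
// the x86 condition codes. Illustrative pairing from the rules themselves:
//
//	(Geq32  x y) -> (SETGE (CMPL x y))   // signed
//	(Geq32U x y) -> (SETAE (CMPL x y))   // unsigned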
func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (GoCall [argwid] mem)
	// cond:
	// result: (CALLgo [argwid] mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[0]
		v.reset(OpAMD64CALLgo)
		v.AuxInt = argwid
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
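// NOTE (editorial annotation, not part of the generated rules): floating
// point comparisons are lowered through UCOMISS/UCOMISD and the parity-aware
// SET*F variants (SETGF, SETGEF, SETEQF) rather than the integer SET forms,
// so comparisons involving NaN come out false as Go requires. Less-than and
// less-or-equal on floats are expressed by swapping the operands into a
// greater-than test; see, e.g., the Less64F rule later in this section, which
// emits (SETGF (UCOMISD y x)).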
14600 v0.AddArg(y) 14601 v.AddArg(v0) 14602 return true 14603 } 14604 } 14605 func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 14606 b := v.Block 14607 _ = b 14608 // match: (Greater8 x y) 14609 // cond: 14610 // result: (SETG (CMPB x y)) 14611 for { 14612 x := v.Args[0] 14613 y := v.Args[1] 14614 v.reset(OpAMD64SETG) 14615 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14616 v0.AddArg(x) 14617 v0.AddArg(y) 14618 v.AddArg(v0) 14619 return true 14620 } 14621 } 14622 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 14623 b := v.Block 14624 _ = b 14625 // match: (Greater8U x y) 14626 // cond: 14627 // result: (SETA (CMPB x y)) 14628 for { 14629 x := v.Args[0] 14630 y := v.Args[1] 14631 v.reset(OpAMD64SETA) 14632 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14633 v0.AddArg(x) 14634 v0.AddArg(y) 14635 v.AddArg(v0) 14636 return true 14637 } 14638 } 14639 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 14640 b := v.Block 14641 _ = b 14642 // match: (Hmul16 x y) 14643 // cond: 14644 // result: (HMULW x y) 14645 for { 14646 x := v.Args[0] 14647 y := v.Args[1] 14648 v.reset(OpAMD64HMULW) 14649 v.AddArg(x) 14650 v.AddArg(y) 14651 return true 14652 } 14653 } 14654 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 14655 b := v.Block 14656 _ = b 14657 // match: (Hmul16u x y) 14658 // cond: 14659 // result: (HMULWU x y) 14660 for { 14661 x := v.Args[0] 14662 y := v.Args[1] 14663 v.reset(OpAMD64HMULWU) 14664 v.AddArg(x) 14665 v.AddArg(y) 14666 return true 14667 } 14668 } 14669 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 14670 b := v.Block 14671 _ = b 14672 // match: (Hmul32 x y) 14673 // cond: 14674 // result: (HMULL x y) 14675 for { 14676 x := v.Args[0] 14677 y := v.Args[1] 14678 v.reset(OpAMD64HMULL) 14679 v.AddArg(x) 14680 v.AddArg(y) 14681 return true 14682 } 14683 } 14684 func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 14685 b := v.Block 14686 _ = b 14687 // match: (Hmul32u x y) 14688 // cond: 14689 // result: (HMULLU x y) 14690 for { 14691 x := v.Args[0] 14692 y := v.Args[1] 14693 v.reset(OpAMD64HMULLU) 14694 v.AddArg(x) 14695 v.AddArg(y) 14696 return true 14697 } 14698 } 14699 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 14700 b := v.Block 14701 _ = b 14702 // match: (Hmul64 x y) 14703 // cond: 14704 // result: (HMULQ x y) 14705 for { 14706 x := v.Args[0] 14707 y := v.Args[1] 14708 v.reset(OpAMD64HMULQ) 14709 v.AddArg(x) 14710 v.AddArg(y) 14711 return true 14712 } 14713 } 14714 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 14715 b := v.Block 14716 _ = b 14717 // match: (Hmul64u x y) 14718 // cond: 14719 // result: (HMULQU x y) 14720 for { 14721 x := v.Args[0] 14722 y := v.Args[1] 14723 v.reset(OpAMD64HMULQU) 14724 v.AddArg(x) 14725 v.AddArg(y) 14726 return true 14727 } 14728 } 14729 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 14730 b := v.Block 14731 _ = b 14732 // match: (Hmul8 x y) 14733 // cond: 14734 // result: (HMULB x y) 14735 for { 14736 x := v.Args[0] 14737 y := v.Args[1] 14738 v.reset(OpAMD64HMULB) 14739 v.AddArg(x) 14740 v.AddArg(y) 14741 return true 14742 } 14743 } 14744 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 14745 b := v.Block 14746 _ = b 14747 // match: (Hmul8u x y) 14748 // cond: 14749 // result: (HMULBU x y) 14750 for { 14751 x := v.Args[0] 14752 y := v.Args[1] 14753 v.reset(OpAMD64HMULBU) 14754 v.AddArg(x) 14755 v.AddArg(y) 14756 return true 14757 } 14758 } 14759 func 
rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool { 14760 b := v.Block 14761 _ = b 14762 // match: (Int64Hi x) 14763 // cond: 14764 // result: (SHRQconst [32] x) 14765 for { 14766 x := v.Args[0] 14767 v.reset(OpAMD64SHRQconst) 14768 v.AuxInt = 32 14769 v.AddArg(x) 14770 return true 14771 } 14772 } 14773 func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool { 14774 b := v.Block 14775 _ = b 14776 // match: (InterCall [argwid] entry mem) 14777 // cond: 14778 // result: (CALLinter [argwid] entry mem) 14779 for { 14780 argwid := v.AuxInt 14781 entry := v.Args[0] 14782 mem := v.Args[1] 14783 v.reset(OpAMD64CALLinter) 14784 v.AuxInt = argwid 14785 v.AddArg(entry) 14786 v.AddArg(mem) 14787 return true 14788 } 14789 } 14790 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 14791 b := v.Block 14792 _ = b 14793 // match: (IsInBounds idx len) 14794 // cond: 14795 // result: (SETB (CMPQ idx len)) 14796 for { 14797 idx := v.Args[0] 14798 len := v.Args[1] 14799 v.reset(OpAMD64SETB) 14800 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14801 v0.AddArg(idx) 14802 v0.AddArg(len) 14803 v.AddArg(v0) 14804 return true 14805 } 14806 } 14807 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 14808 b := v.Block 14809 _ = b 14810 // match: (IsNonNil p) 14811 // cond: config.PtrSize == 8 14812 // result: (SETNE (TESTQ p p)) 14813 for { 14814 p := v.Args[0] 14815 if !(config.PtrSize == 8) { 14816 break 14817 } 14818 v.reset(OpAMD64SETNE) 14819 v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags) 14820 v0.AddArg(p) 14821 v0.AddArg(p) 14822 v.AddArg(v0) 14823 return true 14824 } 14825 // match: (IsNonNil p) 14826 // cond: config.PtrSize == 4 14827 // result: (SETNE (TESTL p p)) 14828 for { 14829 p := v.Args[0] 14830 if !(config.PtrSize == 4) { 14831 break 14832 } 14833 v.reset(OpAMD64SETNE) 14834 v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags) 14835 v0.AddArg(p) 14836 v0.AddArg(p) 14837 v.AddArg(v0) 14838 return true 14839 } 14840 return false 14841 } 14842 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 14843 b := v.Block 14844 _ = b 14845 // match: (IsSliceInBounds idx len) 14846 // cond: 14847 // result: (SETBE (CMPQ idx len)) 14848 for { 14849 idx := v.Args[0] 14850 len := v.Args[1] 14851 v.reset(OpAMD64SETBE) 14852 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14853 v0.AddArg(idx) 14854 v0.AddArg(len) 14855 v.AddArg(v0) 14856 return true 14857 } 14858 } 14859 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 14860 b := v.Block 14861 _ = b 14862 // match: (Leq16 x y) 14863 // cond: 14864 // result: (SETLE (CMPW x y)) 14865 for { 14866 x := v.Args[0] 14867 y := v.Args[1] 14868 v.reset(OpAMD64SETLE) 14869 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14870 v0.AddArg(x) 14871 v0.AddArg(y) 14872 v.AddArg(v0) 14873 return true 14874 } 14875 } 14876 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 14877 b := v.Block 14878 _ = b 14879 // match: (Leq16U x y) 14880 // cond: 14881 // result: (SETBE (CMPW x y)) 14882 for { 14883 x := v.Args[0] 14884 y := v.Args[1] 14885 v.reset(OpAMD64SETBE) 14886 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14887 v0.AddArg(x) 14888 v0.AddArg(y) 14889 v.AddArg(v0) 14890 return true 14891 } 14892 } 14893 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 14894 b := v.Block 14895 _ = b 14896 // match: (Leq32 x y) 14897 // cond: 14898 // result: (SETLE (CMPL x y)) 14899 for { 14900 x := v.Args[0] 14901 y := v.Args[1] 14902 v.reset(OpAMD64SETLE) 14903 v0 := 
func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
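// The floating-point orderings (Leq32F/Leq64F above, Less32F/Less64F below)
// swap their operands: x <= y is computed as SETGEF on (UCOMIS y x). After
// UCOMISS/UCOMISD an unordered (NaN) comparison sets the carry flag, so the
// "greater"/"greater or equal" conditions, which require carry clear, come
// out false, which is exactly the result Go requires for NaN operands.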
func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
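// The Load rules below dispatch purely on the loaded type: 64-bit ints (and
// pointers on 8-byte targets) use MOVQload, 32-bit ints (and pointers on
// 4-byte targets) MOVLload, 16-bit ints MOVWload, booleans and 8-bit ints
// MOVBload, and floats the SSE loads MOVSSload/MOVSDload.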
func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot16 <t> x [c])
	// cond:
	// result: (ROLWconst <t> [c&15] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.Type = t
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot32 <t> x [c])
	// cond:
	// result: (ROLLconst <t> [c&31] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.Type = t
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot64 <t> x [c])
	// cond:
	// result: (ROLQconst <t> [c&63] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.Type = t
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot8 <t> x [c])
	// cond:
	// result: (ROLBconst <t> [c&7] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.Type = t
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
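// There are no 8-bit divide rules: Mod8/Mod8u below widen their operands to
// 16 bits and reuse DIVW/DIVWU, sidestepping the x86 8-bit divide, whose
// quotient and remainder are packed into AL/AH.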
func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
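// Lowering of Move (a memmove whose size is a compile-time constant) is a
// ladder: sizes 1/2/4/8/16 become a single load/store pair; sizes 3, 5, 6,
// 7 and 9..15 are covered by two (possibly overlapping) pairs; larger sizes
// that are not multiples of 16 peel off the remainder and recurse; copies of
// 32..1024 bytes in 16-byte units jump into Duff's device (DUFFCOPY); and
// anything bigger, or any target with noDuffDevice set, uses REP MOVSQ.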
func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Move [s] _ _ mem)
	// cond: SizeAndAlign(s).Size() == 0
	// result: mem
	for {
		s := v.AuxInt
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 1
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 2
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 4
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 8
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 16
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 16) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 3
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 3) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 5
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 5) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 6
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 6) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 7
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 7) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
	// result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = SizeAndAlign(s).Size() - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
		v0.AuxInt = SizeAndAlign(s).Size() - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
		v0.AuxInt = SizeAndAlign(s).Size() % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
		v1.AuxInt = SizeAndAlign(s).Size() % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
		v0.AuxInt = SizeAndAlign(s).Size() % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
		v1.AuxInt = SizeAndAlign(s).Size() % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
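	// DUFFCOPY's AuxInt selects the entry point into the runtime's Duff's
	// device copy routine: each 16-byte step is 14 bytes of code, counted
	// back from the end, hence 14*(64-size/16). REPMOVSQ instead gets an
	// explicit quadword count of size/8.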
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v0.AuxInt = SizeAndAlign(s).Size() / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
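// The 8/16/32-bit multiplies above all use the 32-bit MULL; the narrower
// results are simply truncations of it. For negation below, the integer
// forms use NEGL/NEGQ, while Neg32F/Neg64F flip the sign bit by XORing with
// a -0.0 constant (PXOR), since SSE has no floating-point negate
// instruction.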
func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
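// OffPtr adds a constant offset to a pointer. ADDQconst only carries a
// 32-bit sign-extended immediate, so offsets passing is32Bit use it
// directly; larger 64-bit offsets are first materialized with MOVQconst and
// added with ADDQ; 32-bit targets use ADDLconst.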
func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
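// The unsigned right shifts below mirror the left-shift lowering: AND with a
// carrymask that becomes zero once the count is out of range. Unlike the
// left shifts, Rsh16Ux* uses the width-specific SHRW and compares the count
// against 16 rather than 32, since a wider right shift would pull whatever
// bits sit above the 16-bit value in its register down into the result.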
func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
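// Signed right shifts cannot simply be zeroed for oversized counts; Go
// requires them to keep shifting in sign bits. The rules below therefore
// saturate the count instead, e.g. for (Rsh16x16 x y):
//
//	inrange = (y < 16) ? -1 : 0  // SBBLcarrymask (CMPWconst y [16])
//	count   = y | ^inrange       // ORL y (NOTL inrange)
//	result  = x >> count         // SARW (arithmetic shift)
//
// An out-of-range y makes count all ones, and an arithmetic shift by the
// (hardware-masked) maximum count leaves 0 or -1 according to the sign bit,
// which is the required semantics.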
func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
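		// Signed right shifts saturate instead of masking: SBBLcarrymask is
		// -1 when y < 64 (unsigned) and 0 otherwise, so ORing y with its NOT
		// leaves an in-range count unchanged and turns an oversized count
		// into all ones. SARQ then shifts by 63 (the CPU masks the count),
		// filling with copies of the sign bit, as Go semantics require.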
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpStore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Store [8] ptr val mem)
	// cond: is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		ptr := v.Args[0]
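		// The rules below are tried in order, so the float-typed cases come
		// first: if the is64BitFloat guard fails, the same-size integer rule
		// further down rewrites the store to MOVQstore instead.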
		val := v.Args[1]
		mem := v.Args[2]
		if !(is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store [4] ptr val mem)
	// cond: is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store [8] ptr val mem)
	// cond:
	// result: (MOVQstore ptr val mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store [4] ptr val mem)
	// cond:
	// result: (MOVLstore ptr val mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store [2] ptr val mem)
	// cond:
	// result: (MOVWstore ptr val mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store [1] ptr val mem)
	// cond:
	// result: (MOVBstore ptr val mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Zero [s] _ mem)
	// cond: SizeAndAlign(s).Size() == 0
	// result: mem
	for {
		s := v.AuxInt
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 1
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 2
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 4
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 8
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 3
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 3) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 5
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
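		// Odd sizes (3, 5, 6, 7) are zeroed with two constant stores: a wide
		// store of zero at offset 0 plus a second store, packed via
		// makeValAndOff, that covers (for size 7, overlaps) the tail bytes.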
		if !(SizeAndAlign(s).Size() == 5) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 6
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 6) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 7
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 7) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
	// result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
		v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
		v0.AuxInt = SizeAndAlign(s).Size() % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 16
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 16) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 24
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 24) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() == 32
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() == 32) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice
	// result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = SizeAndAlign(s).Size() - 8
		v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
		v0.AuxInt = 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = SizeAndAlign(s).Size()
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v0.AuxInt = SizeAndAlign(s).Size() / 8
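		// REP STOSQ stores RCX quadwords, so the count built here is the
		// byte size divided by 8. Mid-sized multiples of 8 were handled by
		// the Duff's-device rules above; this rule is the large-size (or
		// noDuffDevice) fallback.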
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
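		// (When the control is already a constant flags value, as in the
		// FlagEQ rule here, the branch is decided statically: the block
		// becomes a First block with nil control, and swapSuccessors puts
		// the surviving edge first in the not-taken cases.)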
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			_ = yes
			_ = no
19706 return true 19707 } 19708 // match: (UGE (FlagEQ) yes no) 19709 // cond: 19710 // result: (First nil yes no) 19711 for { 19712 v := b.Control 19713 if v.Op != OpAMD64FlagEQ { 19714 break 19715 } 19716 yes := b.Succs[0] 19717 no := b.Succs[1] 19718 b.Kind = BlockFirst 19719 b.SetControl(nil) 19720 _ = yes 19721 _ = no 19722 return true 19723 } 19724 // match: (UGE (FlagLT_ULT) yes no) 19725 // cond: 19726 // result: (First nil no yes) 19727 for { 19728 v := b.Control 19729 if v.Op != OpAMD64FlagLT_ULT { 19730 break 19731 } 19732 yes := b.Succs[0] 19733 no := b.Succs[1] 19734 b.Kind = BlockFirst 19735 b.SetControl(nil) 19736 b.swapSuccessors() 19737 _ = no 19738 _ = yes 19739 return true 19740 } 19741 // match: (UGE (FlagLT_UGT) yes no) 19742 // cond: 19743 // result: (First nil yes no) 19744 for { 19745 v := b.Control 19746 if v.Op != OpAMD64FlagLT_UGT { 19747 break 19748 } 19749 yes := b.Succs[0] 19750 no := b.Succs[1] 19751 b.Kind = BlockFirst 19752 b.SetControl(nil) 19753 _ = yes 19754 _ = no 19755 return true 19756 } 19757 // match: (UGE (FlagGT_ULT) yes no) 19758 // cond: 19759 // result: (First nil no yes) 19760 for { 19761 v := b.Control 19762 if v.Op != OpAMD64FlagGT_ULT { 19763 break 19764 } 19765 yes := b.Succs[0] 19766 no := b.Succs[1] 19767 b.Kind = BlockFirst 19768 b.SetControl(nil) 19769 b.swapSuccessors() 19770 _ = no 19771 _ = yes 19772 return true 19773 } 19774 // match: (UGE (FlagGT_UGT) yes no) 19775 // cond: 19776 // result: (First nil yes no) 19777 for { 19778 v := b.Control 19779 if v.Op != OpAMD64FlagGT_UGT { 19780 break 19781 } 19782 yes := b.Succs[0] 19783 no := b.Succs[1] 19784 b.Kind = BlockFirst 19785 b.SetControl(nil) 19786 _ = yes 19787 _ = no 19788 return true 19789 } 19790 case BlockAMD64UGT: 19791 // match: (UGT (InvertFlags cmp) yes no) 19792 // cond: 19793 // result: (ULT cmp yes no) 19794 for { 19795 v := b.Control 19796 if v.Op != OpAMD64InvertFlags { 19797 break 19798 } 19799 cmp := v.Args[0] 19800 yes := b.Succs[0] 19801 no := b.Succs[1] 19802 b.Kind = BlockAMD64ULT 19803 b.SetControl(cmp) 19804 _ = yes 19805 _ = no 19806 return true 19807 } 19808 // match: (UGT (FlagEQ) yes no) 19809 // cond: 19810 // result: (First nil no yes) 19811 for { 19812 v := b.Control 19813 if v.Op != OpAMD64FlagEQ { 19814 break 19815 } 19816 yes := b.Succs[0] 19817 no := b.Succs[1] 19818 b.Kind = BlockFirst 19819 b.SetControl(nil) 19820 b.swapSuccessors() 19821 _ = no 19822 _ = yes 19823 return true 19824 } 19825 // match: (UGT (FlagLT_ULT) yes no) 19826 // cond: 19827 // result: (First nil no yes) 19828 for { 19829 v := b.Control 19830 if v.Op != OpAMD64FlagLT_ULT { 19831 break 19832 } 19833 yes := b.Succs[0] 19834 no := b.Succs[1] 19835 b.Kind = BlockFirst 19836 b.SetControl(nil) 19837 b.swapSuccessors() 19838 _ = no 19839 _ = yes 19840 return true 19841 } 19842 // match: (UGT (FlagLT_UGT) yes no) 19843 // cond: 19844 // result: (First nil yes no) 19845 for { 19846 v := b.Control 19847 if v.Op != OpAMD64FlagLT_UGT { 19848 break 19849 } 19850 yes := b.Succs[0] 19851 no := b.Succs[1] 19852 b.Kind = BlockFirst 19853 b.SetControl(nil) 19854 _ = yes 19855 _ = no 19856 return true 19857 } 19858 // match: (UGT (FlagGT_ULT) yes no) 19859 // cond: 19860 // result: (First nil no yes) 19861 for { 19862 v := b.Control 19863 if v.Op != OpAMD64FlagGT_ULT { 19864 break 19865 } 19866 yes := b.Succs[0] 19867 no := b.Succs[1] 19868 b.Kind = BlockFirst 19869 b.SetControl(nil) 19870 b.swapSuccessors() 19871 _ = no 19872 _ = yes 19873 return true 19874 } 19875 // match: (UGT (FlagGT_UGT) 
yes no) 19876 // cond: 19877 // result: (First nil yes no) 19878 for { 19879 v := b.Control 19880 if v.Op != OpAMD64FlagGT_UGT { 19881 break 19882 } 19883 yes := b.Succs[0] 19884 no := b.Succs[1] 19885 b.Kind = BlockFirst 19886 b.SetControl(nil) 19887 _ = yes 19888 _ = no 19889 return true 19890 } 19891 case BlockAMD64ULE: 19892 // match: (ULE (InvertFlags cmp) yes no) 19893 // cond: 19894 // result: (UGE cmp yes no) 19895 for { 19896 v := b.Control 19897 if v.Op != OpAMD64InvertFlags { 19898 break 19899 } 19900 cmp := v.Args[0] 19901 yes := b.Succs[0] 19902 no := b.Succs[1] 19903 b.Kind = BlockAMD64UGE 19904 b.SetControl(cmp) 19905 _ = yes 19906 _ = no 19907 return true 19908 } 19909 // match: (ULE (FlagEQ) yes no) 19910 // cond: 19911 // result: (First nil yes no) 19912 for { 19913 v := b.Control 19914 if v.Op != OpAMD64FlagEQ { 19915 break 19916 } 19917 yes := b.Succs[0] 19918 no := b.Succs[1] 19919 b.Kind = BlockFirst 19920 b.SetControl(nil) 19921 _ = yes 19922 _ = no 19923 return true 19924 } 19925 // match: (ULE (FlagLT_ULT) yes no) 19926 // cond: 19927 // result: (First nil yes no) 19928 for { 19929 v := b.Control 19930 if v.Op != OpAMD64FlagLT_ULT { 19931 break 19932 } 19933 yes := b.Succs[0] 19934 no := b.Succs[1] 19935 b.Kind = BlockFirst 19936 b.SetControl(nil) 19937 _ = yes 19938 _ = no 19939 return true 19940 } 19941 // match: (ULE (FlagLT_UGT) yes no) 19942 // cond: 19943 // result: (First nil no yes) 19944 for { 19945 v := b.Control 19946 if v.Op != OpAMD64FlagLT_UGT { 19947 break 19948 } 19949 yes := b.Succs[0] 19950 no := b.Succs[1] 19951 b.Kind = BlockFirst 19952 b.SetControl(nil) 19953 b.swapSuccessors() 19954 _ = no 19955 _ = yes 19956 return true 19957 } 19958 // match: (ULE (FlagGT_ULT) yes no) 19959 // cond: 19960 // result: (First nil yes no) 19961 for { 19962 v := b.Control 19963 if v.Op != OpAMD64FlagGT_ULT { 19964 break 19965 } 19966 yes := b.Succs[0] 19967 no := b.Succs[1] 19968 b.Kind = BlockFirst 19969 b.SetControl(nil) 19970 _ = yes 19971 _ = no 19972 return true 19973 } 19974 // match: (ULE (FlagGT_UGT) yes no) 19975 // cond: 19976 // result: (First nil no yes) 19977 for { 19978 v := b.Control 19979 if v.Op != OpAMD64FlagGT_UGT { 19980 break 19981 } 19982 yes := b.Succs[0] 19983 no := b.Succs[1] 19984 b.Kind = BlockFirst 19985 b.SetControl(nil) 19986 b.swapSuccessors() 19987 _ = no 19988 _ = yes 19989 return true 19990 } 19991 case BlockAMD64ULT: 19992 // match: (ULT (InvertFlags cmp) yes no) 19993 // cond: 19994 // result: (UGT cmp yes no) 19995 for { 19996 v := b.Control 19997 if v.Op != OpAMD64InvertFlags { 19998 break 19999 } 20000 cmp := v.Args[0] 20001 yes := b.Succs[0] 20002 no := b.Succs[1] 20003 b.Kind = BlockAMD64UGT 20004 b.SetControl(cmp) 20005 _ = yes 20006 _ = no 20007 return true 20008 } 20009 // match: (ULT (FlagEQ) yes no) 20010 // cond: 20011 // result: (First nil no yes) 20012 for { 20013 v := b.Control 20014 if v.Op != OpAMD64FlagEQ { 20015 break 20016 } 20017 yes := b.Succs[0] 20018 no := b.Succs[1] 20019 b.Kind = BlockFirst 20020 b.SetControl(nil) 20021 b.swapSuccessors() 20022 _ = no 20023 _ = yes 20024 return true 20025 } 20026 // match: (ULT (FlagLT_ULT) yes no) 20027 // cond: 20028 // result: (First nil yes no) 20029 for { 20030 v := b.Control 20031 if v.Op != OpAMD64FlagLT_ULT { 20032 break 20033 } 20034 yes := b.Succs[0] 20035 no := b.Succs[1] 20036 b.Kind = BlockFirst 20037 b.SetControl(nil) 20038 _ = yes 20039 _ = no 20040 return true 20041 } 20042 // match: (ULT (FlagLT_UGT) yes no) 20043 // cond: 20044 // result: (First nil no 
yes) 20045 for { 20046 v := b.Control 20047 if v.Op != OpAMD64FlagLT_UGT { 20048 break 20049 } 20050 yes := b.Succs[0] 20051 no := b.Succs[1] 20052 b.Kind = BlockFirst 20053 b.SetControl(nil) 20054 b.swapSuccessors() 20055 _ = no 20056 _ = yes 20057 return true 20058 } 20059 // match: (ULT (FlagGT_ULT) yes no) 20060 // cond: 20061 // result: (First nil yes no) 20062 for { 20063 v := b.Control 20064 if v.Op != OpAMD64FlagGT_ULT { 20065 break 20066 } 20067 yes := b.Succs[0] 20068 no := b.Succs[1] 20069 b.Kind = BlockFirst 20070 b.SetControl(nil) 20071 _ = yes 20072 _ = no 20073 return true 20074 } 20075 // match: (ULT (FlagGT_UGT) yes no) 20076 // cond: 20077 // result: (First nil no yes) 20078 for { 20079 v := b.Control 20080 if v.Op != OpAMD64FlagGT_UGT { 20081 break 20082 } 20083 yes := b.Succs[0] 20084 no := b.Succs[1] 20085 b.Kind = BlockFirst 20086 b.SetControl(nil) 20087 b.swapSuccessors() 20088 _ = no 20089 _ = yes 20090 return true 20091 } 20092 } 20093 return false 20094 }
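
// Editorial sketch (not part of the generated code): each rule above returns
// true after mutating b, and falling through to the final return false means
// no rule fired. The caller is expected to iterate to a fixed point, because
// one rewrite can expose another (e.g. an NE block rewritten to ULT may then
// match the (ULT (InvertFlags cmp) ...) rule). Assuming this file's block
// entry point is rewriteBlockAMD64(b *Block) bool, a minimal driver could
// look like:
//
//	func applyBlockRewrites(f *Func) {
//		for changed := true; changed; {
//			changed = false
//			for _, b := range f.Blocks {
//				if rewriteBlockAMD64(b) {
//					changed = true
//				}
//			}
//		}
//	}
//
// The real driver in this package's rewrite.go interleaves value and block
// rewrites in one such fixed-point loop.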