github.com/euank/go@v0.0.0-20160829210321-495514729181/src/cmd/compile/internal/ssa/rewriteAMD64.go (about) 1 // autogenerated from gen/AMD64.rules: do not edit! 2 // generated with: cd gen; go run *.go 3 4 package ssa 5 6 import "math" 7 8 var _ = math.MinInt8 // in case not otherwise used 9 func rewriteValueAMD64(v *Value, config *Config) bool { 10 switch v.Op { 11 case OpAMD64ADDL: 12 return rewriteValueAMD64_OpAMD64ADDL(v, config) 13 case OpAMD64ADDLconst: 14 return rewriteValueAMD64_OpAMD64ADDLconst(v, config) 15 case OpAMD64ADDQ: 16 return rewriteValueAMD64_OpAMD64ADDQ(v, config) 17 case OpAMD64ADDQconst: 18 return rewriteValueAMD64_OpAMD64ADDQconst(v, config) 19 case OpAMD64ANDL: 20 return rewriteValueAMD64_OpAMD64ANDL(v, config) 21 case OpAMD64ANDLconst: 22 return rewriteValueAMD64_OpAMD64ANDLconst(v, config) 23 case OpAMD64ANDQ: 24 return rewriteValueAMD64_OpAMD64ANDQ(v, config) 25 case OpAMD64ANDQconst: 26 return rewriteValueAMD64_OpAMD64ANDQconst(v, config) 27 case OpAMD64CMPB: 28 return rewriteValueAMD64_OpAMD64CMPB(v, config) 29 case OpAMD64CMPBconst: 30 return rewriteValueAMD64_OpAMD64CMPBconst(v, config) 31 case OpAMD64CMPL: 32 return rewriteValueAMD64_OpAMD64CMPL(v, config) 33 case OpAMD64CMPLconst: 34 return rewriteValueAMD64_OpAMD64CMPLconst(v, config) 35 case OpAMD64CMPQ: 36 return rewriteValueAMD64_OpAMD64CMPQ(v, config) 37 case OpAMD64CMPQconst: 38 return rewriteValueAMD64_OpAMD64CMPQconst(v, config) 39 case OpAMD64CMPW: 40 return rewriteValueAMD64_OpAMD64CMPW(v, config) 41 case OpAMD64CMPWconst: 42 return rewriteValueAMD64_OpAMD64CMPWconst(v, config) 43 case OpAMD64CMPXCHGLlock: 44 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config) 45 case OpAMD64CMPXCHGQlock: 46 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config) 47 case OpAMD64LEAL: 48 return rewriteValueAMD64_OpAMD64LEAL(v, config) 49 case OpAMD64LEAQ: 50 return rewriteValueAMD64_OpAMD64LEAQ(v, config) 51 case OpAMD64LEAQ1: 52 return rewriteValueAMD64_OpAMD64LEAQ1(v, config) 53 
case OpAMD64LEAQ2: 54 return rewriteValueAMD64_OpAMD64LEAQ2(v, config) 55 case OpAMD64LEAQ4: 56 return rewriteValueAMD64_OpAMD64LEAQ4(v, config) 57 case OpAMD64LEAQ8: 58 return rewriteValueAMD64_OpAMD64LEAQ8(v, config) 59 case OpAMD64MOVBQSX: 60 return rewriteValueAMD64_OpAMD64MOVBQSX(v, config) 61 case OpAMD64MOVBQSXload: 62 return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config) 63 case OpAMD64MOVBQZX: 64 return rewriteValueAMD64_OpAMD64MOVBQZX(v, config) 65 case OpAMD64MOVBload: 66 return rewriteValueAMD64_OpAMD64MOVBload(v, config) 67 case OpAMD64MOVBloadidx1: 68 return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config) 69 case OpAMD64MOVBstore: 70 return rewriteValueAMD64_OpAMD64MOVBstore(v, config) 71 case OpAMD64MOVBstoreconst: 72 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config) 73 case OpAMD64MOVBstoreconstidx1: 74 return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config) 75 case OpAMD64MOVBstoreidx1: 76 return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config) 77 case OpAMD64MOVLQSX: 78 return rewriteValueAMD64_OpAMD64MOVLQSX(v, config) 79 case OpAMD64MOVLQSXload: 80 return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config) 81 case OpAMD64MOVLQZX: 82 return rewriteValueAMD64_OpAMD64MOVLQZX(v, config) 83 case OpAMD64MOVLatomicload: 84 return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config) 85 case OpAMD64MOVLload: 86 return rewriteValueAMD64_OpAMD64MOVLload(v, config) 87 case OpAMD64MOVLloadidx1: 88 return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config) 89 case OpAMD64MOVLloadidx4: 90 return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config) 91 case OpAMD64MOVLstore: 92 return rewriteValueAMD64_OpAMD64MOVLstore(v, config) 93 case OpAMD64MOVLstoreconst: 94 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config) 95 case OpAMD64MOVLstoreconstidx1: 96 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config) 97 case OpAMD64MOVLstoreconstidx4: 98 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config) 99 case OpAMD64MOVLstoreidx1: 100 return 
rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config) 101 case OpAMD64MOVLstoreidx4: 102 return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config) 103 case OpAMD64MOVOload: 104 return rewriteValueAMD64_OpAMD64MOVOload(v, config) 105 case OpAMD64MOVOstore: 106 return rewriteValueAMD64_OpAMD64MOVOstore(v, config) 107 case OpAMD64MOVQatomicload: 108 return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config) 109 case OpAMD64MOVQload: 110 return rewriteValueAMD64_OpAMD64MOVQload(v, config) 111 case OpAMD64MOVQloadidx1: 112 return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config) 113 case OpAMD64MOVQloadidx8: 114 return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config) 115 case OpAMD64MOVQstore: 116 return rewriteValueAMD64_OpAMD64MOVQstore(v, config) 117 case OpAMD64MOVQstoreconst: 118 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config) 119 case OpAMD64MOVQstoreconstidx1: 120 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config) 121 case OpAMD64MOVQstoreconstidx8: 122 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config) 123 case OpAMD64MOVQstoreidx1: 124 return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config) 125 case OpAMD64MOVQstoreidx8: 126 return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config) 127 case OpAMD64MOVSDload: 128 return rewriteValueAMD64_OpAMD64MOVSDload(v, config) 129 case OpAMD64MOVSDloadidx1: 130 return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config) 131 case OpAMD64MOVSDloadidx8: 132 return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config) 133 case OpAMD64MOVSDstore: 134 return rewriteValueAMD64_OpAMD64MOVSDstore(v, config) 135 case OpAMD64MOVSDstoreidx1: 136 return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config) 137 case OpAMD64MOVSDstoreidx8: 138 return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config) 139 case OpAMD64MOVSSload: 140 return rewriteValueAMD64_OpAMD64MOVSSload(v, config) 141 case OpAMD64MOVSSloadidx1: 142 return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config) 143 case OpAMD64MOVSSloadidx4: 144 return 
rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config) 145 case OpAMD64MOVSSstore: 146 return rewriteValueAMD64_OpAMD64MOVSSstore(v, config) 147 case OpAMD64MOVSSstoreidx1: 148 return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config) 149 case OpAMD64MOVSSstoreidx4: 150 return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config) 151 case OpAMD64MOVWQSX: 152 return rewriteValueAMD64_OpAMD64MOVWQSX(v, config) 153 case OpAMD64MOVWQSXload: 154 return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config) 155 case OpAMD64MOVWQZX: 156 return rewriteValueAMD64_OpAMD64MOVWQZX(v, config) 157 case OpAMD64MOVWload: 158 return rewriteValueAMD64_OpAMD64MOVWload(v, config) 159 case OpAMD64MOVWloadidx1: 160 return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config) 161 case OpAMD64MOVWloadidx2: 162 return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config) 163 case OpAMD64MOVWstore: 164 return rewriteValueAMD64_OpAMD64MOVWstore(v, config) 165 case OpAMD64MOVWstoreconst: 166 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config) 167 case OpAMD64MOVWstoreconstidx1: 168 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config) 169 case OpAMD64MOVWstoreconstidx2: 170 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config) 171 case OpAMD64MOVWstoreidx1: 172 return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) 173 case OpAMD64MOVWstoreidx2: 174 return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) 175 case OpAMD64MULL: 176 return rewriteValueAMD64_OpAMD64MULL(v, config) 177 case OpAMD64MULLconst: 178 return rewriteValueAMD64_OpAMD64MULLconst(v, config) 179 case OpAMD64MULQ: 180 return rewriteValueAMD64_OpAMD64MULQ(v, config) 181 case OpAMD64MULQconst: 182 return rewriteValueAMD64_OpAMD64MULQconst(v, config) 183 case OpAMD64NEGL: 184 return rewriteValueAMD64_OpAMD64NEGL(v, config) 185 case OpAMD64NEGQ: 186 return rewriteValueAMD64_OpAMD64NEGQ(v, config) 187 case OpAMD64NOTL: 188 return rewriteValueAMD64_OpAMD64NOTL(v, config) 189 case OpAMD64NOTQ: 190 return 
rewriteValueAMD64_OpAMD64NOTQ(v, config) 191 case OpAMD64ORL: 192 return rewriteValueAMD64_OpAMD64ORL(v, config) 193 case OpAMD64ORLconst: 194 return rewriteValueAMD64_OpAMD64ORLconst(v, config) 195 case OpAMD64ORQ: 196 return rewriteValueAMD64_OpAMD64ORQ(v, config) 197 case OpAMD64ORQconst: 198 return rewriteValueAMD64_OpAMD64ORQconst(v, config) 199 case OpAMD64ROLBconst: 200 return rewriteValueAMD64_OpAMD64ROLBconst(v, config) 201 case OpAMD64ROLLconst: 202 return rewriteValueAMD64_OpAMD64ROLLconst(v, config) 203 case OpAMD64ROLQconst: 204 return rewriteValueAMD64_OpAMD64ROLQconst(v, config) 205 case OpAMD64ROLWconst: 206 return rewriteValueAMD64_OpAMD64ROLWconst(v, config) 207 case OpAMD64SARB: 208 return rewriteValueAMD64_OpAMD64SARB(v, config) 209 case OpAMD64SARBconst: 210 return rewriteValueAMD64_OpAMD64SARBconst(v, config) 211 case OpAMD64SARL: 212 return rewriteValueAMD64_OpAMD64SARL(v, config) 213 case OpAMD64SARLconst: 214 return rewriteValueAMD64_OpAMD64SARLconst(v, config) 215 case OpAMD64SARQ: 216 return rewriteValueAMD64_OpAMD64SARQ(v, config) 217 case OpAMD64SARQconst: 218 return rewriteValueAMD64_OpAMD64SARQconst(v, config) 219 case OpAMD64SARW: 220 return rewriteValueAMD64_OpAMD64SARW(v, config) 221 case OpAMD64SARWconst: 222 return rewriteValueAMD64_OpAMD64SARWconst(v, config) 223 case OpAMD64SBBLcarrymask: 224 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config) 225 case OpAMD64SBBQcarrymask: 226 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config) 227 case OpAMD64SETA: 228 return rewriteValueAMD64_OpAMD64SETA(v, config) 229 case OpAMD64SETAE: 230 return rewriteValueAMD64_OpAMD64SETAE(v, config) 231 case OpAMD64SETB: 232 return rewriteValueAMD64_OpAMD64SETB(v, config) 233 case OpAMD64SETBE: 234 return rewriteValueAMD64_OpAMD64SETBE(v, config) 235 case OpAMD64SETEQ: 236 return rewriteValueAMD64_OpAMD64SETEQ(v, config) 237 case OpAMD64SETG: 238 return rewriteValueAMD64_OpAMD64SETG(v, config) 239 case OpAMD64SETGE: 240 return 
rewriteValueAMD64_OpAMD64SETGE(v, config) 241 case OpAMD64SETL: 242 return rewriteValueAMD64_OpAMD64SETL(v, config) 243 case OpAMD64SETLE: 244 return rewriteValueAMD64_OpAMD64SETLE(v, config) 245 case OpAMD64SETNE: 246 return rewriteValueAMD64_OpAMD64SETNE(v, config) 247 case OpAMD64SHLL: 248 return rewriteValueAMD64_OpAMD64SHLL(v, config) 249 case OpAMD64SHLQ: 250 return rewriteValueAMD64_OpAMD64SHLQ(v, config) 251 case OpAMD64SHRB: 252 return rewriteValueAMD64_OpAMD64SHRB(v, config) 253 case OpAMD64SHRL: 254 return rewriteValueAMD64_OpAMD64SHRL(v, config) 255 case OpAMD64SHRQ: 256 return rewriteValueAMD64_OpAMD64SHRQ(v, config) 257 case OpAMD64SHRW: 258 return rewriteValueAMD64_OpAMD64SHRW(v, config) 259 case OpAMD64SUBL: 260 return rewriteValueAMD64_OpAMD64SUBL(v, config) 261 case OpAMD64SUBLconst: 262 return rewriteValueAMD64_OpAMD64SUBLconst(v, config) 263 case OpAMD64SUBQ: 264 return rewriteValueAMD64_OpAMD64SUBQ(v, config) 265 case OpAMD64SUBQconst: 266 return rewriteValueAMD64_OpAMD64SUBQconst(v, config) 267 case OpAMD64XADDLlock: 268 return rewriteValueAMD64_OpAMD64XADDLlock(v, config) 269 case OpAMD64XADDQlock: 270 return rewriteValueAMD64_OpAMD64XADDQlock(v, config) 271 case OpAMD64XCHGL: 272 return rewriteValueAMD64_OpAMD64XCHGL(v, config) 273 case OpAMD64XCHGQ: 274 return rewriteValueAMD64_OpAMD64XCHGQ(v, config) 275 case OpAMD64XORL: 276 return rewriteValueAMD64_OpAMD64XORL(v, config) 277 case OpAMD64XORLconst: 278 return rewriteValueAMD64_OpAMD64XORLconst(v, config) 279 case OpAMD64XORQ: 280 return rewriteValueAMD64_OpAMD64XORQ(v, config) 281 case OpAMD64XORQconst: 282 return rewriteValueAMD64_OpAMD64XORQconst(v, config) 283 case OpAdd16: 284 return rewriteValueAMD64_OpAdd16(v, config) 285 case OpAdd32: 286 return rewriteValueAMD64_OpAdd32(v, config) 287 case OpAdd32F: 288 return rewriteValueAMD64_OpAdd32F(v, config) 289 case OpAdd64: 290 return rewriteValueAMD64_OpAdd64(v, config) 291 case OpAdd64F: 292 return rewriteValueAMD64_OpAdd64F(v, config) 
293 case OpAdd8: 294 return rewriteValueAMD64_OpAdd8(v, config) 295 case OpAddPtr: 296 return rewriteValueAMD64_OpAddPtr(v, config) 297 case OpAddr: 298 return rewriteValueAMD64_OpAddr(v, config) 299 case OpAnd16: 300 return rewriteValueAMD64_OpAnd16(v, config) 301 case OpAnd32: 302 return rewriteValueAMD64_OpAnd32(v, config) 303 case OpAnd64: 304 return rewriteValueAMD64_OpAnd64(v, config) 305 case OpAnd8: 306 return rewriteValueAMD64_OpAnd8(v, config) 307 case OpAndB: 308 return rewriteValueAMD64_OpAndB(v, config) 309 case OpAtomicAdd32: 310 return rewriteValueAMD64_OpAtomicAdd32(v, config) 311 case OpAtomicAdd64: 312 return rewriteValueAMD64_OpAtomicAdd64(v, config) 313 case OpAtomicAnd8: 314 return rewriteValueAMD64_OpAtomicAnd8(v, config) 315 case OpAtomicCompareAndSwap32: 316 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config) 317 case OpAtomicCompareAndSwap64: 318 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config) 319 case OpAtomicExchange32: 320 return rewriteValueAMD64_OpAtomicExchange32(v, config) 321 case OpAtomicExchange64: 322 return rewriteValueAMD64_OpAtomicExchange64(v, config) 323 case OpAtomicLoad32: 324 return rewriteValueAMD64_OpAtomicLoad32(v, config) 325 case OpAtomicLoad64: 326 return rewriteValueAMD64_OpAtomicLoad64(v, config) 327 case OpAtomicLoadPtr: 328 return rewriteValueAMD64_OpAtomicLoadPtr(v, config) 329 case OpAtomicOr8: 330 return rewriteValueAMD64_OpAtomicOr8(v, config) 331 case OpAtomicStore32: 332 return rewriteValueAMD64_OpAtomicStore32(v, config) 333 case OpAtomicStore64: 334 return rewriteValueAMD64_OpAtomicStore64(v, config) 335 case OpAtomicStorePtrNoWB: 336 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config) 337 case OpAvg64u: 338 return rewriteValueAMD64_OpAvg64u(v, config) 339 case OpBswap32: 340 return rewriteValueAMD64_OpBswap32(v, config) 341 case OpBswap64: 342 return rewriteValueAMD64_OpBswap64(v, config) 343 case OpClosureCall: 344 return rewriteValueAMD64_OpClosureCall(v, config) 345 case 
OpCom16: 346 return rewriteValueAMD64_OpCom16(v, config) 347 case OpCom32: 348 return rewriteValueAMD64_OpCom32(v, config) 349 case OpCom64: 350 return rewriteValueAMD64_OpCom64(v, config) 351 case OpCom8: 352 return rewriteValueAMD64_OpCom8(v, config) 353 case OpConst16: 354 return rewriteValueAMD64_OpConst16(v, config) 355 case OpConst32: 356 return rewriteValueAMD64_OpConst32(v, config) 357 case OpConst32F: 358 return rewriteValueAMD64_OpConst32F(v, config) 359 case OpConst64: 360 return rewriteValueAMD64_OpConst64(v, config) 361 case OpConst64F: 362 return rewriteValueAMD64_OpConst64F(v, config) 363 case OpConst8: 364 return rewriteValueAMD64_OpConst8(v, config) 365 case OpConstBool: 366 return rewriteValueAMD64_OpConstBool(v, config) 367 case OpConstNil: 368 return rewriteValueAMD64_OpConstNil(v, config) 369 case OpConvert: 370 return rewriteValueAMD64_OpConvert(v, config) 371 case OpCtz32: 372 return rewriteValueAMD64_OpCtz32(v, config) 373 case OpCtz64: 374 return rewriteValueAMD64_OpCtz64(v, config) 375 case OpCvt32Fto32: 376 return rewriteValueAMD64_OpCvt32Fto32(v, config) 377 case OpCvt32Fto64: 378 return rewriteValueAMD64_OpCvt32Fto64(v, config) 379 case OpCvt32Fto64F: 380 return rewriteValueAMD64_OpCvt32Fto64F(v, config) 381 case OpCvt32to32F: 382 return rewriteValueAMD64_OpCvt32to32F(v, config) 383 case OpCvt32to64F: 384 return rewriteValueAMD64_OpCvt32to64F(v, config) 385 case OpCvt64Fto32: 386 return rewriteValueAMD64_OpCvt64Fto32(v, config) 387 case OpCvt64Fto32F: 388 return rewriteValueAMD64_OpCvt64Fto32F(v, config) 389 case OpCvt64Fto64: 390 return rewriteValueAMD64_OpCvt64Fto64(v, config) 391 case OpCvt64to32F: 392 return rewriteValueAMD64_OpCvt64to32F(v, config) 393 case OpCvt64to64F: 394 return rewriteValueAMD64_OpCvt64to64F(v, config) 395 case OpDeferCall: 396 return rewriteValueAMD64_OpDeferCall(v, config) 397 case OpDiv16: 398 return rewriteValueAMD64_OpDiv16(v, config) 399 case OpDiv16u: 400 return rewriteValueAMD64_OpDiv16u(v, config) 401 
case OpDiv32: 402 return rewriteValueAMD64_OpDiv32(v, config) 403 case OpDiv32F: 404 return rewriteValueAMD64_OpDiv32F(v, config) 405 case OpDiv32u: 406 return rewriteValueAMD64_OpDiv32u(v, config) 407 case OpDiv64: 408 return rewriteValueAMD64_OpDiv64(v, config) 409 case OpDiv64F: 410 return rewriteValueAMD64_OpDiv64F(v, config) 411 case OpDiv64u: 412 return rewriteValueAMD64_OpDiv64u(v, config) 413 case OpDiv8: 414 return rewriteValueAMD64_OpDiv8(v, config) 415 case OpDiv8u: 416 return rewriteValueAMD64_OpDiv8u(v, config) 417 case OpEq16: 418 return rewriteValueAMD64_OpEq16(v, config) 419 case OpEq32: 420 return rewriteValueAMD64_OpEq32(v, config) 421 case OpEq32F: 422 return rewriteValueAMD64_OpEq32F(v, config) 423 case OpEq64: 424 return rewriteValueAMD64_OpEq64(v, config) 425 case OpEq64F: 426 return rewriteValueAMD64_OpEq64F(v, config) 427 case OpEq8: 428 return rewriteValueAMD64_OpEq8(v, config) 429 case OpEqB: 430 return rewriteValueAMD64_OpEqB(v, config) 431 case OpEqPtr: 432 return rewriteValueAMD64_OpEqPtr(v, config) 433 case OpGeq16: 434 return rewriteValueAMD64_OpGeq16(v, config) 435 case OpGeq16U: 436 return rewriteValueAMD64_OpGeq16U(v, config) 437 case OpGeq32: 438 return rewriteValueAMD64_OpGeq32(v, config) 439 case OpGeq32F: 440 return rewriteValueAMD64_OpGeq32F(v, config) 441 case OpGeq32U: 442 return rewriteValueAMD64_OpGeq32U(v, config) 443 case OpGeq64: 444 return rewriteValueAMD64_OpGeq64(v, config) 445 case OpGeq64F: 446 return rewriteValueAMD64_OpGeq64F(v, config) 447 case OpGeq64U: 448 return rewriteValueAMD64_OpGeq64U(v, config) 449 case OpGeq8: 450 return rewriteValueAMD64_OpGeq8(v, config) 451 case OpGeq8U: 452 return rewriteValueAMD64_OpGeq8U(v, config) 453 case OpGetClosurePtr: 454 return rewriteValueAMD64_OpGetClosurePtr(v, config) 455 case OpGetG: 456 return rewriteValueAMD64_OpGetG(v, config) 457 case OpGoCall: 458 return rewriteValueAMD64_OpGoCall(v, config) 459 case OpGreater16: 460 return rewriteValueAMD64_OpGreater16(v, config) 
461 case OpGreater16U: 462 return rewriteValueAMD64_OpGreater16U(v, config) 463 case OpGreater32: 464 return rewriteValueAMD64_OpGreater32(v, config) 465 case OpGreater32F: 466 return rewriteValueAMD64_OpGreater32F(v, config) 467 case OpGreater32U: 468 return rewriteValueAMD64_OpGreater32U(v, config) 469 case OpGreater64: 470 return rewriteValueAMD64_OpGreater64(v, config) 471 case OpGreater64F: 472 return rewriteValueAMD64_OpGreater64F(v, config) 473 case OpGreater64U: 474 return rewriteValueAMD64_OpGreater64U(v, config) 475 case OpGreater8: 476 return rewriteValueAMD64_OpGreater8(v, config) 477 case OpGreater8U: 478 return rewriteValueAMD64_OpGreater8U(v, config) 479 case OpHmul16: 480 return rewriteValueAMD64_OpHmul16(v, config) 481 case OpHmul16u: 482 return rewriteValueAMD64_OpHmul16u(v, config) 483 case OpHmul32: 484 return rewriteValueAMD64_OpHmul32(v, config) 485 case OpHmul32u: 486 return rewriteValueAMD64_OpHmul32u(v, config) 487 case OpHmul64: 488 return rewriteValueAMD64_OpHmul64(v, config) 489 case OpHmul64u: 490 return rewriteValueAMD64_OpHmul64u(v, config) 491 case OpHmul8: 492 return rewriteValueAMD64_OpHmul8(v, config) 493 case OpHmul8u: 494 return rewriteValueAMD64_OpHmul8u(v, config) 495 case OpInt64Hi: 496 return rewriteValueAMD64_OpInt64Hi(v, config) 497 case OpInterCall: 498 return rewriteValueAMD64_OpInterCall(v, config) 499 case OpIsInBounds: 500 return rewriteValueAMD64_OpIsInBounds(v, config) 501 case OpIsNonNil: 502 return rewriteValueAMD64_OpIsNonNil(v, config) 503 case OpIsSliceInBounds: 504 return rewriteValueAMD64_OpIsSliceInBounds(v, config) 505 case OpLeq16: 506 return rewriteValueAMD64_OpLeq16(v, config) 507 case OpLeq16U: 508 return rewriteValueAMD64_OpLeq16U(v, config) 509 case OpLeq32: 510 return rewriteValueAMD64_OpLeq32(v, config) 511 case OpLeq32F: 512 return rewriteValueAMD64_OpLeq32F(v, config) 513 case OpLeq32U: 514 return rewriteValueAMD64_OpLeq32U(v, config) 515 case OpLeq64: 516 return rewriteValueAMD64_OpLeq64(v, 
config) 517 case OpLeq64F: 518 return rewriteValueAMD64_OpLeq64F(v, config) 519 case OpLeq64U: 520 return rewriteValueAMD64_OpLeq64U(v, config) 521 case OpLeq8: 522 return rewriteValueAMD64_OpLeq8(v, config) 523 case OpLeq8U: 524 return rewriteValueAMD64_OpLeq8U(v, config) 525 case OpLess16: 526 return rewriteValueAMD64_OpLess16(v, config) 527 case OpLess16U: 528 return rewriteValueAMD64_OpLess16U(v, config) 529 case OpLess32: 530 return rewriteValueAMD64_OpLess32(v, config) 531 case OpLess32F: 532 return rewriteValueAMD64_OpLess32F(v, config) 533 case OpLess32U: 534 return rewriteValueAMD64_OpLess32U(v, config) 535 case OpLess64: 536 return rewriteValueAMD64_OpLess64(v, config) 537 case OpLess64F: 538 return rewriteValueAMD64_OpLess64F(v, config) 539 case OpLess64U: 540 return rewriteValueAMD64_OpLess64U(v, config) 541 case OpLess8: 542 return rewriteValueAMD64_OpLess8(v, config) 543 case OpLess8U: 544 return rewriteValueAMD64_OpLess8U(v, config) 545 case OpLoad: 546 return rewriteValueAMD64_OpLoad(v, config) 547 case OpLrot16: 548 return rewriteValueAMD64_OpLrot16(v, config) 549 case OpLrot32: 550 return rewriteValueAMD64_OpLrot32(v, config) 551 case OpLrot64: 552 return rewriteValueAMD64_OpLrot64(v, config) 553 case OpLrot8: 554 return rewriteValueAMD64_OpLrot8(v, config) 555 case OpLsh16x16: 556 return rewriteValueAMD64_OpLsh16x16(v, config) 557 case OpLsh16x32: 558 return rewriteValueAMD64_OpLsh16x32(v, config) 559 case OpLsh16x64: 560 return rewriteValueAMD64_OpLsh16x64(v, config) 561 case OpLsh16x8: 562 return rewriteValueAMD64_OpLsh16x8(v, config) 563 case OpLsh32x16: 564 return rewriteValueAMD64_OpLsh32x16(v, config) 565 case OpLsh32x32: 566 return rewriteValueAMD64_OpLsh32x32(v, config) 567 case OpLsh32x64: 568 return rewriteValueAMD64_OpLsh32x64(v, config) 569 case OpLsh32x8: 570 return rewriteValueAMD64_OpLsh32x8(v, config) 571 case OpLsh64x16: 572 return rewriteValueAMD64_OpLsh64x16(v, config) 573 case OpLsh64x32: 574 return 
rewriteValueAMD64_OpLsh64x32(v, config) 575 case OpLsh64x64: 576 return rewriteValueAMD64_OpLsh64x64(v, config) 577 case OpLsh64x8: 578 return rewriteValueAMD64_OpLsh64x8(v, config) 579 case OpLsh8x16: 580 return rewriteValueAMD64_OpLsh8x16(v, config) 581 case OpLsh8x32: 582 return rewriteValueAMD64_OpLsh8x32(v, config) 583 case OpLsh8x64: 584 return rewriteValueAMD64_OpLsh8x64(v, config) 585 case OpLsh8x8: 586 return rewriteValueAMD64_OpLsh8x8(v, config) 587 case OpMod16: 588 return rewriteValueAMD64_OpMod16(v, config) 589 case OpMod16u: 590 return rewriteValueAMD64_OpMod16u(v, config) 591 case OpMod32: 592 return rewriteValueAMD64_OpMod32(v, config) 593 case OpMod32u: 594 return rewriteValueAMD64_OpMod32u(v, config) 595 case OpMod64: 596 return rewriteValueAMD64_OpMod64(v, config) 597 case OpMod64u: 598 return rewriteValueAMD64_OpMod64u(v, config) 599 case OpMod8: 600 return rewriteValueAMD64_OpMod8(v, config) 601 case OpMod8u: 602 return rewriteValueAMD64_OpMod8u(v, config) 603 case OpMove: 604 return rewriteValueAMD64_OpMove(v, config) 605 case OpMul16: 606 return rewriteValueAMD64_OpMul16(v, config) 607 case OpMul32: 608 return rewriteValueAMD64_OpMul32(v, config) 609 case OpMul32F: 610 return rewriteValueAMD64_OpMul32F(v, config) 611 case OpMul64: 612 return rewriteValueAMD64_OpMul64(v, config) 613 case OpMul64F: 614 return rewriteValueAMD64_OpMul64F(v, config) 615 case OpMul8: 616 return rewriteValueAMD64_OpMul8(v, config) 617 case OpNeg16: 618 return rewriteValueAMD64_OpNeg16(v, config) 619 case OpNeg32: 620 return rewriteValueAMD64_OpNeg32(v, config) 621 case OpNeg32F: 622 return rewriteValueAMD64_OpNeg32F(v, config) 623 case OpNeg64: 624 return rewriteValueAMD64_OpNeg64(v, config) 625 case OpNeg64F: 626 return rewriteValueAMD64_OpNeg64F(v, config) 627 case OpNeg8: 628 return rewriteValueAMD64_OpNeg8(v, config) 629 case OpNeq16: 630 return rewriteValueAMD64_OpNeq16(v, config) 631 case OpNeq32: 632 return rewriteValueAMD64_OpNeq32(v, config) 633 case 
OpNeq32F: 634 return rewriteValueAMD64_OpNeq32F(v, config) 635 case OpNeq64: 636 return rewriteValueAMD64_OpNeq64(v, config) 637 case OpNeq64F: 638 return rewriteValueAMD64_OpNeq64F(v, config) 639 case OpNeq8: 640 return rewriteValueAMD64_OpNeq8(v, config) 641 case OpNeqB: 642 return rewriteValueAMD64_OpNeqB(v, config) 643 case OpNeqPtr: 644 return rewriteValueAMD64_OpNeqPtr(v, config) 645 case OpNilCheck: 646 return rewriteValueAMD64_OpNilCheck(v, config) 647 case OpNot: 648 return rewriteValueAMD64_OpNot(v, config) 649 case OpOffPtr: 650 return rewriteValueAMD64_OpOffPtr(v, config) 651 case OpOr16: 652 return rewriteValueAMD64_OpOr16(v, config) 653 case OpOr32: 654 return rewriteValueAMD64_OpOr32(v, config) 655 case OpOr64: 656 return rewriteValueAMD64_OpOr64(v, config) 657 case OpOr8: 658 return rewriteValueAMD64_OpOr8(v, config) 659 case OpOrB: 660 return rewriteValueAMD64_OpOrB(v, config) 661 case OpRsh16Ux16: 662 return rewriteValueAMD64_OpRsh16Ux16(v, config) 663 case OpRsh16Ux32: 664 return rewriteValueAMD64_OpRsh16Ux32(v, config) 665 case OpRsh16Ux64: 666 return rewriteValueAMD64_OpRsh16Ux64(v, config) 667 case OpRsh16Ux8: 668 return rewriteValueAMD64_OpRsh16Ux8(v, config) 669 case OpRsh16x16: 670 return rewriteValueAMD64_OpRsh16x16(v, config) 671 case OpRsh16x32: 672 return rewriteValueAMD64_OpRsh16x32(v, config) 673 case OpRsh16x64: 674 return rewriteValueAMD64_OpRsh16x64(v, config) 675 case OpRsh16x8: 676 return rewriteValueAMD64_OpRsh16x8(v, config) 677 case OpRsh32Ux16: 678 return rewriteValueAMD64_OpRsh32Ux16(v, config) 679 case OpRsh32Ux32: 680 return rewriteValueAMD64_OpRsh32Ux32(v, config) 681 case OpRsh32Ux64: 682 return rewriteValueAMD64_OpRsh32Ux64(v, config) 683 case OpRsh32Ux8: 684 return rewriteValueAMD64_OpRsh32Ux8(v, config) 685 case OpRsh32x16: 686 return rewriteValueAMD64_OpRsh32x16(v, config) 687 case OpRsh32x32: 688 return rewriteValueAMD64_OpRsh32x32(v, config) 689 case OpRsh32x64: 690 return rewriteValueAMD64_OpRsh32x64(v, config) 
691 case OpRsh32x8: 692 return rewriteValueAMD64_OpRsh32x8(v, config) 693 case OpRsh64Ux16: 694 return rewriteValueAMD64_OpRsh64Ux16(v, config) 695 case OpRsh64Ux32: 696 return rewriteValueAMD64_OpRsh64Ux32(v, config) 697 case OpRsh64Ux64: 698 return rewriteValueAMD64_OpRsh64Ux64(v, config) 699 case OpRsh64Ux8: 700 return rewriteValueAMD64_OpRsh64Ux8(v, config) 701 case OpRsh64x16: 702 return rewriteValueAMD64_OpRsh64x16(v, config) 703 case OpRsh64x32: 704 return rewriteValueAMD64_OpRsh64x32(v, config) 705 case OpRsh64x64: 706 return rewriteValueAMD64_OpRsh64x64(v, config) 707 case OpRsh64x8: 708 return rewriteValueAMD64_OpRsh64x8(v, config) 709 case OpRsh8Ux16: 710 return rewriteValueAMD64_OpRsh8Ux16(v, config) 711 case OpRsh8Ux32: 712 return rewriteValueAMD64_OpRsh8Ux32(v, config) 713 case OpRsh8Ux64: 714 return rewriteValueAMD64_OpRsh8Ux64(v, config) 715 case OpRsh8Ux8: 716 return rewriteValueAMD64_OpRsh8Ux8(v, config) 717 case OpRsh8x16: 718 return rewriteValueAMD64_OpRsh8x16(v, config) 719 case OpRsh8x32: 720 return rewriteValueAMD64_OpRsh8x32(v, config) 721 case OpRsh8x64: 722 return rewriteValueAMD64_OpRsh8x64(v, config) 723 case OpRsh8x8: 724 return rewriteValueAMD64_OpRsh8x8(v, config) 725 case OpSelect0: 726 return rewriteValueAMD64_OpSelect0(v, config) 727 case OpSelect1: 728 return rewriteValueAMD64_OpSelect1(v, config) 729 case OpSignExt16to32: 730 return rewriteValueAMD64_OpSignExt16to32(v, config) 731 case OpSignExt16to64: 732 return rewriteValueAMD64_OpSignExt16to64(v, config) 733 case OpSignExt32to64: 734 return rewriteValueAMD64_OpSignExt32to64(v, config) 735 case OpSignExt8to16: 736 return rewriteValueAMD64_OpSignExt8to16(v, config) 737 case OpSignExt8to32: 738 return rewriteValueAMD64_OpSignExt8to32(v, config) 739 case OpSignExt8to64: 740 return rewriteValueAMD64_OpSignExt8to64(v, config) 741 case OpSqrt: 742 return rewriteValueAMD64_OpSqrt(v, config) 743 case OpStaticCall: 744 return rewriteValueAMD64_OpStaticCall(v, config) 745 case OpStore: 
746 return rewriteValueAMD64_OpStore(v, config) 747 case OpSub16: 748 return rewriteValueAMD64_OpSub16(v, config) 749 case OpSub32: 750 return rewriteValueAMD64_OpSub32(v, config) 751 case OpSub32F: 752 return rewriteValueAMD64_OpSub32F(v, config) 753 case OpSub64: 754 return rewriteValueAMD64_OpSub64(v, config) 755 case OpSub64F: 756 return rewriteValueAMD64_OpSub64F(v, config) 757 case OpSub8: 758 return rewriteValueAMD64_OpSub8(v, config) 759 case OpSubPtr: 760 return rewriteValueAMD64_OpSubPtr(v, config) 761 case OpTrunc16to8: 762 return rewriteValueAMD64_OpTrunc16to8(v, config) 763 case OpTrunc32to16: 764 return rewriteValueAMD64_OpTrunc32to16(v, config) 765 case OpTrunc32to8: 766 return rewriteValueAMD64_OpTrunc32to8(v, config) 767 case OpTrunc64to16: 768 return rewriteValueAMD64_OpTrunc64to16(v, config) 769 case OpTrunc64to32: 770 return rewriteValueAMD64_OpTrunc64to32(v, config) 771 case OpTrunc64to8: 772 return rewriteValueAMD64_OpTrunc64to8(v, config) 773 case OpXor16: 774 return rewriteValueAMD64_OpXor16(v, config) 775 case OpXor32: 776 return rewriteValueAMD64_OpXor32(v, config) 777 case OpXor64: 778 return rewriteValueAMD64_OpXor64(v, config) 779 case OpXor8: 780 return rewriteValueAMD64_OpXor8(v, config) 781 case OpZero: 782 return rewriteValueAMD64_OpZero(v, config) 783 case OpZeroExt16to32: 784 return rewriteValueAMD64_OpZeroExt16to32(v, config) 785 case OpZeroExt16to64: 786 return rewriteValueAMD64_OpZeroExt16to64(v, config) 787 case OpZeroExt32to64: 788 return rewriteValueAMD64_OpZeroExt32to64(v, config) 789 case OpZeroExt8to16: 790 return rewriteValueAMD64_OpZeroExt8to16(v, config) 791 case OpZeroExt8to32: 792 return rewriteValueAMD64_OpZeroExt8to32(v, config) 793 case OpZeroExt8to64: 794 return rewriteValueAMD64_OpZeroExt8to64(v, config) 795 } 796 return false 797 } 798 func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { 799 b := v.Block 800 _ = b 801 // match: (ADDL x (MOVLconst [c])) 802 // cond: 803 // result: (ADDLconst [c] 
x) 804 for { 805 x := v.Args[0] 806 v_1 := v.Args[1] 807 if v_1.Op != OpAMD64MOVLconst { 808 break 809 } 810 c := v_1.AuxInt 811 v.reset(OpAMD64ADDLconst) 812 v.AuxInt = c 813 v.AddArg(x) 814 return true 815 } 816 // match: (ADDL (MOVLconst [c]) x) 817 // cond: 818 // result: (ADDLconst [c] x) 819 for { 820 v_0 := v.Args[0] 821 if v_0.Op != OpAMD64MOVLconst { 822 break 823 } 824 c := v_0.AuxInt 825 x := v.Args[1] 826 v.reset(OpAMD64ADDLconst) 827 v.AuxInt = c 828 v.AddArg(x) 829 return true 830 } 831 // match: (ADDL x (NEGL y)) 832 // cond: 833 // result: (SUBL x y) 834 for { 835 x := v.Args[0] 836 v_1 := v.Args[1] 837 if v_1.Op != OpAMD64NEGL { 838 break 839 } 840 y := v_1.Args[0] 841 v.reset(OpAMD64SUBL) 842 v.AddArg(x) 843 v.AddArg(y) 844 return true 845 } 846 return false 847 } 848 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool { 849 b := v.Block 850 _ = b 851 // match: (ADDLconst [c] x) 852 // cond: int32(c)==0 853 // result: x 854 for { 855 c := v.AuxInt 856 x := v.Args[0] 857 if !(int32(c) == 0) { 858 break 859 } 860 v.reset(OpCopy) 861 v.Type = x.Type 862 v.AddArg(x) 863 return true 864 } 865 // match: (ADDLconst [c] (MOVLconst [d])) 866 // cond: 867 // result: (MOVLconst [int64(int32(c+d))]) 868 for { 869 c := v.AuxInt 870 v_0 := v.Args[0] 871 if v_0.Op != OpAMD64MOVLconst { 872 break 873 } 874 d := v_0.AuxInt 875 v.reset(OpAMD64MOVLconst) 876 v.AuxInt = int64(int32(c + d)) 877 return true 878 } 879 // match: (ADDLconst [c] (ADDLconst [d] x)) 880 // cond: 881 // result: (ADDLconst [int64(int32(c+d))] x) 882 for { 883 c := v.AuxInt 884 v_0 := v.Args[0] 885 if v_0.Op != OpAMD64ADDLconst { 886 break 887 } 888 d := v_0.AuxInt 889 x := v_0.Args[0] 890 v.reset(OpAMD64ADDLconst) 891 v.AuxInt = int64(int32(c + d)) 892 v.AddArg(x) 893 return true 894 } 895 // match: (ADDLconst [c] (LEAL [d] {s} x)) 896 // cond: is32Bit(c+d) 897 // result: (LEAL [c+d] {s} x) 898 for { 899 c := v.AuxInt 900 v_0 := v.Args[0] 901 if v_0.Op != OpAMD64LEAL { 902 
break 903 } 904 d := v_0.AuxInt 905 s := v_0.Aux 906 x := v_0.Args[0] 907 if !(is32Bit(c + d)) { 908 break 909 } 910 v.reset(OpAMD64LEAL) 911 v.AuxInt = c + d 912 v.Aux = s 913 v.AddArg(x) 914 return true 915 } 916 return false 917 } 918 func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool { 919 b := v.Block 920 _ = b 921 // match: (ADDQ x (MOVQconst [c])) 922 // cond: is32Bit(c) 923 // result: (ADDQconst [c] x) 924 for { 925 x := v.Args[0] 926 v_1 := v.Args[1] 927 if v_1.Op != OpAMD64MOVQconst { 928 break 929 } 930 c := v_1.AuxInt 931 if !(is32Bit(c)) { 932 break 933 } 934 v.reset(OpAMD64ADDQconst) 935 v.AuxInt = c 936 v.AddArg(x) 937 return true 938 } 939 // match: (ADDQ (MOVQconst [c]) x) 940 // cond: is32Bit(c) 941 // result: (ADDQconst [c] x) 942 for { 943 v_0 := v.Args[0] 944 if v_0.Op != OpAMD64MOVQconst { 945 break 946 } 947 c := v_0.AuxInt 948 x := v.Args[1] 949 if !(is32Bit(c)) { 950 break 951 } 952 v.reset(OpAMD64ADDQconst) 953 v.AuxInt = c 954 v.AddArg(x) 955 return true 956 } 957 // match: (ADDQ x (SHLQconst [3] y)) 958 // cond: 959 // result: (LEAQ8 x y) 960 for { 961 x := v.Args[0] 962 v_1 := v.Args[1] 963 if v_1.Op != OpAMD64SHLQconst { 964 break 965 } 966 if v_1.AuxInt != 3 { 967 break 968 } 969 y := v_1.Args[0] 970 v.reset(OpAMD64LEAQ8) 971 v.AddArg(x) 972 v.AddArg(y) 973 return true 974 } 975 // match: (ADDQ x (SHLQconst [2] y)) 976 // cond: 977 // result: (LEAQ4 x y) 978 for { 979 x := v.Args[0] 980 v_1 := v.Args[1] 981 if v_1.Op != OpAMD64SHLQconst { 982 break 983 } 984 if v_1.AuxInt != 2 { 985 break 986 } 987 y := v_1.Args[0] 988 v.reset(OpAMD64LEAQ4) 989 v.AddArg(x) 990 v.AddArg(y) 991 return true 992 } 993 // match: (ADDQ x (SHLQconst [1] y)) 994 // cond: 995 // result: (LEAQ2 x y) 996 for { 997 x := v.Args[0] 998 v_1 := v.Args[1] 999 if v_1.Op != OpAMD64SHLQconst { 1000 break 1001 } 1002 if v_1.AuxInt != 1 { 1003 break 1004 } 1005 y := v_1.Args[0] 1006 v.reset(OpAMD64LEAQ2) 1007 v.AddArg(x) 1008 v.AddArg(y) 1009 return true 
1010 } 1011 // match: (ADDQ x (ADDQ y y)) 1012 // cond: 1013 // result: (LEAQ2 x y) 1014 for { 1015 x := v.Args[0] 1016 v_1 := v.Args[1] 1017 if v_1.Op != OpAMD64ADDQ { 1018 break 1019 } 1020 y := v_1.Args[0] 1021 if y != v_1.Args[1] { 1022 break 1023 } 1024 v.reset(OpAMD64LEAQ2) 1025 v.AddArg(x) 1026 v.AddArg(y) 1027 return true 1028 } 1029 // match: (ADDQ x (ADDQ x y)) 1030 // cond: 1031 // result: (LEAQ2 y x) 1032 for { 1033 x := v.Args[0] 1034 v_1 := v.Args[1] 1035 if v_1.Op != OpAMD64ADDQ { 1036 break 1037 } 1038 if x != v_1.Args[0] { 1039 break 1040 } 1041 y := v_1.Args[1] 1042 v.reset(OpAMD64LEAQ2) 1043 v.AddArg(y) 1044 v.AddArg(x) 1045 return true 1046 } 1047 // match: (ADDQ x (ADDQ y x)) 1048 // cond: 1049 // result: (LEAQ2 y x) 1050 for { 1051 x := v.Args[0] 1052 v_1 := v.Args[1] 1053 if v_1.Op != OpAMD64ADDQ { 1054 break 1055 } 1056 y := v_1.Args[0] 1057 if x != v_1.Args[1] { 1058 break 1059 } 1060 v.reset(OpAMD64LEAQ2) 1061 v.AddArg(y) 1062 v.AddArg(x) 1063 return true 1064 } 1065 // match: (ADDQ (ADDQconst [c] x) y) 1066 // cond: 1067 // result: (LEAQ1 [c] x y) 1068 for { 1069 v_0 := v.Args[0] 1070 if v_0.Op != OpAMD64ADDQconst { 1071 break 1072 } 1073 c := v_0.AuxInt 1074 x := v_0.Args[0] 1075 y := v.Args[1] 1076 v.reset(OpAMD64LEAQ1) 1077 v.AuxInt = c 1078 v.AddArg(x) 1079 v.AddArg(y) 1080 return true 1081 } 1082 // match: (ADDQ x (ADDQconst [c] y)) 1083 // cond: 1084 // result: (LEAQ1 [c] x y) 1085 for { 1086 x := v.Args[0] 1087 v_1 := v.Args[1] 1088 if v_1.Op != OpAMD64ADDQconst { 1089 break 1090 } 1091 c := v_1.AuxInt 1092 y := v_1.Args[0] 1093 v.reset(OpAMD64LEAQ1) 1094 v.AuxInt = c 1095 v.AddArg(x) 1096 v.AddArg(y) 1097 return true 1098 } 1099 // match: (ADDQ x (LEAQ [c] {s} y)) 1100 // cond: x.Op != OpSB && y.Op != OpSB 1101 // result: (LEAQ1 [c] {s} x y) 1102 for { 1103 x := v.Args[0] 1104 v_1 := v.Args[1] 1105 if v_1.Op != OpAMD64LEAQ { 1106 break 1107 } 1108 c := v_1.AuxInt 1109 s := v_1.Aux 1110 y := v_1.Args[0] 1111 if !(x.Op != OpSB && 
y.Op != OpSB) { 1112 break 1113 } 1114 v.reset(OpAMD64LEAQ1) 1115 v.AuxInt = c 1116 v.Aux = s 1117 v.AddArg(x) 1118 v.AddArg(y) 1119 return true 1120 } 1121 // match: (ADDQ (LEAQ [c] {s} x) y) 1122 // cond: x.Op != OpSB && y.Op != OpSB 1123 // result: (LEAQ1 [c] {s} x y) 1124 for { 1125 v_0 := v.Args[0] 1126 if v_0.Op != OpAMD64LEAQ { 1127 break 1128 } 1129 c := v_0.AuxInt 1130 s := v_0.Aux 1131 x := v_0.Args[0] 1132 y := v.Args[1] 1133 if !(x.Op != OpSB && y.Op != OpSB) { 1134 break 1135 } 1136 v.reset(OpAMD64LEAQ1) 1137 v.AuxInt = c 1138 v.Aux = s 1139 v.AddArg(x) 1140 v.AddArg(y) 1141 return true 1142 } 1143 // match: (ADDQ x (NEGQ y)) 1144 // cond: 1145 // result: (SUBQ x y) 1146 for { 1147 x := v.Args[0] 1148 v_1 := v.Args[1] 1149 if v_1.Op != OpAMD64NEGQ { 1150 break 1151 } 1152 y := v_1.Args[0] 1153 v.reset(OpAMD64SUBQ) 1154 v.AddArg(x) 1155 v.AddArg(y) 1156 return true 1157 } 1158 return false 1159 } 1160 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { 1161 b := v.Block 1162 _ = b 1163 // match: (ADDQconst [c] (ADDQ x y)) 1164 // cond: 1165 // result: (LEAQ1 [c] x y) 1166 for { 1167 c := v.AuxInt 1168 v_0 := v.Args[0] 1169 if v_0.Op != OpAMD64ADDQ { 1170 break 1171 } 1172 x := v_0.Args[0] 1173 y := v_0.Args[1] 1174 v.reset(OpAMD64LEAQ1) 1175 v.AuxInt = c 1176 v.AddArg(x) 1177 v.AddArg(y) 1178 return true 1179 } 1180 // match: (ADDQconst [c] (LEAQ [d] {s} x)) 1181 // cond: is32Bit(c+d) 1182 // result: (LEAQ [c+d] {s} x) 1183 for { 1184 c := v.AuxInt 1185 v_0 := v.Args[0] 1186 if v_0.Op != OpAMD64LEAQ { 1187 break 1188 } 1189 d := v_0.AuxInt 1190 s := v_0.Aux 1191 x := v_0.Args[0] 1192 if !(is32Bit(c + d)) { 1193 break 1194 } 1195 v.reset(OpAMD64LEAQ) 1196 v.AuxInt = c + d 1197 v.Aux = s 1198 v.AddArg(x) 1199 return true 1200 } 1201 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) 1202 // cond: is32Bit(c+d) 1203 // result: (LEAQ1 [c+d] {s} x y) 1204 for { 1205 c := v.AuxInt 1206 v_0 := v.Args[0] 1207 if v_0.Op != OpAMD64LEAQ1 { 1208 
break 1209 } 1210 d := v_0.AuxInt 1211 s := v_0.Aux 1212 x := v_0.Args[0] 1213 y := v_0.Args[1] 1214 if !(is32Bit(c + d)) { 1215 break 1216 } 1217 v.reset(OpAMD64LEAQ1) 1218 v.AuxInt = c + d 1219 v.Aux = s 1220 v.AddArg(x) 1221 v.AddArg(y) 1222 return true 1223 } 1224 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) 1225 // cond: is32Bit(c+d) 1226 // result: (LEAQ2 [c+d] {s} x y) 1227 for { 1228 c := v.AuxInt 1229 v_0 := v.Args[0] 1230 if v_0.Op != OpAMD64LEAQ2 { 1231 break 1232 } 1233 d := v_0.AuxInt 1234 s := v_0.Aux 1235 x := v_0.Args[0] 1236 y := v_0.Args[1] 1237 if !(is32Bit(c + d)) { 1238 break 1239 } 1240 v.reset(OpAMD64LEAQ2) 1241 v.AuxInt = c + d 1242 v.Aux = s 1243 v.AddArg(x) 1244 v.AddArg(y) 1245 return true 1246 } 1247 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) 1248 // cond: is32Bit(c+d) 1249 // result: (LEAQ4 [c+d] {s} x y) 1250 for { 1251 c := v.AuxInt 1252 v_0 := v.Args[0] 1253 if v_0.Op != OpAMD64LEAQ4 { 1254 break 1255 } 1256 d := v_0.AuxInt 1257 s := v_0.Aux 1258 x := v_0.Args[0] 1259 y := v_0.Args[1] 1260 if !(is32Bit(c + d)) { 1261 break 1262 } 1263 v.reset(OpAMD64LEAQ4) 1264 v.AuxInt = c + d 1265 v.Aux = s 1266 v.AddArg(x) 1267 v.AddArg(y) 1268 return true 1269 } 1270 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) 1271 // cond: is32Bit(c+d) 1272 // result: (LEAQ8 [c+d] {s} x y) 1273 for { 1274 c := v.AuxInt 1275 v_0 := v.Args[0] 1276 if v_0.Op != OpAMD64LEAQ8 { 1277 break 1278 } 1279 d := v_0.AuxInt 1280 s := v_0.Aux 1281 x := v_0.Args[0] 1282 y := v_0.Args[1] 1283 if !(is32Bit(c + d)) { 1284 break 1285 } 1286 v.reset(OpAMD64LEAQ8) 1287 v.AuxInt = c + d 1288 v.Aux = s 1289 v.AddArg(x) 1290 v.AddArg(y) 1291 return true 1292 } 1293 // match: (ADDQconst [0] x) 1294 // cond: 1295 // result: x 1296 for { 1297 if v.AuxInt != 0 { 1298 break 1299 } 1300 x := v.Args[0] 1301 v.reset(OpCopy) 1302 v.Type = x.Type 1303 v.AddArg(x) 1304 return true 1305 } 1306 // match: (ADDQconst [c] (MOVQconst [d])) 1307 // cond: 1308 // result: (MOVQconst [c+d]) 1309 for 
{ 1310 c := v.AuxInt 1311 v_0 := v.Args[0] 1312 if v_0.Op != OpAMD64MOVQconst { 1313 break 1314 } 1315 d := v_0.AuxInt 1316 v.reset(OpAMD64MOVQconst) 1317 v.AuxInt = c + d 1318 return true 1319 } 1320 // match: (ADDQconst [c] (ADDQconst [d] x)) 1321 // cond: is32Bit(c+d) 1322 // result: (ADDQconst [c+d] x) 1323 for { 1324 c := v.AuxInt 1325 v_0 := v.Args[0] 1326 if v_0.Op != OpAMD64ADDQconst { 1327 break 1328 } 1329 d := v_0.AuxInt 1330 x := v_0.Args[0] 1331 if !(is32Bit(c + d)) { 1332 break 1333 } 1334 v.reset(OpAMD64ADDQconst) 1335 v.AuxInt = c + d 1336 v.AddArg(x) 1337 return true 1338 } 1339 return false 1340 } 1341 func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { 1342 b := v.Block 1343 _ = b 1344 // match: (ANDL x (MOVLconst [c])) 1345 // cond: 1346 // result: (ANDLconst [c] x) 1347 for { 1348 x := v.Args[0] 1349 v_1 := v.Args[1] 1350 if v_1.Op != OpAMD64MOVLconst { 1351 break 1352 } 1353 c := v_1.AuxInt 1354 v.reset(OpAMD64ANDLconst) 1355 v.AuxInt = c 1356 v.AddArg(x) 1357 return true 1358 } 1359 // match: (ANDL (MOVLconst [c]) x) 1360 // cond: 1361 // result: (ANDLconst [c] x) 1362 for { 1363 v_0 := v.Args[0] 1364 if v_0.Op != OpAMD64MOVLconst { 1365 break 1366 } 1367 c := v_0.AuxInt 1368 x := v.Args[1] 1369 v.reset(OpAMD64ANDLconst) 1370 v.AuxInt = c 1371 v.AddArg(x) 1372 return true 1373 } 1374 // match: (ANDL x x) 1375 // cond: 1376 // result: x 1377 for { 1378 x := v.Args[0] 1379 if x != v.Args[1] { 1380 break 1381 } 1382 v.reset(OpCopy) 1383 v.Type = x.Type 1384 v.AddArg(x) 1385 return true 1386 } 1387 return false 1388 } 1389 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool { 1390 b := v.Block 1391 _ = b 1392 // match: (ANDLconst [c] (ANDLconst [d] x)) 1393 // cond: 1394 // result: (ANDLconst [c & d] x) 1395 for { 1396 c := v.AuxInt 1397 v_0 := v.Args[0] 1398 if v_0.Op != OpAMD64ANDLconst { 1399 break 1400 } 1401 d := v_0.AuxInt 1402 x := v_0.Args[0] 1403 v.reset(OpAMD64ANDLconst) 1404 v.AuxInt = c & d 1405 
v.AddArg(x) 1406 return true 1407 } 1408 // match: (ANDLconst [c] _) 1409 // cond: int32(c)==0 1410 // result: (MOVLconst [0]) 1411 for { 1412 c := v.AuxInt 1413 if !(int32(c) == 0) { 1414 break 1415 } 1416 v.reset(OpAMD64MOVLconst) 1417 v.AuxInt = 0 1418 return true 1419 } 1420 // match: (ANDLconst [c] x) 1421 // cond: int32(c)==-1 1422 // result: x 1423 for { 1424 c := v.AuxInt 1425 x := v.Args[0] 1426 if !(int32(c) == -1) { 1427 break 1428 } 1429 v.reset(OpCopy) 1430 v.Type = x.Type 1431 v.AddArg(x) 1432 return true 1433 } 1434 // match: (ANDLconst [c] (MOVLconst [d])) 1435 // cond: 1436 // result: (MOVLconst [c&d]) 1437 for { 1438 c := v.AuxInt 1439 v_0 := v.Args[0] 1440 if v_0.Op != OpAMD64MOVLconst { 1441 break 1442 } 1443 d := v_0.AuxInt 1444 v.reset(OpAMD64MOVLconst) 1445 v.AuxInt = c & d 1446 return true 1447 } 1448 return false 1449 } 1450 func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool { 1451 b := v.Block 1452 _ = b 1453 // match: (ANDQ x (MOVQconst [c])) 1454 // cond: is32Bit(c) 1455 // result: (ANDQconst [c] x) 1456 for { 1457 x := v.Args[0] 1458 v_1 := v.Args[1] 1459 if v_1.Op != OpAMD64MOVQconst { 1460 break 1461 } 1462 c := v_1.AuxInt 1463 if !(is32Bit(c)) { 1464 break 1465 } 1466 v.reset(OpAMD64ANDQconst) 1467 v.AuxInt = c 1468 v.AddArg(x) 1469 return true 1470 } 1471 // match: (ANDQ (MOVQconst [c]) x) 1472 // cond: is32Bit(c) 1473 // result: (ANDQconst [c] x) 1474 for { 1475 v_0 := v.Args[0] 1476 if v_0.Op != OpAMD64MOVQconst { 1477 break 1478 } 1479 c := v_0.AuxInt 1480 x := v.Args[1] 1481 if !(is32Bit(c)) { 1482 break 1483 } 1484 v.reset(OpAMD64ANDQconst) 1485 v.AuxInt = c 1486 v.AddArg(x) 1487 return true 1488 } 1489 // match: (ANDQ x x) 1490 // cond: 1491 // result: x 1492 for { 1493 x := v.Args[0] 1494 if x != v.Args[1] { 1495 break 1496 } 1497 v.reset(OpCopy) 1498 v.Type = x.Type 1499 v.AddArg(x) 1500 return true 1501 } 1502 return false 1503 } 1504 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { 
1505 b := v.Block 1506 _ = b 1507 // match: (ANDQconst [c] (ANDQconst [d] x)) 1508 // cond: 1509 // result: (ANDQconst [c & d] x) 1510 for { 1511 c := v.AuxInt 1512 v_0 := v.Args[0] 1513 if v_0.Op != OpAMD64ANDQconst { 1514 break 1515 } 1516 d := v_0.AuxInt 1517 x := v_0.Args[0] 1518 v.reset(OpAMD64ANDQconst) 1519 v.AuxInt = c & d 1520 v.AddArg(x) 1521 return true 1522 } 1523 // match: (ANDQconst [0xFF] x) 1524 // cond: 1525 // result: (MOVBQZX x) 1526 for { 1527 if v.AuxInt != 0xFF { 1528 break 1529 } 1530 x := v.Args[0] 1531 v.reset(OpAMD64MOVBQZX) 1532 v.AddArg(x) 1533 return true 1534 } 1535 // match: (ANDQconst [0xFFFF] x) 1536 // cond: 1537 // result: (MOVWQZX x) 1538 for { 1539 if v.AuxInt != 0xFFFF { 1540 break 1541 } 1542 x := v.Args[0] 1543 v.reset(OpAMD64MOVWQZX) 1544 v.AddArg(x) 1545 return true 1546 } 1547 // match: (ANDQconst [0xFFFFFFFF] x) 1548 // cond: 1549 // result: (MOVLQZX x) 1550 for { 1551 if v.AuxInt != 0xFFFFFFFF { 1552 break 1553 } 1554 x := v.Args[0] 1555 v.reset(OpAMD64MOVLQZX) 1556 v.AddArg(x) 1557 return true 1558 } 1559 // match: (ANDQconst [0] _) 1560 // cond: 1561 // result: (MOVQconst [0]) 1562 for { 1563 if v.AuxInt != 0 { 1564 break 1565 } 1566 v.reset(OpAMD64MOVQconst) 1567 v.AuxInt = 0 1568 return true 1569 } 1570 // match: (ANDQconst [-1] x) 1571 // cond: 1572 // result: x 1573 for { 1574 if v.AuxInt != -1 { 1575 break 1576 } 1577 x := v.Args[0] 1578 v.reset(OpCopy) 1579 v.Type = x.Type 1580 v.AddArg(x) 1581 return true 1582 } 1583 // match: (ANDQconst [c] (MOVQconst [d])) 1584 // cond: 1585 // result: (MOVQconst [c&d]) 1586 for { 1587 c := v.AuxInt 1588 v_0 := v.Args[0] 1589 if v_0.Op != OpAMD64MOVQconst { 1590 break 1591 } 1592 d := v_0.AuxInt 1593 v.reset(OpAMD64MOVQconst) 1594 v.AuxInt = c & d 1595 return true 1596 } 1597 return false 1598 } 1599 func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { 1600 b := v.Block 1601 _ = b 1602 // match: (CMPB x (MOVLconst [c])) 1603 // cond: 1604 // result: (CMPBconst x 
[int64(int8(c))]) 1605 for { 1606 x := v.Args[0] 1607 v_1 := v.Args[1] 1608 if v_1.Op != OpAMD64MOVLconst { 1609 break 1610 } 1611 c := v_1.AuxInt 1612 v.reset(OpAMD64CMPBconst) 1613 v.AuxInt = int64(int8(c)) 1614 v.AddArg(x) 1615 return true 1616 } 1617 // match: (CMPB (MOVLconst [c]) x) 1618 // cond: 1619 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 1620 for { 1621 v_0 := v.Args[0] 1622 if v_0.Op != OpAMD64MOVLconst { 1623 break 1624 } 1625 c := v_0.AuxInt 1626 x := v.Args[1] 1627 v.reset(OpAMD64InvertFlags) 1628 v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 1629 v0.AuxInt = int64(int8(c)) 1630 v0.AddArg(x) 1631 v.AddArg(v0) 1632 return true 1633 } 1634 return false 1635 } 1636 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { 1637 b := v.Block 1638 _ = b 1639 // match: (CMPBconst (MOVLconst [x]) [y]) 1640 // cond: int8(x)==int8(y) 1641 // result: (FlagEQ) 1642 for { 1643 y := v.AuxInt 1644 v_0 := v.Args[0] 1645 if v_0.Op != OpAMD64MOVLconst { 1646 break 1647 } 1648 x := v_0.AuxInt 1649 if !(int8(x) == int8(y)) { 1650 break 1651 } 1652 v.reset(OpAMD64FlagEQ) 1653 return true 1654 } 1655 // match: (CMPBconst (MOVLconst [x]) [y]) 1656 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 1657 // result: (FlagLT_ULT) 1658 for { 1659 y := v.AuxInt 1660 v_0 := v.Args[0] 1661 if v_0.Op != OpAMD64MOVLconst { 1662 break 1663 } 1664 x := v_0.AuxInt 1665 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 1666 break 1667 } 1668 v.reset(OpAMD64FlagLT_ULT) 1669 return true 1670 } 1671 // match: (CMPBconst (MOVLconst [x]) [y]) 1672 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 1673 // result: (FlagLT_UGT) 1674 for { 1675 y := v.AuxInt 1676 v_0 := v.Args[0] 1677 if v_0.Op != OpAMD64MOVLconst { 1678 break 1679 } 1680 x := v_0.AuxInt 1681 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 1682 break 1683 } 1684 v.reset(OpAMD64FlagLT_UGT) 1685 return true 1686 } 1687 // match: (CMPBconst (MOVLconst [x]) [y]) 1688 // cond: int8(x)>int8(y) && 
uint8(x)<uint8(y) 1689 // result: (FlagGT_ULT) 1690 for { 1691 y := v.AuxInt 1692 v_0 := v.Args[0] 1693 if v_0.Op != OpAMD64MOVLconst { 1694 break 1695 } 1696 x := v_0.AuxInt 1697 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 1698 break 1699 } 1700 v.reset(OpAMD64FlagGT_ULT) 1701 return true 1702 } 1703 // match: (CMPBconst (MOVLconst [x]) [y]) 1704 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 1705 // result: (FlagGT_UGT) 1706 for { 1707 y := v.AuxInt 1708 v_0 := v.Args[0] 1709 if v_0.Op != OpAMD64MOVLconst { 1710 break 1711 } 1712 x := v_0.AuxInt 1713 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 1714 break 1715 } 1716 v.reset(OpAMD64FlagGT_UGT) 1717 return true 1718 } 1719 // match: (CMPBconst (ANDLconst _ [m]) [n]) 1720 // cond: 0 <= int8(m) && int8(m) < int8(n) 1721 // result: (FlagLT_ULT) 1722 for { 1723 n := v.AuxInt 1724 v_0 := v.Args[0] 1725 if v_0.Op != OpAMD64ANDLconst { 1726 break 1727 } 1728 m := v_0.AuxInt 1729 if !(0 <= int8(m) && int8(m) < int8(n)) { 1730 break 1731 } 1732 v.reset(OpAMD64FlagLT_ULT) 1733 return true 1734 } 1735 // match: (CMPBconst (ANDL x y) [0]) 1736 // cond: 1737 // result: (TESTB x y) 1738 for { 1739 if v.AuxInt != 0 { 1740 break 1741 } 1742 v_0 := v.Args[0] 1743 if v_0.Op != OpAMD64ANDL { 1744 break 1745 } 1746 x := v_0.Args[0] 1747 y := v_0.Args[1] 1748 v.reset(OpAMD64TESTB) 1749 v.AddArg(x) 1750 v.AddArg(y) 1751 return true 1752 } 1753 // match: (CMPBconst (ANDLconst [c] x) [0]) 1754 // cond: 1755 // result: (TESTBconst [int64(int8(c))] x) 1756 for { 1757 if v.AuxInt != 0 { 1758 break 1759 } 1760 v_0 := v.Args[0] 1761 if v_0.Op != OpAMD64ANDLconst { 1762 break 1763 } 1764 c := v_0.AuxInt 1765 x := v_0.Args[0] 1766 v.reset(OpAMD64TESTBconst) 1767 v.AuxInt = int64(int8(c)) 1768 v.AddArg(x) 1769 return true 1770 } 1771 // match: (CMPBconst x [0]) 1772 // cond: 1773 // result: (TESTB x x) 1774 for { 1775 if v.AuxInt != 0 { 1776 break 1777 } 1778 x := v.Args[0] 1779 v.reset(OpAMD64TESTB) 1780 v.AddArg(x) 1781 v.AddArg(x) 
1782 return true 1783 } 1784 return false 1785 } 1786 func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool { 1787 b := v.Block 1788 _ = b 1789 // match: (CMPL x (MOVLconst [c])) 1790 // cond: 1791 // result: (CMPLconst x [c]) 1792 for { 1793 x := v.Args[0] 1794 v_1 := v.Args[1] 1795 if v_1.Op != OpAMD64MOVLconst { 1796 break 1797 } 1798 c := v_1.AuxInt 1799 v.reset(OpAMD64CMPLconst) 1800 v.AuxInt = c 1801 v.AddArg(x) 1802 return true 1803 } 1804 // match: (CMPL (MOVLconst [c]) x) 1805 // cond: 1806 // result: (InvertFlags (CMPLconst x [c])) 1807 for { 1808 v_0 := v.Args[0] 1809 if v_0.Op != OpAMD64MOVLconst { 1810 break 1811 } 1812 c := v_0.AuxInt 1813 x := v.Args[1] 1814 v.reset(OpAMD64InvertFlags) 1815 v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 1816 v0.AuxInt = c 1817 v0.AddArg(x) 1818 v.AddArg(v0) 1819 return true 1820 } 1821 return false 1822 } 1823 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { 1824 b := v.Block 1825 _ = b 1826 // match: (CMPLconst (MOVLconst [x]) [y]) 1827 // cond: int32(x)==int32(y) 1828 // result: (FlagEQ) 1829 for { 1830 y := v.AuxInt 1831 v_0 := v.Args[0] 1832 if v_0.Op != OpAMD64MOVLconst { 1833 break 1834 } 1835 x := v_0.AuxInt 1836 if !(int32(x) == int32(y)) { 1837 break 1838 } 1839 v.reset(OpAMD64FlagEQ) 1840 return true 1841 } 1842 // match: (CMPLconst (MOVLconst [x]) [y]) 1843 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 1844 // result: (FlagLT_ULT) 1845 for { 1846 y := v.AuxInt 1847 v_0 := v.Args[0] 1848 if v_0.Op != OpAMD64MOVLconst { 1849 break 1850 } 1851 x := v_0.AuxInt 1852 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 1853 break 1854 } 1855 v.reset(OpAMD64FlagLT_ULT) 1856 return true 1857 } 1858 // match: (CMPLconst (MOVLconst [x]) [y]) 1859 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 1860 // result: (FlagLT_UGT) 1861 for { 1862 y := v.AuxInt 1863 v_0 := v.Args[0] 1864 if v_0.Op != OpAMD64MOVLconst { 1865 break 1866 } 1867 x := v_0.AuxInt 1868 if !(int32(x) 
< int32(y) && uint32(x) > uint32(y)) { 1869 break 1870 } 1871 v.reset(OpAMD64FlagLT_UGT) 1872 return true 1873 } 1874 // match: (CMPLconst (MOVLconst [x]) [y]) 1875 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 1876 // result: (FlagGT_ULT) 1877 for { 1878 y := v.AuxInt 1879 v_0 := v.Args[0] 1880 if v_0.Op != OpAMD64MOVLconst { 1881 break 1882 } 1883 x := v_0.AuxInt 1884 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 1885 break 1886 } 1887 v.reset(OpAMD64FlagGT_ULT) 1888 return true 1889 } 1890 // match: (CMPLconst (MOVLconst [x]) [y]) 1891 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 1892 // result: (FlagGT_UGT) 1893 for { 1894 y := v.AuxInt 1895 v_0 := v.Args[0] 1896 if v_0.Op != OpAMD64MOVLconst { 1897 break 1898 } 1899 x := v_0.AuxInt 1900 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 1901 break 1902 } 1903 v.reset(OpAMD64FlagGT_UGT) 1904 return true 1905 } 1906 // match: (CMPLconst (SHRLconst _ [c]) [n]) 1907 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 1908 // result: (FlagLT_ULT) 1909 for { 1910 n := v.AuxInt 1911 v_0 := v.Args[0] 1912 if v_0.Op != OpAMD64SHRLconst { 1913 break 1914 } 1915 c := v_0.AuxInt 1916 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 1917 break 1918 } 1919 v.reset(OpAMD64FlagLT_ULT) 1920 return true 1921 } 1922 // match: (CMPLconst (ANDLconst _ [m]) [n]) 1923 // cond: 0 <= int32(m) && int32(m) < int32(n) 1924 // result: (FlagLT_ULT) 1925 for { 1926 n := v.AuxInt 1927 v_0 := v.Args[0] 1928 if v_0.Op != OpAMD64ANDLconst { 1929 break 1930 } 1931 m := v_0.AuxInt 1932 if !(0 <= int32(m) && int32(m) < int32(n)) { 1933 break 1934 } 1935 v.reset(OpAMD64FlagLT_ULT) 1936 return true 1937 } 1938 // match: (CMPLconst (ANDL x y) [0]) 1939 // cond: 1940 // result: (TESTL x y) 1941 for { 1942 if v.AuxInt != 0 { 1943 break 1944 } 1945 v_0 := v.Args[0] 1946 if v_0.Op != OpAMD64ANDL { 1947 break 1948 } 1949 x := v_0.Args[0] 1950 y := v_0.Args[1] 1951 v.reset(OpAMD64TESTL) 1952 
v.AddArg(x) 1953 v.AddArg(y) 1954 return true 1955 } 1956 // match: (CMPLconst (ANDLconst [c] x) [0]) 1957 // cond: 1958 // result: (TESTLconst [c] x) 1959 for { 1960 if v.AuxInt != 0 { 1961 break 1962 } 1963 v_0 := v.Args[0] 1964 if v_0.Op != OpAMD64ANDLconst { 1965 break 1966 } 1967 c := v_0.AuxInt 1968 x := v_0.Args[0] 1969 v.reset(OpAMD64TESTLconst) 1970 v.AuxInt = c 1971 v.AddArg(x) 1972 return true 1973 } 1974 // match: (CMPLconst x [0]) 1975 // cond: 1976 // result: (TESTL x x) 1977 for { 1978 if v.AuxInt != 0 { 1979 break 1980 } 1981 x := v.Args[0] 1982 v.reset(OpAMD64TESTL) 1983 v.AddArg(x) 1984 v.AddArg(x) 1985 return true 1986 } 1987 return false 1988 } 1989 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool { 1990 b := v.Block 1991 _ = b 1992 // match: (CMPQ x (MOVQconst [c])) 1993 // cond: is32Bit(c) 1994 // result: (CMPQconst x [c]) 1995 for { 1996 x := v.Args[0] 1997 v_1 := v.Args[1] 1998 if v_1.Op != OpAMD64MOVQconst { 1999 break 2000 } 2001 c := v_1.AuxInt 2002 if !(is32Bit(c)) { 2003 break 2004 } 2005 v.reset(OpAMD64CMPQconst) 2006 v.AuxInt = c 2007 v.AddArg(x) 2008 return true 2009 } 2010 // match: (CMPQ (MOVQconst [c]) x) 2011 // cond: is32Bit(c) 2012 // result: (InvertFlags (CMPQconst x [c])) 2013 for { 2014 v_0 := v.Args[0] 2015 if v_0.Op != OpAMD64MOVQconst { 2016 break 2017 } 2018 c := v_0.AuxInt 2019 x := v.Args[1] 2020 if !(is32Bit(c)) { 2021 break 2022 } 2023 v.reset(OpAMD64InvertFlags) 2024 v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 2025 v0.AuxInt = c 2026 v0.AddArg(x) 2027 v.AddArg(v0) 2028 return true 2029 } 2030 return false 2031 } 2032 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { 2033 b := v.Block 2034 _ = b 2035 // match: (CMPQconst (MOVQconst [x]) [y]) 2036 // cond: x==y 2037 // result: (FlagEQ) 2038 for { 2039 y := v.AuxInt 2040 v_0 := v.Args[0] 2041 if v_0.Op != OpAMD64MOVQconst { 2042 break 2043 } 2044 x := v_0.AuxInt 2045 if !(x == y) { 2046 break 2047 } 2048 
v.reset(OpAMD64FlagEQ) 2049 return true 2050 } 2051 // match: (CMPQconst (MOVQconst [x]) [y]) 2052 // cond: x<y && uint64(x)<uint64(y) 2053 // result: (FlagLT_ULT) 2054 for { 2055 y := v.AuxInt 2056 v_0 := v.Args[0] 2057 if v_0.Op != OpAMD64MOVQconst { 2058 break 2059 } 2060 x := v_0.AuxInt 2061 if !(x < y && uint64(x) < uint64(y)) { 2062 break 2063 } 2064 v.reset(OpAMD64FlagLT_ULT) 2065 return true 2066 } 2067 // match: (CMPQconst (MOVQconst [x]) [y]) 2068 // cond: x<y && uint64(x)>uint64(y) 2069 // result: (FlagLT_UGT) 2070 for { 2071 y := v.AuxInt 2072 v_0 := v.Args[0] 2073 if v_0.Op != OpAMD64MOVQconst { 2074 break 2075 } 2076 x := v_0.AuxInt 2077 if !(x < y && uint64(x) > uint64(y)) { 2078 break 2079 } 2080 v.reset(OpAMD64FlagLT_UGT) 2081 return true 2082 } 2083 // match: (CMPQconst (MOVQconst [x]) [y]) 2084 // cond: x>y && uint64(x)<uint64(y) 2085 // result: (FlagGT_ULT) 2086 for { 2087 y := v.AuxInt 2088 v_0 := v.Args[0] 2089 if v_0.Op != OpAMD64MOVQconst { 2090 break 2091 } 2092 x := v_0.AuxInt 2093 if !(x > y && uint64(x) < uint64(y)) { 2094 break 2095 } 2096 v.reset(OpAMD64FlagGT_ULT) 2097 return true 2098 } 2099 // match: (CMPQconst (MOVQconst [x]) [y]) 2100 // cond: x>y && uint64(x)>uint64(y) 2101 // result: (FlagGT_UGT) 2102 for { 2103 y := v.AuxInt 2104 v_0 := v.Args[0] 2105 if v_0.Op != OpAMD64MOVQconst { 2106 break 2107 } 2108 x := v_0.AuxInt 2109 if !(x > y && uint64(x) > uint64(y)) { 2110 break 2111 } 2112 v.reset(OpAMD64FlagGT_UGT) 2113 return true 2114 } 2115 // match: (CMPQconst (MOVBQZX _) [c]) 2116 // cond: 0xFF < c 2117 // result: (FlagLT_ULT) 2118 for { 2119 c := v.AuxInt 2120 v_0 := v.Args[0] 2121 if v_0.Op != OpAMD64MOVBQZX { 2122 break 2123 } 2124 if !(0xFF < c) { 2125 break 2126 } 2127 v.reset(OpAMD64FlagLT_ULT) 2128 return true 2129 } 2130 // match: (CMPQconst (MOVWQZX _) [c]) 2131 // cond: 0xFFFF < c 2132 // result: (FlagLT_ULT) 2133 for { 2134 c := v.AuxInt 2135 v_0 := v.Args[0] 2136 if v_0.Op != OpAMD64MOVWQZX { 2137 break 2138 } 
2139 if !(0xFFFF < c) { 2140 break 2141 } 2142 v.reset(OpAMD64FlagLT_ULT) 2143 return true 2144 } 2145 // match: (CMPQconst (MOVLQZX _) [c]) 2146 // cond: 0xFFFFFFFF < c 2147 // result: (FlagLT_ULT) 2148 for { 2149 c := v.AuxInt 2150 v_0 := v.Args[0] 2151 if v_0.Op != OpAMD64MOVLQZX { 2152 break 2153 } 2154 if !(0xFFFFFFFF < c) { 2155 break 2156 } 2157 v.reset(OpAMD64FlagLT_ULT) 2158 return true 2159 } 2160 // match: (CMPQconst (SHRQconst _ [c]) [n]) 2161 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 2162 // result: (FlagLT_ULT) 2163 for { 2164 n := v.AuxInt 2165 v_0 := v.Args[0] 2166 if v_0.Op != OpAMD64SHRQconst { 2167 break 2168 } 2169 c := v_0.AuxInt 2170 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 2171 break 2172 } 2173 v.reset(OpAMD64FlagLT_ULT) 2174 return true 2175 } 2176 // match: (CMPQconst (ANDQconst _ [m]) [n]) 2177 // cond: 0 <= m && m < n 2178 // result: (FlagLT_ULT) 2179 for { 2180 n := v.AuxInt 2181 v_0 := v.Args[0] 2182 if v_0.Op != OpAMD64ANDQconst { 2183 break 2184 } 2185 m := v_0.AuxInt 2186 if !(0 <= m && m < n) { 2187 break 2188 } 2189 v.reset(OpAMD64FlagLT_ULT) 2190 return true 2191 } 2192 // match: (CMPQconst (ANDQ x y) [0]) 2193 // cond: 2194 // result: (TESTQ x y) 2195 for { 2196 if v.AuxInt != 0 { 2197 break 2198 } 2199 v_0 := v.Args[0] 2200 if v_0.Op != OpAMD64ANDQ { 2201 break 2202 } 2203 x := v_0.Args[0] 2204 y := v_0.Args[1] 2205 v.reset(OpAMD64TESTQ) 2206 v.AddArg(x) 2207 v.AddArg(y) 2208 return true 2209 } 2210 // match: (CMPQconst (ANDQconst [c] x) [0]) 2211 // cond: 2212 // result: (TESTQconst [c] x) 2213 for { 2214 if v.AuxInt != 0 { 2215 break 2216 } 2217 v_0 := v.Args[0] 2218 if v_0.Op != OpAMD64ANDQconst { 2219 break 2220 } 2221 c := v_0.AuxInt 2222 x := v_0.Args[0] 2223 v.reset(OpAMD64TESTQconst) 2224 v.AuxInt = c 2225 v.AddArg(x) 2226 return true 2227 } 2228 // match: (CMPQconst x [0]) 2229 // cond: 2230 // result: (TESTQ x x) 2231 for { 2232 if v.AuxInt != 0 { 2233 break 2234 
} 2235 x := v.Args[0] 2236 v.reset(OpAMD64TESTQ) 2237 v.AddArg(x) 2238 v.AddArg(x) 2239 return true 2240 } 2241 return false 2242 } 2243 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { 2244 b := v.Block 2245 _ = b 2246 // match: (CMPW x (MOVLconst [c])) 2247 // cond: 2248 // result: (CMPWconst x [int64(int16(c))]) 2249 for { 2250 x := v.Args[0] 2251 v_1 := v.Args[1] 2252 if v_1.Op != OpAMD64MOVLconst { 2253 break 2254 } 2255 c := v_1.AuxInt 2256 v.reset(OpAMD64CMPWconst) 2257 v.AuxInt = int64(int16(c)) 2258 v.AddArg(x) 2259 return true 2260 } 2261 // match: (CMPW (MOVLconst [c]) x) 2262 // cond: 2263 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 2264 for { 2265 v_0 := v.Args[0] 2266 if v_0.Op != OpAMD64MOVLconst { 2267 break 2268 } 2269 c := v_0.AuxInt 2270 x := v.Args[1] 2271 v.reset(OpAMD64InvertFlags) 2272 v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 2273 v0.AuxInt = int64(int16(c)) 2274 v0.AddArg(x) 2275 v.AddArg(v0) 2276 return true 2277 } 2278 return false 2279 } 2280 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { 2281 b := v.Block 2282 _ = b 2283 // match: (CMPWconst (MOVLconst [x]) [y]) 2284 // cond: int16(x)==int16(y) 2285 // result: (FlagEQ) 2286 for { 2287 y := v.AuxInt 2288 v_0 := v.Args[0] 2289 if v_0.Op != OpAMD64MOVLconst { 2290 break 2291 } 2292 x := v_0.AuxInt 2293 if !(int16(x) == int16(y)) { 2294 break 2295 } 2296 v.reset(OpAMD64FlagEQ) 2297 return true 2298 } 2299 // match: (CMPWconst (MOVLconst [x]) [y]) 2300 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 2301 // result: (FlagLT_ULT) 2302 for { 2303 y := v.AuxInt 2304 v_0 := v.Args[0] 2305 if v_0.Op != OpAMD64MOVLconst { 2306 break 2307 } 2308 x := v_0.AuxInt 2309 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 2310 break 2311 } 2312 v.reset(OpAMD64FlagLT_ULT) 2313 return true 2314 } 2315 // match: (CMPWconst (MOVLconst [x]) [y]) 2316 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 2317 // result: (FlagLT_UGT) 2318 for 
{ 2319 y := v.AuxInt 2320 v_0 := v.Args[0] 2321 if v_0.Op != OpAMD64MOVLconst { 2322 break 2323 } 2324 x := v_0.AuxInt 2325 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 2326 break 2327 } 2328 v.reset(OpAMD64FlagLT_UGT) 2329 return true 2330 } 2331 // match: (CMPWconst (MOVLconst [x]) [y]) 2332 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 2333 // result: (FlagGT_ULT) 2334 for { 2335 y := v.AuxInt 2336 v_0 := v.Args[0] 2337 if v_0.Op != OpAMD64MOVLconst { 2338 break 2339 } 2340 x := v_0.AuxInt 2341 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 2342 break 2343 } 2344 v.reset(OpAMD64FlagGT_ULT) 2345 return true 2346 } 2347 // match: (CMPWconst (MOVLconst [x]) [y]) 2348 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 2349 // result: (FlagGT_UGT) 2350 for { 2351 y := v.AuxInt 2352 v_0 := v.Args[0] 2353 if v_0.Op != OpAMD64MOVLconst { 2354 break 2355 } 2356 x := v_0.AuxInt 2357 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 2358 break 2359 } 2360 v.reset(OpAMD64FlagGT_UGT) 2361 return true 2362 } 2363 // match: (CMPWconst (ANDLconst _ [m]) [n]) 2364 // cond: 0 <= int16(m) && int16(m) < int16(n) 2365 // result: (FlagLT_ULT) 2366 for { 2367 n := v.AuxInt 2368 v_0 := v.Args[0] 2369 if v_0.Op != OpAMD64ANDLconst { 2370 break 2371 } 2372 m := v_0.AuxInt 2373 if !(0 <= int16(m) && int16(m) < int16(n)) { 2374 break 2375 } 2376 v.reset(OpAMD64FlagLT_ULT) 2377 return true 2378 } 2379 // match: (CMPWconst (ANDL x y) [0]) 2380 // cond: 2381 // result: (TESTW x y) 2382 for { 2383 if v.AuxInt != 0 { 2384 break 2385 } 2386 v_0 := v.Args[0] 2387 if v_0.Op != OpAMD64ANDL { 2388 break 2389 } 2390 x := v_0.Args[0] 2391 y := v_0.Args[1] 2392 v.reset(OpAMD64TESTW) 2393 v.AddArg(x) 2394 v.AddArg(y) 2395 return true 2396 } 2397 // match: (CMPWconst (ANDLconst [c] x) [0]) 2398 // cond: 2399 // result: (TESTWconst [int64(int16(c))] x) 2400 for { 2401 if v.AuxInt != 0 { 2402 break 2403 } 2404 v_0 := v.Args[0] 2405 if v_0.Op != OpAMD64ANDLconst { 2406 break 2407 } 2408 
c := v_0.AuxInt 2409 x := v_0.Args[0] 2410 v.reset(OpAMD64TESTWconst) 2411 v.AuxInt = int64(int16(c)) 2412 v.AddArg(x) 2413 return true 2414 } 2415 // match: (CMPWconst x [0]) 2416 // cond: 2417 // result: (TESTW x x) 2418 for { 2419 if v.AuxInt != 0 { 2420 break 2421 } 2422 x := v.Args[0] 2423 v.reset(OpAMD64TESTW) 2424 v.AddArg(x) 2425 v.AddArg(x) 2426 return true 2427 } 2428 return false 2429 } 2430 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool { 2431 b := v.Block 2432 _ = b 2433 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2434 // cond: is32Bit(off1+off2) 2435 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 2436 for { 2437 off1 := v.AuxInt 2438 sym := v.Aux 2439 v_0 := v.Args[0] 2440 if v_0.Op != OpAMD64ADDQconst { 2441 break 2442 } 2443 off2 := v_0.AuxInt 2444 ptr := v_0.Args[0] 2445 old := v.Args[1] 2446 new_ := v.Args[2] 2447 mem := v.Args[3] 2448 if !(is32Bit(off1 + off2)) { 2449 break 2450 } 2451 v.reset(OpAMD64CMPXCHGLlock) 2452 v.AuxInt = off1 + off2 2453 v.Aux = sym 2454 v.AddArg(ptr) 2455 v.AddArg(old) 2456 v.AddArg(new_) 2457 v.AddArg(mem) 2458 return true 2459 } 2460 return false 2461 } 2462 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool { 2463 b := v.Block 2464 _ = b 2465 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2466 // cond: is32Bit(off1+off2) 2467 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 2468 for { 2469 off1 := v.AuxInt 2470 sym := v.Aux 2471 v_0 := v.Args[0] 2472 if v_0.Op != OpAMD64ADDQconst { 2473 break 2474 } 2475 off2 := v_0.AuxInt 2476 ptr := v_0.Args[0] 2477 old := v.Args[1] 2478 new_ := v.Args[2] 2479 mem := v.Args[3] 2480 if !(is32Bit(off1 + off2)) { 2481 break 2482 } 2483 v.reset(OpAMD64CMPXCHGQlock) 2484 v.AuxInt = off1 + off2 2485 v.Aux = sym 2486 v.AddArg(ptr) 2487 v.AddArg(old) 2488 v.AddArg(new_) 2489 v.AddArg(mem) 2490 return true 2491 } 2492 return false 2493 } 2494 func 
rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool {
	// Fold a 32-bit constant add into the LEAL displacement.
	b := v.Block
	_ = b
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64LEAQ simplifies LEAQ address computations:
// it folds constant adds into the displacement, turns a plain ADDQ into
// an indexed LEAQ1, and collapses a LEAQ wrapped around a nested
// LEAQ/LEAQ1/LEAQ2/LEAQ4/LEAQ8 into the single nested form when the
// combined offset fits in 32 bits and the symbols can be merged.
func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		// NOTE(review): both operands are rejected when they are the
		// pseudo static-base register SB — presumably SB cannot occupy
		// a register slot of an indexed address; confirm against opGen.
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64LEAQ1 simplifies scale-1 indexed address
// computations: constant adds on either operand are folded into the
// displacement, and a shifted index (SHLQconst) is promoted into the
// higher-scale LEAQ2 form (further promotions continue past this chunk).
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+d) && y.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if
v_1.AuxInt != 1 { 2747 break 2748 } 2749 y := v_1.Args[0] 2750 v.reset(OpAMD64LEAQ2) 2751 v.AuxInt = c 2752 v.Aux = s 2753 v.AddArg(x) 2754 v.AddArg(y) 2755 return true 2756 } 2757 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 2758 // cond: 2759 // result: (LEAQ2 [c] {s} y x) 2760 for { 2761 c := v.AuxInt 2762 s := v.Aux 2763 v_0 := v.Args[0] 2764 if v_0.Op != OpAMD64SHLQconst { 2765 break 2766 } 2767 if v_0.AuxInt != 1 { 2768 break 2769 } 2770 x := v_0.Args[0] 2771 y := v.Args[1] 2772 v.reset(OpAMD64LEAQ2) 2773 v.AuxInt = c 2774 v.Aux = s 2775 v.AddArg(y) 2776 v.AddArg(x) 2777 return true 2778 } 2779 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 2780 // cond: 2781 // result: (LEAQ4 [c] {s} x y) 2782 for { 2783 c := v.AuxInt 2784 s := v.Aux 2785 x := v.Args[0] 2786 v_1 := v.Args[1] 2787 if v_1.Op != OpAMD64SHLQconst { 2788 break 2789 } 2790 if v_1.AuxInt != 2 { 2791 break 2792 } 2793 y := v_1.Args[0] 2794 v.reset(OpAMD64LEAQ4) 2795 v.AuxInt = c 2796 v.Aux = s 2797 v.AddArg(x) 2798 v.AddArg(y) 2799 return true 2800 } 2801 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 2802 // cond: 2803 // result: (LEAQ4 [c] {s} y x) 2804 for { 2805 c := v.AuxInt 2806 s := v.Aux 2807 v_0 := v.Args[0] 2808 if v_0.Op != OpAMD64SHLQconst { 2809 break 2810 } 2811 if v_0.AuxInt != 2 { 2812 break 2813 } 2814 x := v_0.Args[0] 2815 y := v.Args[1] 2816 v.reset(OpAMD64LEAQ4) 2817 v.AuxInt = c 2818 v.Aux = s 2819 v.AddArg(y) 2820 v.AddArg(x) 2821 return true 2822 } 2823 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 2824 // cond: 2825 // result: (LEAQ8 [c] {s} x y) 2826 for { 2827 c := v.AuxInt 2828 s := v.Aux 2829 x := v.Args[0] 2830 v_1 := v.Args[1] 2831 if v_1.Op != OpAMD64SHLQconst { 2832 break 2833 } 2834 if v_1.AuxInt != 3 { 2835 break 2836 } 2837 y := v_1.Args[0] 2838 v.reset(OpAMD64LEAQ8) 2839 v.AuxInt = c 2840 v.Aux = s 2841 v.AddArg(x) 2842 v.AddArg(y) 2843 return true 2844 } 2845 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) 2846 // cond: 2847 // result: (LEAQ8 [c] {s} y x) 2848 for { 
2849 c := v.AuxInt 2850 s := v.Aux 2851 v_0 := v.Args[0] 2852 if v_0.Op != OpAMD64SHLQconst { 2853 break 2854 } 2855 if v_0.AuxInt != 3 { 2856 break 2857 } 2858 x := v_0.Args[0] 2859 y := v.Args[1] 2860 v.reset(OpAMD64LEAQ8) 2861 v.AuxInt = c 2862 v.Aux = s 2863 v.AddArg(y) 2864 v.AddArg(x) 2865 return true 2866 } 2867 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 2868 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 2869 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2870 for { 2871 off1 := v.AuxInt 2872 sym1 := v.Aux 2873 v_0 := v.Args[0] 2874 if v_0.Op != OpAMD64LEAQ { 2875 break 2876 } 2877 off2 := v_0.AuxInt 2878 sym2 := v_0.Aux 2879 x := v_0.Args[0] 2880 y := v.Args[1] 2881 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 2882 break 2883 } 2884 v.reset(OpAMD64LEAQ1) 2885 v.AuxInt = off1 + off2 2886 v.Aux = mergeSym(sym1, sym2) 2887 v.AddArg(x) 2888 v.AddArg(y) 2889 return true 2890 } 2891 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 2892 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 2893 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2894 for { 2895 off1 := v.AuxInt 2896 sym1 := v.Aux 2897 x := v.Args[0] 2898 v_1 := v.Args[1] 2899 if v_1.Op != OpAMD64LEAQ { 2900 break 2901 } 2902 off2 := v_1.AuxInt 2903 sym2 := v_1.Aux 2904 y := v_1.Args[0] 2905 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 2906 break 2907 } 2908 v.reset(OpAMD64LEAQ1) 2909 v.AuxInt = off1 + off2 2910 v.Aux = mergeSym(sym1, sym2) 2911 v.AddArg(x) 2912 v.AddArg(y) 2913 return true 2914 } 2915 return false 2916 } 2917 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 2918 b := v.Block 2919 _ = b 2920 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 2921 // cond: is32Bit(c+d) && x.Op != OpSB 2922 // result: (LEAQ2 [c+d] {s} x y) 2923 for { 2924 c := v.AuxInt 2925 s := v.Aux 2926 v_0 := v.Args[0] 2927 if v_0.Op != OpAMD64ADDQconst { 2928 break 2929 
} 2930 d := v_0.AuxInt 2931 x := v_0.Args[0] 2932 y := v.Args[1] 2933 if !(is32Bit(c+d) && x.Op != OpSB) { 2934 break 2935 } 2936 v.reset(OpAMD64LEAQ2) 2937 v.AuxInt = c + d 2938 v.Aux = s 2939 v.AddArg(x) 2940 v.AddArg(y) 2941 return true 2942 } 2943 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 2944 // cond: is32Bit(c+2*d) && y.Op != OpSB 2945 // result: (LEAQ2 [c+2*d] {s} x y) 2946 for { 2947 c := v.AuxInt 2948 s := v.Aux 2949 x := v.Args[0] 2950 v_1 := v.Args[1] 2951 if v_1.Op != OpAMD64ADDQconst { 2952 break 2953 } 2954 d := v_1.AuxInt 2955 y := v_1.Args[0] 2956 if !(is32Bit(c+2*d) && y.Op != OpSB) { 2957 break 2958 } 2959 v.reset(OpAMD64LEAQ2) 2960 v.AuxInt = c + 2*d 2961 v.Aux = s 2962 v.AddArg(x) 2963 v.AddArg(y) 2964 return true 2965 } 2966 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 2967 // cond: 2968 // result: (LEAQ4 [c] {s} x y) 2969 for { 2970 c := v.AuxInt 2971 s := v.Aux 2972 x := v.Args[0] 2973 v_1 := v.Args[1] 2974 if v_1.Op != OpAMD64SHLQconst { 2975 break 2976 } 2977 if v_1.AuxInt != 1 { 2978 break 2979 } 2980 y := v_1.Args[0] 2981 v.reset(OpAMD64LEAQ4) 2982 v.AuxInt = c 2983 v.Aux = s 2984 v.AddArg(x) 2985 v.AddArg(y) 2986 return true 2987 } 2988 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 2989 // cond: 2990 // result: (LEAQ8 [c] {s} x y) 2991 for { 2992 c := v.AuxInt 2993 s := v.Aux 2994 x := v.Args[0] 2995 v_1 := v.Args[1] 2996 if v_1.Op != OpAMD64SHLQconst { 2997 break 2998 } 2999 if v_1.AuxInt != 2 { 3000 break 3001 } 3002 y := v_1.Args[0] 3003 v.reset(OpAMD64LEAQ8) 3004 v.AuxInt = c 3005 v.Aux = s 3006 v.AddArg(x) 3007 v.AddArg(y) 3008 return true 3009 } 3010 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3011 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3012 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3013 for { 3014 off1 := v.AuxInt 3015 sym1 := v.Aux 3016 v_0 := v.Args[0] 3017 if v_0.Op != OpAMD64LEAQ { 3018 break 3019 } 3020 off2 := v_0.AuxInt 3021 sym2 := v_0.Aux 3022 x := 
v_0.Args[0] 3023 y := v.Args[1] 3024 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3025 break 3026 } 3027 v.reset(OpAMD64LEAQ2) 3028 v.AuxInt = off1 + off2 3029 v.Aux = mergeSym(sym1, sym2) 3030 v.AddArg(x) 3031 v.AddArg(y) 3032 return true 3033 } 3034 return false 3035 } 3036 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 3037 b := v.Block 3038 _ = b 3039 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 3040 // cond: is32Bit(c+d) && x.Op != OpSB 3041 // result: (LEAQ4 [c+d] {s} x y) 3042 for { 3043 c := v.AuxInt 3044 s := v.Aux 3045 v_0 := v.Args[0] 3046 if v_0.Op != OpAMD64ADDQconst { 3047 break 3048 } 3049 d := v_0.AuxInt 3050 x := v_0.Args[0] 3051 y := v.Args[1] 3052 if !(is32Bit(c+d) && x.Op != OpSB) { 3053 break 3054 } 3055 v.reset(OpAMD64LEAQ4) 3056 v.AuxInt = c + d 3057 v.Aux = s 3058 v.AddArg(x) 3059 v.AddArg(y) 3060 return true 3061 } 3062 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 3063 // cond: is32Bit(c+4*d) && y.Op != OpSB 3064 // result: (LEAQ4 [c+4*d] {s} x y) 3065 for { 3066 c := v.AuxInt 3067 s := v.Aux 3068 x := v.Args[0] 3069 v_1 := v.Args[1] 3070 if v_1.Op != OpAMD64ADDQconst { 3071 break 3072 } 3073 d := v_1.AuxInt 3074 y := v_1.Args[0] 3075 if !(is32Bit(c+4*d) && y.Op != OpSB) { 3076 break 3077 } 3078 v.reset(OpAMD64LEAQ4) 3079 v.AuxInt = c + 4*d 3080 v.Aux = s 3081 v.AddArg(x) 3082 v.AddArg(y) 3083 return true 3084 } 3085 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 3086 // cond: 3087 // result: (LEAQ8 [c] {s} x y) 3088 for { 3089 c := v.AuxInt 3090 s := v.Aux 3091 x := v.Args[0] 3092 v_1 := v.Args[1] 3093 if v_1.Op != OpAMD64SHLQconst { 3094 break 3095 } 3096 if v_1.AuxInt != 1 { 3097 break 3098 } 3099 y := v_1.Args[0] 3100 v.reset(OpAMD64LEAQ8) 3101 v.AuxInt = c 3102 v.Aux = s 3103 v.AddArg(x) 3104 v.AddArg(y) 3105 return true 3106 } 3107 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3108 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3109 // result: (LEAQ4 
[off1+off2] {mergeSym(sym1,sym2)} x y) 3110 for { 3111 off1 := v.AuxInt 3112 sym1 := v.Aux 3113 v_0 := v.Args[0] 3114 if v_0.Op != OpAMD64LEAQ { 3115 break 3116 } 3117 off2 := v_0.AuxInt 3118 sym2 := v_0.Aux 3119 x := v_0.Args[0] 3120 y := v.Args[1] 3121 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3122 break 3123 } 3124 v.reset(OpAMD64LEAQ4) 3125 v.AuxInt = off1 + off2 3126 v.Aux = mergeSym(sym1, sym2) 3127 v.AddArg(x) 3128 v.AddArg(y) 3129 return true 3130 } 3131 return false 3132 } 3133 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 3134 b := v.Block 3135 _ = b 3136 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 3137 // cond: is32Bit(c+d) && x.Op != OpSB 3138 // result: (LEAQ8 [c+d] {s} x y) 3139 for { 3140 c := v.AuxInt 3141 s := v.Aux 3142 v_0 := v.Args[0] 3143 if v_0.Op != OpAMD64ADDQconst { 3144 break 3145 } 3146 d := v_0.AuxInt 3147 x := v_0.Args[0] 3148 y := v.Args[1] 3149 if !(is32Bit(c+d) && x.Op != OpSB) { 3150 break 3151 } 3152 v.reset(OpAMD64LEAQ8) 3153 v.AuxInt = c + d 3154 v.Aux = s 3155 v.AddArg(x) 3156 v.AddArg(y) 3157 return true 3158 } 3159 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 3160 // cond: is32Bit(c+8*d) && y.Op != OpSB 3161 // result: (LEAQ8 [c+8*d] {s} x y) 3162 for { 3163 c := v.AuxInt 3164 s := v.Aux 3165 x := v.Args[0] 3166 v_1 := v.Args[1] 3167 if v_1.Op != OpAMD64ADDQconst { 3168 break 3169 } 3170 d := v_1.AuxInt 3171 y := v_1.Args[0] 3172 if !(is32Bit(c+8*d) && y.Op != OpSB) { 3173 break 3174 } 3175 v.reset(OpAMD64LEAQ8) 3176 v.AuxInt = c + 8*d 3177 v.Aux = s 3178 v.AddArg(x) 3179 v.AddArg(y) 3180 return true 3181 } 3182 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3183 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3184 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3185 for { 3186 off1 := v.AuxInt 3187 sym1 := v.Aux 3188 v_0 := v.Args[0] 3189 if v_0.Op != OpAMD64LEAQ { 3190 break 3191 } 3192 off2 := v_0.AuxInt 3193 sym2 := v_0.Aux 3194 
x := v_0.Args[0] 3195 y := v.Args[1] 3196 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3197 break 3198 } 3199 v.reset(OpAMD64LEAQ8) 3200 v.AuxInt = off1 + off2 3201 v.Aux = mergeSym(sym1, sym2) 3202 v.AddArg(x) 3203 v.AddArg(y) 3204 return true 3205 } 3206 return false 3207 } 3208 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 3209 b := v.Block 3210 _ = b 3211 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 3212 // cond: x.Uses == 1 && clobber(x) 3213 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3214 for { 3215 x := v.Args[0] 3216 if x.Op != OpAMD64MOVBload { 3217 break 3218 } 3219 off := x.AuxInt 3220 sym := x.Aux 3221 ptr := x.Args[0] 3222 mem := x.Args[1] 3223 if !(x.Uses == 1 && clobber(x)) { 3224 break 3225 } 3226 b = x.Block 3227 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3228 v.reset(OpCopy) 3229 v.AddArg(v0) 3230 v0.AuxInt = off 3231 v0.Aux = sym 3232 v0.AddArg(ptr) 3233 v0.AddArg(mem) 3234 return true 3235 } 3236 // match: (MOVBQSX (ANDLconst [c] x)) 3237 // cond: c & 0x80 == 0 3238 // result: (ANDLconst [c & 0x7f] x) 3239 for { 3240 v_0 := v.Args[0] 3241 if v_0.Op != OpAMD64ANDLconst { 3242 break 3243 } 3244 c := v_0.AuxInt 3245 x := v_0.Args[0] 3246 if !(c&0x80 == 0) { 3247 break 3248 } 3249 v.reset(OpAMD64ANDLconst) 3250 v.AuxInt = c & 0x7f 3251 v.AddArg(x) 3252 return true 3253 } 3254 return false 3255 } 3256 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 3257 b := v.Block 3258 _ = b 3259 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3260 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3261 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3262 for { 3263 off1 := v.AuxInt 3264 sym1 := v.Aux 3265 v_0 := v.Args[0] 3266 if v_0.Op != OpAMD64LEAQ { 3267 break 3268 } 3269 off2 := v_0.AuxInt 3270 sym2 := v_0.Aux 3271 base := v_0.Args[0] 3272 mem := v.Args[1] 3273 if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2)) { 3274 break 3275 } 3276 v.reset(OpAMD64MOVBQSXload) 3277 v.AuxInt = off1 + off2 3278 v.Aux = mergeSym(sym1, sym2) 3279 v.AddArg(base) 3280 v.AddArg(mem) 3281 return true 3282 } 3283 return false 3284 } 3285 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 3286 b := v.Block 3287 _ = b 3288 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 3289 // cond: x.Uses == 1 && clobber(x) 3290 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3291 for { 3292 x := v.Args[0] 3293 if x.Op != OpAMD64MOVBload { 3294 break 3295 } 3296 off := x.AuxInt 3297 sym := x.Aux 3298 ptr := x.Args[0] 3299 mem := x.Args[1] 3300 if !(x.Uses == 1 && clobber(x)) { 3301 break 3302 } 3303 b = x.Block 3304 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3305 v.reset(OpCopy) 3306 v.AddArg(v0) 3307 v0.AuxInt = off 3308 v0.Aux = sym 3309 v0.AddArg(ptr) 3310 v0.AddArg(mem) 3311 return true 3312 } 3313 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 3314 // cond: x.Uses == 1 && clobber(x) 3315 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 3316 for { 3317 x := v.Args[0] 3318 if x.Op != OpAMD64MOVBloadidx1 { 3319 break 3320 } 3321 off := x.AuxInt 3322 sym := x.Aux 3323 ptr := x.Args[0] 3324 idx := x.Args[1] 3325 mem := x.Args[2] 3326 if !(x.Uses == 1 && clobber(x)) { 3327 break 3328 } 3329 b = x.Block 3330 v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type) 3331 v.reset(OpCopy) 3332 v.AddArg(v0) 3333 v0.AuxInt = off 3334 v0.Aux = sym 3335 v0.AddArg(ptr) 3336 v0.AddArg(idx) 3337 v0.AddArg(mem) 3338 return true 3339 } 3340 // match: (MOVBQZX (ANDLconst [c] x)) 3341 // cond: 3342 // result: (ANDLconst [c & 0xff] x) 3343 for { 3344 v_0 := v.Args[0] 3345 if v_0.Op != OpAMD64ANDLconst { 3346 break 3347 } 3348 c := v_0.AuxInt 3349 x := v_0.Args[0] 3350 v.reset(OpAMD64ANDLconst) 3351 v.AuxInt = c & 0xff 3352 v.AddArg(x) 3353 return true 3354 } 3355 return false 3356 } 3357 func rewriteValueAMD64_OpAMD64MOVBload(v 
*Value, config *Config) bool { 3358 b := v.Block 3359 _ = b 3360 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3361 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3362 // result: x 3363 for { 3364 off := v.AuxInt 3365 sym := v.Aux 3366 ptr := v.Args[0] 3367 v_1 := v.Args[1] 3368 if v_1.Op != OpAMD64MOVBstore { 3369 break 3370 } 3371 off2 := v_1.AuxInt 3372 sym2 := v_1.Aux 3373 ptr2 := v_1.Args[0] 3374 x := v_1.Args[1] 3375 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3376 break 3377 } 3378 v.reset(OpCopy) 3379 v.Type = x.Type 3380 v.AddArg(x) 3381 return true 3382 } 3383 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 3384 // cond: is32Bit(off1+off2) 3385 // result: (MOVBload [off1+off2] {sym} ptr mem) 3386 for { 3387 off1 := v.AuxInt 3388 sym := v.Aux 3389 v_0 := v.Args[0] 3390 if v_0.Op != OpAMD64ADDQconst { 3391 break 3392 } 3393 off2 := v_0.AuxInt 3394 ptr := v_0.Args[0] 3395 mem := v.Args[1] 3396 if !(is32Bit(off1 + off2)) { 3397 break 3398 } 3399 v.reset(OpAMD64MOVBload) 3400 v.AuxInt = off1 + off2 3401 v.Aux = sym 3402 v.AddArg(ptr) 3403 v.AddArg(mem) 3404 return true 3405 } 3406 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3407 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3408 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3409 for { 3410 off1 := v.AuxInt 3411 sym1 := v.Aux 3412 v_0 := v.Args[0] 3413 if v_0.Op != OpAMD64LEAQ { 3414 break 3415 } 3416 off2 := v_0.AuxInt 3417 sym2 := v_0.Aux 3418 base := v_0.Args[0] 3419 mem := v.Args[1] 3420 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3421 break 3422 } 3423 v.reset(OpAMD64MOVBload) 3424 v.AuxInt = off1 + off2 3425 v.Aux = mergeSym(sym1, sym2) 3426 v.AddArg(base) 3427 v.AddArg(mem) 3428 return true 3429 } 3430 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 3431 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3432 // result: (MOVBloadidx1 [off1+off2] 
{mergeSym(sym1,sym2)} ptr idx mem) 3433 for { 3434 off1 := v.AuxInt 3435 sym1 := v.Aux 3436 v_0 := v.Args[0] 3437 if v_0.Op != OpAMD64LEAQ1 { 3438 break 3439 } 3440 off2 := v_0.AuxInt 3441 sym2 := v_0.Aux 3442 ptr := v_0.Args[0] 3443 idx := v_0.Args[1] 3444 mem := v.Args[1] 3445 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3446 break 3447 } 3448 v.reset(OpAMD64MOVBloadidx1) 3449 v.AuxInt = off1 + off2 3450 v.Aux = mergeSym(sym1, sym2) 3451 v.AddArg(ptr) 3452 v.AddArg(idx) 3453 v.AddArg(mem) 3454 return true 3455 } 3456 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 3457 // cond: ptr.Op != OpSB 3458 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 3459 for { 3460 off := v.AuxInt 3461 sym := v.Aux 3462 v_0 := v.Args[0] 3463 if v_0.Op != OpAMD64ADDQ { 3464 break 3465 } 3466 ptr := v_0.Args[0] 3467 idx := v_0.Args[1] 3468 mem := v.Args[1] 3469 if !(ptr.Op != OpSB) { 3470 break 3471 } 3472 v.reset(OpAMD64MOVBloadidx1) 3473 v.AuxInt = off 3474 v.Aux = sym 3475 v.AddArg(ptr) 3476 v.AddArg(idx) 3477 v.AddArg(mem) 3478 return true 3479 } 3480 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 3481 // cond: canMergeSym(sym1, sym2) 3482 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3483 for { 3484 off1 := v.AuxInt 3485 sym1 := v.Aux 3486 v_0 := v.Args[0] 3487 if v_0.Op != OpAMD64LEAL { 3488 break 3489 } 3490 off2 := v_0.AuxInt 3491 sym2 := v_0.Aux 3492 base := v_0.Args[0] 3493 mem := v.Args[1] 3494 if !(canMergeSym(sym1, sym2)) { 3495 break 3496 } 3497 v.reset(OpAMD64MOVBload) 3498 v.AuxInt = off1 + off2 3499 v.Aux = mergeSym(sym1, sym2) 3500 v.AddArg(base) 3501 v.AddArg(mem) 3502 return true 3503 } 3504 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 3505 // cond: is32Bit(off1+off2) 3506 // result: (MOVBload [off1+off2] {sym} ptr mem) 3507 for { 3508 off1 := v.AuxInt 3509 sym := v.Aux 3510 v_0 := v.Args[0] 3511 if v_0.Op != OpAMD64ADDLconst { 3512 break 3513 } 3514 off2 := v_0.AuxInt 3515 ptr := v_0.Args[0] 3516 
mem := v.Args[1] 3517 if !(is32Bit(off1 + off2)) { 3518 break 3519 } 3520 v.reset(OpAMD64MOVBload) 3521 v.AuxInt = off1 + off2 3522 v.Aux = sym 3523 v.AddArg(ptr) 3524 v.AddArg(mem) 3525 return true 3526 } 3527 return false 3528 } 3529 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 3530 b := v.Block 3531 _ = b 3532 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 3533 // cond: 3534 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3535 for { 3536 c := v.AuxInt 3537 sym := v.Aux 3538 v_0 := v.Args[0] 3539 if v_0.Op != OpAMD64ADDQconst { 3540 break 3541 } 3542 d := v_0.AuxInt 3543 ptr := v_0.Args[0] 3544 idx := v.Args[1] 3545 mem := v.Args[2] 3546 v.reset(OpAMD64MOVBloadidx1) 3547 v.AuxInt = c + d 3548 v.Aux = sym 3549 v.AddArg(ptr) 3550 v.AddArg(idx) 3551 v.AddArg(mem) 3552 return true 3553 } 3554 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 3555 // cond: 3556 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3557 for { 3558 c := v.AuxInt 3559 sym := v.Aux 3560 ptr := v.Args[0] 3561 v_1 := v.Args[1] 3562 if v_1.Op != OpAMD64ADDQconst { 3563 break 3564 } 3565 d := v_1.AuxInt 3566 idx := v_1.Args[0] 3567 mem := v.Args[2] 3568 v.reset(OpAMD64MOVBloadidx1) 3569 v.AuxInt = c + d 3570 v.Aux = sym 3571 v.AddArg(ptr) 3572 v.AddArg(idx) 3573 v.AddArg(mem) 3574 return true 3575 } 3576 return false 3577 } 3578 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 3579 b := v.Block 3580 _ = b 3581 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 3582 // cond: 3583 // result: (MOVBstore [off] {sym} ptr x mem) 3584 for { 3585 off := v.AuxInt 3586 sym := v.Aux 3587 ptr := v.Args[0] 3588 v_1 := v.Args[1] 3589 if v_1.Op != OpAMD64MOVBQSX { 3590 break 3591 } 3592 x := v_1.Args[0] 3593 mem := v.Args[2] 3594 v.reset(OpAMD64MOVBstore) 3595 v.AuxInt = off 3596 v.Aux = sym 3597 v.AddArg(ptr) 3598 v.AddArg(x) 3599 v.AddArg(mem) 3600 return true 3601 } 3602 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) 
mem) 3603 // cond: 3604 // result: (MOVBstore [off] {sym} ptr x mem) 3605 for { 3606 off := v.AuxInt 3607 sym := v.Aux 3608 ptr := v.Args[0] 3609 v_1 := v.Args[1] 3610 if v_1.Op != OpAMD64MOVBQZX { 3611 break 3612 } 3613 x := v_1.Args[0] 3614 mem := v.Args[2] 3615 v.reset(OpAMD64MOVBstore) 3616 v.AuxInt = off 3617 v.Aux = sym 3618 v.AddArg(ptr) 3619 v.AddArg(x) 3620 v.AddArg(mem) 3621 return true 3622 } 3623 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 3624 // cond: is32Bit(off1+off2) 3625 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 3626 for { 3627 off1 := v.AuxInt 3628 sym := v.Aux 3629 v_0 := v.Args[0] 3630 if v_0.Op != OpAMD64ADDQconst { 3631 break 3632 } 3633 off2 := v_0.AuxInt 3634 ptr := v_0.Args[0] 3635 val := v.Args[1] 3636 mem := v.Args[2] 3637 if !(is32Bit(off1 + off2)) { 3638 break 3639 } 3640 v.reset(OpAMD64MOVBstore) 3641 v.AuxInt = off1 + off2 3642 v.Aux = sym 3643 v.AddArg(ptr) 3644 v.AddArg(val) 3645 v.AddArg(mem) 3646 return true 3647 } 3648 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 3649 // cond: validOff(off) 3650 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 3651 for { 3652 off := v.AuxInt 3653 sym := v.Aux 3654 ptr := v.Args[0] 3655 v_1 := v.Args[1] 3656 if v_1.Op != OpAMD64MOVLconst { 3657 break 3658 } 3659 c := v_1.AuxInt 3660 mem := v.Args[2] 3661 if !(validOff(off)) { 3662 break 3663 } 3664 v.reset(OpAMD64MOVBstoreconst) 3665 v.AuxInt = makeValAndOff(int64(int8(c)), off) 3666 v.Aux = sym 3667 v.AddArg(ptr) 3668 v.AddArg(mem) 3669 return true 3670 } 3671 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 3672 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3673 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 3674 for { 3675 off1 := v.AuxInt 3676 sym1 := v.Aux 3677 v_0 := v.Args[0] 3678 if v_0.Op != OpAMD64LEAQ { 3679 break 3680 } 3681 off2 := v_0.AuxInt 3682 sym2 := v_0.Aux 3683 base := v_0.Args[0] 3684 val := 
v.Args[1] 3685 mem := v.Args[2] 3686 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3687 break 3688 } 3689 v.reset(OpAMD64MOVBstore) 3690 v.AuxInt = off1 + off2 3691 v.Aux = mergeSym(sym1, sym2) 3692 v.AddArg(base) 3693 v.AddArg(val) 3694 v.AddArg(mem) 3695 return true 3696 } 3697 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 3698 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3699 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 3700 for { 3701 off1 := v.AuxInt 3702 sym1 := v.Aux 3703 v_0 := v.Args[0] 3704 if v_0.Op != OpAMD64LEAQ1 { 3705 break 3706 } 3707 off2 := v_0.AuxInt 3708 sym2 := v_0.Aux 3709 ptr := v_0.Args[0] 3710 idx := v_0.Args[1] 3711 val := v.Args[1] 3712 mem := v.Args[2] 3713 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3714 break 3715 } 3716 v.reset(OpAMD64MOVBstoreidx1) 3717 v.AuxInt = off1 + off2 3718 v.Aux = mergeSym(sym1, sym2) 3719 v.AddArg(ptr) 3720 v.AddArg(idx) 3721 v.AddArg(val) 3722 v.AddArg(mem) 3723 return true 3724 } 3725 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 3726 // cond: ptr.Op != OpSB 3727 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 3728 for { 3729 off := v.AuxInt 3730 sym := v.Aux 3731 v_0 := v.Args[0] 3732 if v_0.Op != OpAMD64ADDQ { 3733 break 3734 } 3735 ptr := v_0.Args[0] 3736 idx := v_0.Args[1] 3737 val := v.Args[1] 3738 mem := v.Args[2] 3739 if !(ptr.Op != OpSB) { 3740 break 3741 } 3742 v.reset(OpAMD64MOVBstoreidx1) 3743 v.AuxInt = off 3744 v.Aux = sym 3745 v.AddArg(ptr) 3746 v.AddArg(idx) 3747 v.AddArg(val) 3748 v.AddArg(mem) 3749 return true 3750 } 3751 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 3752 // cond: x.Uses == 1 && clobber(x) 3753 // result: (MOVWstore [i-1] {s} p w mem) 3754 for { 3755 i := v.AuxInt 3756 s := v.Aux 3757 p := v.Args[0] 3758 v_1 := v.Args[1] 3759 if v_1.Op != OpAMD64SHRQconst { 3760 break 3761 } 3762 if v_1.AuxInt != 8 { 3763 break 3764 } 3765 w := 
v_1.Args[0] 3766 x := v.Args[2] 3767 if x.Op != OpAMD64MOVBstore { 3768 break 3769 } 3770 if x.AuxInt != i-1 { 3771 break 3772 } 3773 if x.Aux != s { 3774 break 3775 } 3776 if p != x.Args[0] { 3777 break 3778 } 3779 if w != x.Args[1] { 3780 break 3781 } 3782 mem := x.Args[2] 3783 if !(x.Uses == 1 && clobber(x)) { 3784 break 3785 } 3786 v.reset(OpAMD64MOVWstore) 3787 v.AuxInt = i - 1 3788 v.Aux = s 3789 v.AddArg(p) 3790 v.AddArg(w) 3791 v.AddArg(mem) 3792 return true 3793 } 3794 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 3795 // cond: x.Uses == 1 && clobber(x) 3796 // result: (MOVWstore [i-1] {s} p w0 mem) 3797 for { 3798 i := v.AuxInt 3799 s := v.Aux 3800 p := v.Args[0] 3801 v_1 := v.Args[1] 3802 if v_1.Op != OpAMD64SHRQconst { 3803 break 3804 } 3805 j := v_1.AuxInt 3806 w := v_1.Args[0] 3807 x := v.Args[2] 3808 if x.Op != OpAMD64MOVBstore { 3809 break 3810 } 3811 if x.AuxInt != i-1 { 3812 break 3813 } 3814 if x.Aux != s { 3815 break 3816 } 3817 if p != x.Args[0] { 3818 break 3819 } 3820 w0 := x.Args[1] 3821 if w0.Op != OpAMD64SHRQconst { 3822 break 3823 } 3824 if w0.AuxInt != j-8 { 3825 break 3826 } 3827 if w != w0.Args[0] { 3828 break 3829 } 3830 mem := x.Args[2] 3831 if !(x.Uses == 1 && clobber(x)) { 3832 break 3833 } 3834 v.reset(OpAMD64MOVWstore) 3835 v.AuxInt = i - 1 3836 v.Aux = s 3837 v.AddArg(p) 3838 v.AddArg(w0) 3839 v.AddArg(mem) 3840 return true 3841 } 3842 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 3843 // cond: canMergeSym(sym1, sym2) 3844 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 3845 for { 3846 off1 := v.AuxInt 3847 sym1 := v.Aux 3848 v_0 := v.Args[0] 3849 if v_0.Op != OpAMD64LEAL { 3850 break 3851 } 3852 off2 := v_0.AuxInt 3853 sym2 := v_0.Aux 3854 base := v_0.Args[0] 3855 val := v.Args[1] 3856 mem := v.Args[2] 3857 if !(canMergeSym(sym1, sym2)) { 3858 break 3859 } 3860 v.reset(OpAMD64MOVBstore) 3861 v.AuxInt = off1 + off2 3862 v.Aux = 
mergeSym(sym1, sym2) 3863 v.AddArg(base) 3864 v.AddArg(val) 3865 v.AddArg(mem) 3866 return true 3867 } 3868 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 3869 // cond: is32Bit(off1+off2) 3870 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 3871 for { 3872 off1 := v.AuxInt 3873 sym := v.Aux 3874 v_0 := v.Args[0] 3875 if v_0.Op != OpAMD64ADDLconst { 3876 break 3877 } 3878 off2 := v_0.AuxInt 3879 ptr := v_0.Args[0] 3880 val := v.Args[1] 3881 mem := v.Args[2] 3882 if !(is32Bit(off1 + off2)) { 3883 break 3884 } 3885 v.reset(OpAMD64MOVBstore) 3886 v.AuxInt = off1 + off2 3887 v.Aux = sym 3888 v.AddArg(ptr) 3889 v.AddArg(val) 3890 v.AddArg(mem) 3891 return true 3892 } 3893 return false 3894 } 3895 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 3896 b := v.Block 3897 _ = b 3898 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 3899 // cond: ValAndOff(sc).canAdd(off) 3900 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 3901 for { 3902 sc := v.AuxInt 3903 s := v.Aux 3904 v_0 := v.Args[0] 3905 if v_0.Op != OpAMD64ADDQconst { 3906 break 3907 } 3908 off := v_0.AuxInt 3909 ptr := v_0.Args[0] 3910 mem := v.Args[1] 3911 if !(ValAndOff(sc).canAdd(off)) { 3912 break 3913 } 3914 v.reset(OpAMD64MOVBstoreconst) 3915 v.AuxInt = ValAndOff(sc).add(off) 3916 v.Aux = s 3917 v.AddArg(ptr) 3918 v.AddArg(mem) 3919 return true 3920 } 3921 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 3922 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 3923 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 3924 for { 3925 sc := v.AuxInt 3926 sym1 := v.Aux 3927 v_0 := v.Args[0] 3928 if v_0.Op != OpAMD64LEAQ { 3929 break 3930 } 3931 off := v_0.AuxInt 3932 sym2 := v_0.Aux 3933 ptr := v_0.Args[0] 3934 mem := v.Args[1] 3935 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 3936 break 3937 } 3938 v.reset(OpAMD64MOVBstoreconst) 3939 v.AuxInt = 
ValAndOff(sc).add(off) 3940 v.Aux = mergeSym(sym1, sym2) 3941 v.AddArg(ptr) 3942 v.AddArg(mem) 3943 return true 3944 } 3945 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 3946 // cond: canMergeSym(sym1, sym2) 3947 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 3948 for { 3949 x := v.AuxInt 3950 sym1 := v.Aux 3951 v_0 := v.Args[0] 3952 if v_0.Op != OpAMD64LEAQ1 { 3953 break 3954 } 3955 off := v_0.AuxInt 3956 sym2 := v_0.Aux 3957 ptr := v_0.Args[0] 3958 idx := v_0.Args[1] 3959 mem := v.Args[1] 3960 if !(canMergeSym(sym1, sym2)) { 3961 break 3962 } 3963 v.reset(OpAMD64MOVBstoreconstidx1) 3964 v.AuxInt = ValAndOff(x).add(off) 3965 v.Aux = mergeSym(sym1, sym2) 3966 v.AddArg(ptr) 3967 v.AddArg(idx) 3968 v.AddArg(mem) 3969 return true 3970 } 3971 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 3972 // cond: 3973 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 3974 for { 3975 x := v.AuxInt 3976 sym := v.Aux 3977 v_0 := v.Args[0] 3978 if v_0.Op != OpAMD64ADDQ { 3979 break 3980 } 3981 ptr := v_0.Args[0] 3982 idx := v_0.Args[1] 3983 mem := v.Args[1] 3984 v.reset(OpAMD64MOVBstoreconstidx1) 3985 v.AuxInt = x 3986 v.Aux = sym 3987 v.AddArg(ptr) 3988 v.AddArg(idx) 3989 v.AddArg(mem) 3990 return true 3991 } 3992 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 3993 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 3994 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 3995 for { 3996 c := v.AuxInt 3997 s := v.Aux 3998 p := v.Args[0] 3999 x := v.Args[1] 4000 if x.Op != OpAMD64MOVBstoreconst { 4001 break 4002 } 4003 a := x.AuxInt 4004 if x.Aux != s { 4005 break 4006 } 4007 if p != x.Args[0] { 4008 break 4009 } 4010 mem := x.Args[1] 4011 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4012 break 4013 } 4014 v.reset(OpAMD64MOVWstoreconst) 4015 
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4016 v.Aux = s 4017 v.AddArg(p) 4018 v.AddArg(mem) 4019 return true 4020 } 4021 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 4022 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4023 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4024 for { 4025 sc := v.AuxInt 4026 sym1 := v.Aux 4027 v_0 := v.Args[0] 4028 if v_0.Op != OpAMD64LEAL { 4029 break 4030 } 4031 off := v_0.AuxInt 4032 sym2 := v_0.Aux 4033 ptr := v_0.Args[0] 4034 mem := v.Args[1] 4035 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4036 break 4037 } 4038 v.reset(OpAMD64MOVBstoreconst) 4039 v.AuxInt = ValAndOff(sc).add(off) 4040 v.Aux = mergeSym(sym1, sym2) 4041 v.AddArg(ptr) 4042 v.AddArg(mem) 4043 return true 4044 } 4045 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 4046 // cond: ValAndOff(sc).canAdd(off) 4047 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4048 for { 4049 sc := v.AuxInt 4050 s := v.Aux 4051 v_0 := v.Args[0] 4052 if v_0.Op != OpAMD64ADDLconst { 4053 break 4054 } 4055 off := v_0.AuxInt 4056 ptr := v_0.Args[0] 4057 mem := v.Args[1] 4058 if !(ValAndOff(sc).canAdd(off)) { 4059 break 4060 } 4061 v.reset(OpAMD64MOVBstoreconst) 4062 v.AuxInt = ValAndOff(sc).add(off) 4063 v.Aux = s 4064 v.AddArg(ptr) 4065 v.AddArg(mem) 4066 return true 4067 } 4068 return false 4069 } 4070 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 4071 b := v.Block 4072 _ = b 4073 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 4074 // cond: 4075 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4076 for { 4077 x := v.AuxInt 4078 sym := v.Aux 4079 v_0 := v.Args[0] 4080 if v_0.Op != OpAMD64ADDQconst { 4081 break 4082 } 4083 c := v_0.AuxInt 4084 ptr := v_0.Args[0] 4085 idx := v.Args[1] 4086 mem := v.Args[2] 4087 
v.reset(OpAMD64MOVBstoreconstidx1) 4088 v.AuxInt = ValAndOff(x).add(c) 4089 v.Aux = sym 4090 v.AddArg(ptr) 4091 v.AddArg(idx) 4092 v.AddArg(mem) 4093 return true 4094 } 4095 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 4096 // cond: 4097 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4098 for { 4099 x := v.AuxInt 4100 sym := v.Aux 4101 ptr := v.Args[0] 4102 v_1 := v.Args[1] 4103 if v_1.Op != OpAMD64ADDQconst { 4104 break 4105 } 4106 c := v_1.AuxInt 4107 idx := v_1.Args[0] 4108 mem := v.Args[2] 4109 v.reset(OpAMD64MOVBstoreconstidx1) 4110 v.AuxInt = ValAndOff(x).add(c) 4111 v.Aux = sym 4112 v.AddArg(ptr) 4113 v.AddArg(idx) 4114 v.AddArg(mem) 4115 return true 4116 } 4117 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 4118 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4119 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 4120 for { 4121 c := v.AuxInt 4122 s := v.Aux 4123 p := v.Args[0] 4124 i := v.Args[1] 4125 x := v.Args[2] 4126 if x.Op != OpAMD64MOVBstoreconstidx1 { 4127 break 4128 } 4129 a := x.AuxInt 4130 if x.Aux != s { 4131 break 4132 } 4133 if p != x.Args[0] { 4134 break 4135 } 4136 if i != x.Args[1] { 4137 break 4138 } 4139 mem := x.Args[2] 4140 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4141 break 4142 } 4143 v.reset(OpAMD64MOVWstoreconstidx1) 4144 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4145 v.Aux = s 4146 v.AddArg(p) 4147 v.AddArg(i) 4148 v.AddArg(mem) 4149 return true 4150 } 4151 return false 4152 } 4153 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 4154 b := v.Block 4155 _ = b 4156 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 4157 // cond: 4158 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4159 for { 4160 c 
:= v.AuxInt 4161 sym := v.Aux 4162 v_0 := v.Args[0] 4163 if v_0.Op != OpAMD64ADDQconst { 4164 break 4165 } 4166 d := v_0.AuxInt 4167 ptr := v_0.Args[0] 4168 idx := v.Args[1] 4169 val := v.Args[2] 4170 mem := v.Args[3] 4171 v.reset(OpAMD64MOVBstoreidx1) 4172 v.AuxInt = c + d 4173 v.Aux = sym 4174 v.AddArg(ptr) 4175 v.AddArg(idx) 4176 v.AddArg(val) 4177 v.AddArg(mem) 4178 return true 4179 } 4180 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 4181 // cond: 4182 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4183 for { 4184 c := v.AuxInt 4185 sym := v.Aux 4186 ptr := v.Args[0] 4187 v_1 := v.Args[1] 4188 if v_1.Op != OpAMD64ADDQconst { 4189 break 4190 } 4191 d := v_1.AuxInt 4192 idx := v_1.Args[0] 4193 val := v.Args[2] 4194 mem := v.Args[3] 4195 v.reset(OpAMD64MOVBstoreidx1) 4196 v.AuxInt = c + d 4197 v.Aux = sym 4198 v.AddArg(ptr) 4199 v.AddArg(idx) 4200 v.AddArg(val) 4201 v.AddArg(mem) 4202 return true 4203 } 4204 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 4205 // cond: x.Uses == 1 && clobber(x) 4206 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 4207 for { 4208 i := v.AuxInt 4209 s := v.Aux 4210 p := v.Args[0] 4211 idx := v.Args[1] 4212 v_2 := v.Args[2] 4213 if v_2.Op != OpAMD64SHRQconst { 4214 break 4215 } 4216 if v_2.AuxInt != 8 { 4217 break 4218 } 4219 w := v_2.Args[0] 4220 x := v.Args[3] 4221 if x.Op != OpAMD64MOVBstoreidx1 { 4222 break 4223 } 4224 if x.AuxInt != i-1 { 4225 break 4226 } 4227 if x.Aux != s { 4228 break 4229 } 4230 if p != x.Args[0] { 4231 break 4232 } 4233 if idx != x.Args[1] { 4234 break 4235 } 4236 if w != x.Args[2] { 4237 break 4238 } 4239 mem := x.Args[3] 4240 if !(x.Uses == 1 && clobber(x)) { 4241 break 4242 } 4243 v.reset(OpAMD64MOVWstoreidx1) 4244 v.AuxInt = i - 1 4245 v.Aux = s 4246 v.AddArg(p) 4247 v.AddArg(idx) 4248 v.AddArg(w) 4249 v.AddArg(mem) 4250 return true 4251 } 4252 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] 
{s} p idx w0:(SHRQconst [j-8] w) mem)) 4253 // cond: x.Uses == 1 && clobber(x) 4254 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 4255 for { 4256 i := v.AuxInt 4257 s := v.Aux 4258 p := v.Args[0] 4259 idx := v.Args[1] 4260 v_2 := v.Args[2] 4261 if v_2.Op != OpAMD64SHRQconst { 4262 break 4263 } 4264 j := v_2.AuxInt 4265 w := v_2.Args[0] 4266 x := v.Args[3] 4267 if x.Op != OpAMD64MOVBstoreidx1 { 4268 break 4269 } 4270 if x.AuxInt != i-1 { 4271 break 4272 } 4273 if x.Aux != s { 4274 break 4275 } 4276 if p != x.Args[0] { 4277 break 4278 } 4279 if idx != x.Args[1] { 4280 break 4281 } 4282 w0 := x.Args[2] 4283 if w0.Op != OpAMD64SHRQconst { 4284 break 4285 } 4286 if w0.AuxInt != j-8 { 4287 break 4288 } 4289 if w != w0.Args[0] { 4290 break 4291 } 4292 mem := x.Args[3] 4293 if !(x.Uses == 1 && clobber(x)) { 4294 break 4295 } 4296 v.reset(OpAMD64MOVWstoreidx1) 4297 v.AuxInt = i - 1 4298 v.Aux = s 4299 v.AddArg(p) 4300 v.AddArg(idx) 4301 v.AddArg(w0) 4302 v.AddArg(mem) 4303 return true 4304 } 4305 return false 4306 } 4307 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 4308 b := v.Block 4309 _ = b 4310 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 4311 // cond: x.Uses == 1 && clobber(x) 4312 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4313 for { 4314 x := v.Args[0] 4315 if x.Op != OpAMD64MOVLload { 4316 break 4317 } 4318 off := x.AuxInt 4319 sym := x.Aux 4320 ptr := x.Args[0] 4321 mem := x.Args[1] 4322 if !(x.Uses == 1 && clobber(x)) { 4323 break 4324 } 4325 b = x.Block 4326 v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 4327 v.reset(OpCopy) 4328 v.AddArg(v0) 4329 v0.AuxInt = off 4330 v0.Aux = sym 4331 v0.AddArg(ptr) 4332 v0.AddArg(mem) 4333 return true 4334 } 4335 // match: (MOVLQSX (ANDLconst [c] x)) 4336 // cond: c & 0x80000000 == 0 4337 // result: (ANDLconst [c & 0x7fffffff] x) 4338 for { 4339 v_0 := v.Args[0] 4340 if v_0.Op != OpAMD64ANDLconst { 4341 break 4342 } 4343 c := v_0.AuxInt 4344 x := v_0.Args[0] 4345 
if !(c&0x80000000 == 0) { 4346 break 4347 } 4348 v.reset(OpAMD64ANDLconst) 4349 v.AuxInt = c & 0x7fffffff 4350 v.AddArg(x) 4351 return true 4352 } 4353 return false 4354 } 4355 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 4356 b := v.Block 4357 _ = b 4358 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4359 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4360 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4361 for { 4362 off1 := v.AuxInt 4363 sym1 := v.Aux 4364 v_0 := v.Args[0] 4365 if v_0.Op != OpAMD64LEAQ { 4366 break 4367 } 4368 off2 := v_0.AuxInt 4369 sym2 := v_0.Aux 4370 base := v_0.Args[0] 4371 mem := v.Args[1] 4372 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4373 break 4374 } 4375 v.reset(OpAMD64MOVLQSXload) 4376 v.AuxInt = off1 + off2 4377 v.Aux = mergeSym(sym1, sym2) 4378 v.AddArg(base) 4379 v.AddArg(mem) 4380 return true 4381 } 4382 return false 4383 } 4384 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 4385 b := v.Block 4386 _ = b 4387 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 4388 // cond: x.Uses == 1 && clobber(x) 4389 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 4390 for { 4391 x := v.Args[0] 4392 if x.Op != OpAMD64MOVLload { 4393 break 4394 } 4395 off := x.AuxInt 4396 sym := x.Aux 4397 ptr := x.Args[0] 4398 mem := x.Args[1] 4399 if !(x.Uses == 1 && clobber(x)) { 4400 break 4401 } 4402 b = x.Block 4403 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 4404 v.reset(OpCopy) 4405 v.AddArg(v0) 4406 v0.AuxInt = off 4407 v0.Aux = sym 4408 v0.AddArg(ptr) 4409 v0.AddArg(mem) 4410 return true 4411 } 4412 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 4413 // cond: x.Uses == 1 && clobber(x) 4414 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 4415 for { 4416 x := v.Args[0] 4417 if x.Op != OpAMD64MOVLloadidx1 { 4418 break 4419 } 4420 off := x.AuxInt 4421 sym := x.Aux 4422 ptr := x.Args[0] 
4423 idx := x.Args[1] 4424 mem := x.Args[2] 4425 if !(x.Uses == 1 && clobber(x)) { 4426 break 4427 } 4428 b = x.Block 4429 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 4430 v.reset(OpCopy) 4431 v.AddArg(v0) 4432 v0.AuxInt = off 4433 v0.Aux = sym 4434 v0.AddArg(ptr) 4435 v0.AddArg(idx) 4436 v0.AddArg(mem) 4437 return true 4438 } 4439 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 4440 // cond: x.Uses == 1 && clobber(x) 4441 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 4442 for { 4443 x := v.Args[0] 4444 if x.Op != OpAMD64MOVLloadidx4 { 4445 break 4446 } 4447 off := x.AuxInt 4448 sym := x.Aux 4449 ptr := x.Args[0] 4450 idx := x.Args[1] 4451 mem := x.Args[2] 4452 if !(x.Uses == 1 && clobber(x)) { 4453 break 4454 } 4455 b = x.Block 4456 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type) 4457 v.reset(OpCopy) 4458 v.AddArg(v0) 4459 v0.AuxInt = off 4460 v0.Aux = sym 4461 v0.AddArg(ptr) 4462 v0.AddArg(idx) 4463 v0.AddArg(mem) 4464 return true 4465 } 4466 // match: (MOVLQZX (ANDLconst [c] x)) 4467 // cond: 4468 // result: (ANDLconst [c] x) 4469 for { 4470 v_0 := v.Args[0] 4471 if v_0.Op != OpAMD64ANDLconst { 4472 break 4473 } 4474 c := v_0.AuxInt 4475 x := v_0.Args[0] 4476 v.reset(OpAMD64ANDLconst) 4477 v.AuxInt = c 4478 v.AddArg(x) 4479 return true 4480 } 4481 return false 4482 } 4483 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool { 4484 b := v.Block 4485 _ = b 4486 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 4487 // cond: is32Bit(off1+off2) 4488 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 4489 for { 4490 off1 := v.AuxInt 4491 sym := v.Aux 4492 v_0 := v.Args[0] 4493 if v_0.Op != OpAMD64ADDQconst { 4494 break 4495 } 4496 off2 := v_0.AuxInt 4497 ptr := v_0.Args[0] 4498 mem := v.Args[1] 4499 if !(is32Bit(off1 + off2)) { 4500 break 4501 } 4502 v.reset(OpAMD64MOVLatomicload) 4503 v.AuxInt = off1 + off2 4504 v.Aux = sym 4505 v.AddArg(ptr) 4506 v.AddArg(mem) 4507 
return true 4508 } 4509 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 4510 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4511 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 4512 for { 4513 off1 := v.AuxInt 4514 sym1 := v.Aux 4515 v_0 := v.Args[0] 4516 if v_0.Op != OpAMD64LEAQ { 4517 break 4518 } 4519 off2 := v_0.AuxInt 4520 sym2 := v_0.Aux 4521 ptr := v_0.Args[0] 4522 mem := v.Args[1] 4523 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4524 break 4525 } 4526 v.reset(OpAMD64MOVLatomicload) 4527 v.AuxInt = off1 + off2 4528 v.Aux = mergeSym(sym1, sym2) 4529 v.AddArg(ptr) 4530 v.AddArg(mem) 4531 return true 4532 } 4533 return false 4534 } 4535 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 4536 b := v.Block 4537 _ = b 4538 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 4539 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4540 // result: x 4541 for { 4542 off := v.AuxInt 4543 sym := v.Aux 4544 ptr := v.Args[0] 4545 v_1 := v.Args[1] 4546 if v_1.Op != OpAMD64MOVLstore { 4547 break 4548 } 4549 off2 := v_1.AuxInt 4550 sym2 := v_1.Aux 4551 ptr2 := v_1.Args[0] 4552 x := v_1.Args[1] 4553 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4554 break 4555 } 4556 v.reset(OpCopy) 4557 v.Type = x.Type 4558 v.AddArg(x) 4559 return true 4560 } 4561 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 4562 // cond: is32Bit(off1+off2) 4563 // result: (MOVLload [off1+off2] {sym} ptr mem) 4564 for { 4565 off1 := v.AuxInt 4566 sym := v.Aux 4567 v_0 := v.Args[0] 4568 if v_0.Op != OpAMD64ADDQconst { 4569 break 4570 } 4571 off2 := v_0.AuxInt 4572 ptr := v_0.Args[0] 4573 mem := v.Args[1] 4574 if !(is32Bit(off1 + off2)) { 4575 break 4576 } 4577 v.reset(OpAMD64MOVLload) 4578 v.AuxInt = off1 + off2 4579 v.Aux = sym 4580 v.AddArg(ptr) 4581 v.AddArg(mem) 4582 return true 4583 } 4584 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4585 // 
cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4586 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4587 for { 4588 off1 := v.AuxInt 4589 sym1 := v.Aux 4590 v_0 := v.Args[0] 4591 if v_0.Op != OpAMD64LEAQ { 4592 break 4593 } 4594 off2 := v_0.AuxInt 4595 sym2 := v_0.Aux 4596 base := v_0.Args[0] 4597 mem := v.Args[1] 4598 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4599 break 4600 } 4601 v.reset(OpAMD64MOVLload) 4602 v.AuxInt = off1 + off2 4603 v.Aux = mergeSym(sym1, sym2) 4604 v.AddArg(base) 4605 v.AddArg(mem) 4606 return true 4607 } 4608 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 4609 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4610 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4611 for { 4612 off1 := v.AuxInt 4613 sym1 := v.Aux 4614 v_0 := v.Args[0] 4615 if v_0.Op != OpAMD64LEAQ1 { 4616 break 4617 } 4618 off2 := v_0.AuxInt 4619 sym2 := v_0.Aux 4620 ptr := v_0.Args[0] 4621 idx := v_0.Args[1] 4622 mem := v.Args[1] 4623 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4624 break 4625 } 4626 v.reset(OpAMD64MOVLloadidx1) 4627 v.AuxInt = off1 + off2 4628 v.Aux = mergeSym(sym1, sym2) 4629 v.AddArg(ptr) 4630 v.AddArg(idx) 4631 v.AddArg(mem) 4632 return true 4633 } 4634 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 4635 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4636 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4637 for { 4638 off1 := v.AuxInt 4639 sym1 := v.Aux 4640 v_0 := v.Args[0] 4641 if v_0.Op != OpAMD64LEAQ4 { 4642 break 4643 } 4644 off2 := v_0.AuxInt 4645 sym2 := v_0.Aux 4646 ptr := v_0.Args[0] 4647 idx := v_0.Args[1] 4648 mem := v.Args[1] 4649 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4650 break 4651 } 4652 v.reset(OpAMD64MOVLloadidx4) 4653 v.AuxInt = off1 + off2 4654 v.Aux = mergeSym(sym1, sym2) 4655 v.AddArg(ptr) 4656 v.AddArg(idx) 4657 v.AddArg(mem) 4658 return true 4659 } 4660 // match: 
(MOVLload [off] {sym} (ADDQ ptr idx) mem) 4661 // cond: ptr.Op != OpSB 4662 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 4663 for { 4664 off := v.AuxInt 4665 sym := v.Aux 4666 v_0 := v.Args[0] 4667 if v_0.Op != OpAMD64ADDQ { 4668 break 4669 } 4670 ptr := v_0.Args[0] 4671 idx := v_0.Args[1] 4672 mem := v.Args[1] 4673 if !(ptr.Op != OpSB) { 4674 break 4675 } 4676 v.reset(OpAMD64MOVLloadidx1) 4677 v.AuxInt = off 4678 v.Aux = sym 4679 v.AddArg(ptr) 4680 v.AddArg(idx) 4681 v.AddArg(mem) 4682 return true 4683 } 4684 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 4685 // cond: canMergeSym(sym1, sym2) 4686 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4687 for { 4688 off1 := v.AuxInt 4689 sym1 := v.Aux 4690 v_0 := v.Args[0] 4691 if v_0.Op != OpAMD64LEAL { 4692 break 4693 } 4694 off2 := v_0.AuxInt 4695 sym2 := v_0.Aux 4696 base := v_0.Args[0] 4697 mem := v.Args[1] 4698 if !(canMergeSym(sym1, sym2)) { 4699 break 4700 } 4701 v.reset(OpAMD64MOVLload) 4702 v.AuxInt = off1 + off2 4703 v.Aux = mergeSym(sym1, sym2) 4704 v.AddArg(base) 4705 v.AddArg(mem) 4706 return true 4707 } 4708 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 4709 // cond: is32Bit(off1+off2) 4710 // result: (MOVLload [off1+off2] {sym} ptr mem) 4711 for { 4712 off1 := v.AuxInt 4713 sym := v.Aux 4714 v_0 := v.Args[0] 4715 if v_0.Op != OpAMD64ADDLconst { 4716 break 4717 } 4718 off2 := v_0.AuxInt 4719 ptr := v_0.Args[0] 4720 mem := v.Args[1] 4721 if !(is32Bit(off1 + off2)) { 4722 break 4723 } 4724 v.reset(OpAMD64MOVLload) 4725 v.AuxInt = off1 + off2 4726 v.Aux = sym 4727 v.AddArg(ptr) 4728 v.AddArg(mem) 4729 return true 4730 } 4731 return false 4732 } 4733 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 4734 b := v.Block 4735 _ = b 4736 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 4737 // cond: 4738 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 4739 for { 4740 c := v.AuxInt 4741 sym := v.Aux 4742 ptr := v.Args[0] 
4743 v_1 := v.Args[1] 4744 if v_1.Op != OpAMD64SHLQconst { 4745 break 4746 } 4747 if v_1.AuxInt != 2 { 4748 break 4749 } 4750 idx := v_1.Args[0] 4751 mem := v.Args[2] 4752 v.reset(OpAMD64MOVLloadidx4) 4753 v.AuxInt = c 4754 v.Aux = sym 4755 v.AddArg(ptr) 4756 v.AddArg(idx) 4757 v.AddArg(mem) 4758 return true 4759 } 4760 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 4761 // cond: 4762 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 4763 for { 4764 c := v.AuxInt 4765 sym := v.Aux 4766 v_0 := v.Args[0] 4767 if v_0.Op != OpAMD64ADDQconst { 4768 break 4769 } 4770 d := v_0.AuxInt 4771 ptr := v_0.Args[0] 4772 idx := v.Args[1] 4773 mem := v.Args[2] 4774 v.reset(OpAMD64MOVLloadidx1) 4775 v.AuxInt = c + d 4776 v.Aux = sym 4777 v.AddArg(ptr) 4778 v.AddArg(idx) 4779 v.AddArg(mem) 4780 return true 4781 } 4782 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 4783 // cond: 4784 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 4785 for { 4786 c := v.AuxInt 4787 sym := v.Aux 4788 ptr := v.Args[0] 4789 v_1 := v.Args[1] 4790 if v_1.Op != OpAMD64ADDQconst { 4791 break 4792 } 4793 d := v_1.AuxInt 4794 idx := v_1.Args[0] 4795 mem := v.Args[2] 4796 v.reset(OpAMD64MOVLloadidx1) 4797 v.AuxInt = c + d 4798 v.Aux = sym 4799 v.AddArg(ptr) 4800 v.AddArg(idx) 4801 v.AddArg(mem) 4802 return true 4803 } 4804 return false 4805 } 4806 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { 4807 b := v.Block 4808 _ = b 4809 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 4810 // cond: 4811 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 4812 for { 4813 c := v.AuxInt 4814 sym := v.Aux 4815 v_0 := v.Args[0] 4816 if v_0.Op != OpAMD64ADDQconst { 4817 break 4818 } 4819 d := v_0.AuxInt 4820 ptr := v_0.Args[0] 4821 idx := v.Args[1] 4822 mem := v.Args[2] 4823 v.reset(OpAMD64MOVLloadidx4) 4824 v.AuxInt = c + d 4825 v.Aux = sym 4826 v.AddArg(ptr) 4827 v.AddArg(idx) 4828 v.AddArg(mem) 4829 return true 4830 } 4831 // match: (MOVLloadidx4 [c] 
{sym} ptr (ADDQconst [d] idx) mem) 4832 // cond: 4833 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 4834 for { 4835 c := v.AuxInt 4836 sym := v.Aux 4837 ptr := v.Args[0] 4838 v_1 := v.Args[1] 4839 if v_1.Op != OpAMD64ADDQconst { 4840 break 4841 } 4842 d := v_1.AuxInt 4843 idx := v_1.Args[0] 4844 mem := v.Args[2] 4845 v.reset(OpAMD64MOVLloadidx4) 4846 v.AuxInt = c + 4*d 4847 v.Aux = sym 4848 v.AddArg(ptr) 4849 v.AddArg(idx) 4850 v.AddArg(mem) 4851 return true 4852 } 4853 return false 4854 } 4855 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { 4856 b := v.Block 4857 _ = b 4858 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 4859 // cond: 4860 // result: (MOVLstore [off] {sym} ptr x mem) 4861 for { 4862 off := v.AuxInt 4863 sym := v.Aux 4864 ptr := v.Args[0] 4865 v_1 := v.Args[1] 4866 if v_1.Op != OpAMD64MOVLQSX { 4867 break 4868 } 4869 x := v_1.Args[0] 4870 mem := v.Args[2] 4871 v.reset(OpAMD64MOVLstore) 4872 v.AuxInt = off 4873 v.Aux = sym 4874 v.AddArg(ptr) 4875 v.AddArg(x) 4876 v.AddArg(mem) 4877 return true 4878 } 4879 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 4880 // cond: 4881 // result: (MOVLstore [off] {sym} ptr x mem) 4882 for { 4883 off := v.AuxInt 4884 sym := v.Aux 4885 ptr := v.Args[0] 4886 v_1 := v.Args[1] 4887 if v_1.Op != OpAMD64MOVLQZX { 4888 break 4889 } 4890 x := v_1.Args[0] 4891 mem := v.Args[2] 4892 v.reset(OpAMD64MOVLstore) 4893 v.AuxInt = off 4894 v.Aux = sym 4895 v.AddArg(ptr) 4896 v.AddArg(x) 4897 v.AddArg(mem) 4898 return true 4899 } 4900 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 4901 // cond: is32Bit(off1+off2) 4902 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 4903 for { 4904 off1 := v.AuxInt 4905 sym := v.Aux 4906 v_0 := v.Args[0] 4907 if v_0.Op != OpAMD64ADDQconst { 4908 break 4909 } 4910 off2 := v_0.AuxInt 4911 ptr := v_0.Args[0] 4912 val := v.Args[1] 4913 mem := v.Args[2] 4914 if !(is32Bit(off1 + off2)) { 4915 break 4916 } 4917 v.reset(OpAMD64MOVLstore) 4918 
v.AuxInt = off1 + off2 4919 v.Aux = sym 4920 v.AddArg(ptr) 4921 v.AddArg(val) 4922 v.AddArg(mem) 4923 return true 4924 } 4925 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 4926 // cond: validOff(off) 4927 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 4928 for { 4929 off := v.AuxInt 4930 sym := v.Aux 4931 ptr := v.Args[0] 4932 v_1 := v.Args[1] 4933 if v_1.Op != OpAMD64MOVLconst { 4934 break 4935 } 4936 c := v_1.AuxInt 4937 mem := v.Args[2] 4938 if !(validOff(off)) { 4939 break 4940 } 4941 v.reset(OpAMD64MOVLstoreconst) 4942 v.AuxInt = makeValAndOff(int64(int32(c)), off) 4943 v.Aux = sym 4944 v.AddArg(ptr) 4945 v.AddArg(mem) 4946 return true 4947 } 4948 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4949 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4950 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4951 for { 4952 off1 := v.AuxInt 4953 sym1 := v.Aux 4954 v_0 := v.Args[0] 4955 if v_0.Op != OpAMD64LEAQ { 4956 break 4957 } 4958 off2 := v_0.AuxInt 4959 sym2 := v_0.Aux 4960 base := v_0.Args[0] 4961 val := v.Args[1] 4962 mem := v.Args[2] 4963 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4964 break 4965 } 4966 v.reset(OpAMD64MOVLstore) 4967 v.AuxInt = off1 + off2 4968 v.Aux = mergeSym(sym1, sym2) 4969 v.AddArg(base) 4970 v.AddArg(val) 4971 v.AddArg(mem) 4972 return true 4973 } 4974 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 4975 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4976 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 4977 for { 4978 off1 := v.AuxInt 4979 sym1 := v.Aux 4980 v_0 := v.Args[0] 4981 if v_0.Op != OpAMD64LEAQ1 { 4982 break 4983 } 4984 off2 := v_0.AuxInt 4985 sym2 := v_0.Aux 4986 ptr := v_0.Args[0] 4987 idx := v_0.Args[1] 4988 val := v.Args[1] 4989 mem := v.Args[2] 4990 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4991 break 4992 } 4993 v.reset(OpAMD64MOVLstoreidx1) 4994 
v.AuxInt = off1 + off2 4995 v.Aux = mergeSym(sym1, sym2) 4996 v.AddArg(ptr) 4997 v.AddArg(idx) 4998 v.AddArg(val) 4999 v.AddArg(mem) 5000 return true 5001 } 5002 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 5003 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5004 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5005 for { 5006 off1 := v.AuxInt 5007 sym1 := v.Aux 5008 v_0 := v.Args[0] 5009 if v_0.Op != OpAMD64LEAQ4 { 5010 break 5011 } 5012 off2 := v_0.AuxInt 5013 sym2 := v_0.Aux 5014 ptr := v_0.Args[0] 5015 idx := v_0.Args[1] 5016 val := v.Args[1] 5017 mem := v.Args[2] 5018 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5019 break 5020 } 5021 v.reset(OpAMD64MOVLstoreidx4) 5022 v.AuxInt = off1 + off2 5023 v.Aux = mergeSym(sym1, sym2) 5024 v.AddArg(ptr) 5025 v.AddArg(idx) 5026 v.AddArg(val) 5027 v.AddArg(mem) 5028 return true 5029 } 5030 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 5031 // cond: ptr.Op != OpSB 5032 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 5033 for { 5034 off := v.AuxInt 5035 sym := v.Aux 5036 v_0 := v.Args[0] 5037 if v_0.Op != OpAMD64ADDQ { 5038 break 5039 } 5040 ptr := v_0.Args[0] 5041 idx := v_0.Args[1] 5042 val := v.Args[1] 5043 mem := v.Args[2] 5044 if !(ptr.Op != OpSB) { 5045 break 5046 } 5047 v.reset(OpAMD64MOVLstoreidx1) 5048 v.AuxInt = off 5049 v.Aux = sym 5050 v.AddArg(ptr) 5051 v.AddArg(idx) 5052 v.AddArg(val) 5053 v.AddArg(mem) 5054 return true 5055 } 5056 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 5057 // cond: x.Uses == 1 && clobber(x) 5058 // result: (MOVQstore [i-4] {s} p w mem) 5059 for { 5060 i := v.AuxInt 5061 s := v.Aux 5062 p := v.Args[0] 5063 v_1 := v.Args[1] 5064 if v_1.Op != OpAMD64SHRQconst { 5065 break 5066 } 5067 if v_1.AuxInt != 32 { 5068 break 5069 } 5070 w := v_1.Args[0] 5071 x := v.Args[2] 5072 if x.Op != OpAMD64MOVLstore { 5073 break 5074 } 5075 if x.AuxInt != i-4 { 5076 break 5077 } 
5078 if x.Aux != s { 5079 break 5080 } 5081 if p != x.Args[0] { 5082 break 5083 } 5084 if w != x.Args[1] { 5085 break 5086 } 5087 mem := x.Args[2] 5088 if !(x.Uses == 1 && clobber(x)) { 5089 break 5090 } 5091 v.reset(OpAMD64MOVQstore) 5092 v.AuxInt = i - 4 5093 v.Aux = s 5094 v.AddArg(p) 5095 v.AddArg(w) 5096 v.AddArg(mem) 5097 return true 5098 } 5099 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 5100 // cond: x.Uses == 1 && clobber(x) 5101 // result: (MOVQstore [i-4] {s} p w0 mem) 5102 for { 5103 i := v.AuxInt 5104 s := v.Aux 5105 p := v.Args[0] 5106 v_1 := v.Args[1] 5107 if v_1.Op != OpAMD64SHRQconst { 5108 break 5109 } 5110 j := v_1.AuxInt 5111 w := v_1.Args[0] 5112 x := v.Args[2] 5113 if x.Op != OpAMD64MOVLstore { 5114 break 5115 } 5116 if x.AuxInt != i-4 { 5117 break 5118 } 5119 if x.Aux != s { 5120 break 5121 } 5122 if p != x.Args[0] { 5123 break 5124 } 5125 w0 := x.Args[1] 5126 if w0.Op != OpAMD64SHRQconst { 5127 break 5128 } 5129 if w0.AuxInt != j-32 { 5130 break 5131 } 5132 if w != w0.Args[0] { 5133 break 5134 } 5135 mem := x.Args[2] 5136 if !(x.Uses == 1 && clobber(x)) { 5137 break 5138 } 5139 v.reset(OpAMD64MOVQstore) 5140 v.AuxInt = i - 4 5141 v.Aux = s 5142 v.AddArg(p) 5143 v.AddArg(w0) 5144 v.AddArg(mem) 5145 return true 5146 } 5147 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5148 // cond: canMergeSym(sym1, sym2) 5149 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5150 for { 5151 off1 := v.AuxInt 5152 sym1 := v.Aux 5153 v_0 := v.Args[0] 5154 if v_0.Op != OpAMD64LEAL { 5155 break 5156 } 5157 off2 := v_0.AuxInt 5158 sym2 := v_0.Aux 5159 base := v_0.Args[0] 5160 val := v.Args[1] 5161 mem := v.Args[2] 5162 if !(canMergeSym(sym1, sym2)) { 5163 break 5164 } 5165 v.reset(OpAMD64MOVLstore) 5166 v.AuxInt = off1 + off2 5167 v.Aux = mergeSym(sym1, sym2) 5168 v.AddArg(base) 5169 v.AddArg(val) 5170 v.AddArg(mem) 5171 return true 5172 } 5173 // match: 
(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5174 // cond: is32Bit(off1+off2) 5175 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5176 for { 5177 off1 := v.AuxInt 5178 sym := v.Aux 5179 v_0 := v.Args[0] 5180 if v_0.Op != OpAMD64ADDLconst { 5181 break 5182 } 5183 off2 := v_0.AuxInt 5184 ptr := v_0.Args[0] 5185 val := v.Args[1] 5186 mem := v.Args[2] 5187 if !(is32Bit(off1 + off2)) { 5188 break 5189 } 5190 v.reset(OpAMD64MOVLstore) 5191 v.AuxInt = off1 + off2 5192 v.Aux = sym 5193 v.AddArg(ptr) 5194 v.AddArg(val) 5195 v.AddArg(mem) 5196 return true 5197 } 5198 return false 5199 } 5200 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { 5201 b := v.Block 5202 _ = b 5203 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5204 // cond: ValAndOff(sc).canAdd(off) 5205 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5206 for { 5207 sc := v.AuxInt 5208 s := v.Aux 5209 v_0 := v.Args[0] 5210 if v_0.Op != OpAMD64ADDQconst { 5211 break 5212 } 5213 off := v_0.AuxInt 5214 ptr := v_0.Args[0] 5215 mem := v.Args[1] 5216 if !(ValAndOff(sc).canAdd(off)) { 5217 break 5218 } 5219 v.reset(OpAMD64MOVLstoreconst) 5220 v.AuxInt = ValAndOff(sc).add(off) 5221 v.Aux = s 5222 v.AddArg(ptr) 5223 v.AddArg(mem) 5224 return true 5225 } 5226 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5227 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5228 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5229 for { 5230 sc := v.AuxInt 5231 sym1 := v.Aux 5232 v_0 := v.Args[0] 5233 if v_0.Op != OpAMD64LEAQ { 5234 break 5235 } 5236 off := v_0.AuxInt 5237 sym2 := v_0.Aux 5238 ptr := v_0.Args[0] 5239 mem := v.Args[1] 5240 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5241 break 5242 } 5243 v.reset(OpAMD64MOVLstoreconst) 5244 v.AuxInt = ValAndOff(sc).add(off) 5245 v.Aux = mergeSym(sym1, sym2) 5246 v.AddArg(ptr) 5247 v.AddArg(mem) 5248 return true 5249 } 5250 // 
match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5251 // cond: canMergeSym(sym1, sym2) 5252 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5253 for { 5254 x := v.AuxInt 5255 sym1 := v.Aux 5256 v_0 := v.Args[0] 5257 if v_0.Op != OpAMD64LEAQ1 { 5258 break 5259 } 5260 off := v_0.AuxInt 5261 sym2 := v_0.Aux 5262 ptr := v_0.Args[0] 5263 idx := v_0.Args[1] 5264 mem := v.Args[1] 5265 if !(canMergeSym(sym1, sym2)) { 5266 break 5267 } 5268 v.reset(OpAMD64MOVLstoreconstidx1) 5269 v.AuxInt = ValAndOff(x).add(off) 5270 v.Aux = mergeSym(sym1, sym2) 5271 v.AddArg(ptr) 5272 v.AddArg(idx) 5273 v.AddArg(mem) 5274 return true 5275 } 5276 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 5277 // cond: canMergeSym(sym1, sym2) 5278 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5279 for { 5280 x := v.AuxInt 5281 sym1 := v.Aux 5282 v_0 := v.Args[0] 5283 if v_0.Op != OpAMD64LEAQ4 { 5284 break 5285 } 5286 off := v_0.AuxInt 5287 sym2 := v_0.Aux 5288 ptr := v_0.Args[0] 5289 idx := v_0.Args[1] 5290 mem := v.Args[1] 5291 if !(canMergeSym(sym1, sym2)) { 5292 break 5293 } 5294 v.reset(OpAMD64MOVLstoreconstidx4) 5295 v.AuxInt = ValAndOff(x).add(off) 5296 v.Aux = mergeSym(sym1, sym2) 5297 v.AddArg(ptr) 5298 v.AddArg(idx) 5299 v.AddArg(mem) 5300 return true 5301 } 5302 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 5303 // cond: 5304 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 5305 for { 5306 x := v.AuxInt 5307 sym := v.Aux 5308 v_0 := v.Args[0] 5309 if v_0.Op != OpAMD64ADDQ { 5310 break 5311 } 5312 ptr := v_0.Args[0] 5313 idx := v_0.Args[1] 5314 mem := v.Args[1] 5315 v.reset(OpAMD64MOVLstoreconstidx1) 5316 v.AuxInt = x 5317 v.Aux = sym 5318 v.AddArg(ptr) 5319 v.AddArg(idx) 5320 v.AddArg(mem) 5321 return true 5322 } 5323 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 5324 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == 
ValAndOff(c).Off() && clobber(x) 5325 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5326 for { 5327 c := v.AuxInt 5328 s := v.Aux 5329 p := v.Args[0] 5330 x := v.Args[1] 5331 if x.Op != OpAMD64MOVLstoreconst { 5332 break 5333 } 5334 a := x.AuxInt 5335 if x.Aux != s { 5336 break 5337 } 5338 if p != x.Args[0] { 5339 break 5340 } 5341 mem := x.Args[1] 5342 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5343 break 5344 } 5345 v.reset(OpAMD64MOVQstore) 5346 v.AuxInt = ValAndOff(a).Off() 5347 v.Aux = s 5348 v.AddArg(p) 5349 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5350 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5351 v.AddArg(v0) 5352 v.AddArg(mem) 5353 return true 5354 } 5355 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 5356 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5357 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5358 for { 5359 sc := v.AuxInt 5360 sym1 := v.Aux 5361 v_0 := v.Args[0] 5362 if v_0.Op != OpAMD64LEAL { 5363 break 5364 } 5365 off := v_0.AuxInt 5366 sym2 := v_0.Aux 5367 ptr := v_0.Args[0] 5368 mem := v.Args[1] 5369 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5370 break 5371 } 5372 v.reset(OpAMD64MOVLstoreconst) 5373 v.AuxInt = ValAndOff(sc).add(off) 5374 v.Aux = mergeSym(sym1, sym2) 5375 v.AddArg(ptr) 5376 v.AddArg(mem) 5377 return true 5378 } 5379 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 5380 // cond: ValAndOff(sc).canAdd(off) 5381 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5382 for { 5383 sc := v.AuxInt 5384 s := v.Aux 5385 v_0 := v.Args[0] 5386 if v_0.Op != OpAMD64ADDLconst { 5387 break 5388 } 5389 off := v_0.AuxInt 5390 ptr := v_0.Args[0] 5391 mem := v.Args[1] 5392 if !(ValAndOff(sc).canAdd(off)) { 5393 break 5394 } 5395 v.reset(OpAMD64MOVLstoreconst) 5396 
v.AuxInt = ValAndOff(sc).add(off) 5397 v.Aux = s 5398 v.AddArg(ptr) 5399 v.AddArg(mem) 5400 return true 5401 } 5402 return false 5403 } 5404 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { 5405 b := v.Block 5406 _ = b 5407 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 5408 // cond: 5409 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 5410 for { 5411 c := v.AuxInt 5412 sym := v.Aux 5413 ptr := v.Args[0] 5414 v_1 := v.Args[1] 5415 if v_1.Op != OpAMD64SHLQconst { 5416 break 5417 } 5418 if v_1.AuxInt != 2 { 5419 break 5420 } 5421 idx := v_1.Args[0] 5422 mem := v.Args[2] 5423 v.reset(OpAMD64MOVLstoreconstidx4) 5424 v.AuxInt = c 5425 v.Aux = sym 5426 v.AddArg(ptr) 5427 v.AddArg(idx) 5428 v.AddArg(mem) 5429 return true 5430 } 5431 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 5432 // cond: 5433 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5434 for { 5435 x := v.AuxInt 5436 sym := v.Aux 5437 v_0 := v.Args[0] 5438 if v_0.Op != OpAMD64ADDQconst { 5439 break 5440 } 5441 c := v_0.AuxInt 5442 ptr := v_0.Args[0] 5443 idx := v.Args[1] 5444 mem := v.Args[2] 5445 v.reset(OpAMD64MOVLstoreconstidx1) 5446 v.AuxInt = ValAndOff(x).add(c) 5447 v.Aux = sym 5448 v.AddArg(ptr) 5449 v.AddArg(idx) 5450 v.AddArg(mem) 5451 return true 5452 } 5453 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 5454 // cond: 5455 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5456 for { 5457 x := v.AuxInt 5458 sym := v.Aux 5459 ptr := v.Args[0] 5460 v_1 := v.Args[1] 5461 if v_1.Op != OpAMD64ADDQconst { 5462 break 5463 } 5464 c := v_1.AuxInt 5465 idx := v_1.Args[0] 5466 mem := v.Args[2] 5467 v.reset(OpAMD64MOVLstoreconstidx1) 5468 v.AuxInt = ValAndOff(x).add(c) 5469 v.Aux = sym 5470 v.AddArg(ptr) 5471 v.AddArg(idx) 5472 v.AddArg(mem) 5473 return true 5474 } 5475 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 5476 // cond: 
x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 5477 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5478 for { 5479 c := v.AuxInt 5480 s := v.Aux 5481 p := v.Args[0] 5482 i := v.Args[1] 5483 x := v.Args[2] 5484 if x.Op != OpAMD64MOVLstoreconstidx1 { 5485 break 5486 } 5487 a := x.AuxInt 5488 if x.Aux != s { 5489 break 5490 } 5491 if p != x.Args[0] { 5492 break 5493 } 5494 if i != x.Args[1] { 5495 break 5496 } 5497 mem := x.Args[2] 5498 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5499 break 5500 } 5501 v.reset(OpAMD64MOVQstoreidx1) 5502 v.AuxInt = ValAndOff(a).Off() 5503 v.Aux = s 5504 v.AddArg(p) 5505 v.AddArg(i) 5506 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5507 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5508 v.AddArg(v0) 5509 v.AddArg(mem) 5510 return true 5511 } 5512 return false 5513 } 5514 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { 5515 b := v.Block 5516 _ = b 5517 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 5518 // cond: 5519 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5520 for { 5521 x := v.AuxInt 5522 sym := v.Aux 5523 v_0 := v.Args[0] 5524 if v_0.Op != OpAMD64ADDQconst { 5525 break 5526 } 5527 c := v_0.AuxInt 5528 ptr := v_0.Args[0] 5529 idx := v.Args[1] 5530 mem := v.Args[2] 5531 v.reset(OpAMD64MOVLstoreconstidx4) 5532 v.AuxInt = ValAndOff(x).add(c) 5533 v.Aux = sym 5534 v.AddArg(ptr) 5535 v.AddArg(idx) 5536 v.AddArg(mem) 5537 return true 5538 } 5539 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 5540 // cond: 5541 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 5542 for { 5543 x := v.AuxInt 5544 sym := v.Aux 5545 ptr := v.Args[0] 5546 v_1 := v.Args[1] 5547 if v_1.Op != OpAMD64ADDQconst { 5548 break 5549 } 5550 c := v_1.AuxInt 
5551 idx := v_1.Args[0] 5552 mem := v.Args[2] 5553 v.reset(OpAMD64MOVLstoreconstidx4) 5554 v.AuxInt = ValAndOff(x).add(4 * c) 5555 v.Aux = sym 5556 v.AddArg(ptr) 5557 v.AddArg(idx) 5558 v.AddArg(mem) 5559 return true 5560 } 5561 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 5562 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 5563 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5564 for { 5565 c := v.AuxInt 5566 s := v.Aux 5567 p := v.Args[0] 5568 i := v.Args[1] 5569 x := v.Args[2] 5570 if x.Op != OpAMD64MOVLstoreconstidx4 { 5571 break 5572 } 5573 a := x.AuxInt 5574 if x.Aux != s { 5575 break 5576 } 5577 if p != x.Args[0] { 5578 break 5579 } 5580 if i != x.Args[1] { 5581 break 5582 } 5583 mem := x.Args[2] 5584 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5585 break 5586 } 5587 v.reset(OpAMD64MOVQstoreidx1) 5588 v.AuxInt = ValAndOff(a).Off() 5589 v.Aux = s 5590 v.AddArg(p) 5591 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) 5592 v0.AuxInt = 2 5593 v0.AddArg(i) 5594 v.AddArg(v0) 5595 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5596 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5597 v.AddArg(v1) 5598 v.AddArg(mem) 5599 return true 5600 } 5601 return false 5602 } 5603 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { 5604 b := v.Block 5605 _ = b 5606 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 5607 // cond: 5608 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 5609 for { 5610 c := v.AuxInt 5611 sym := v.Aux 5612 ptr := v.Args[0] 5613 v_1 := v.Args[1] 5614 if v_1.Op != OpAMD64SHLQconst { 5615 break 5616 } 5617 if v_1.AuxInt != 2 { 5618 break 5619 } 5620 idx := v_1.Args[0] 5621 val := v.Args[2] 5622 mem := v.Args[3] 5623 v.reset(OpAMD64MOVLstoreidx4) 5624 
v.AuxInt = c 5625 v.Aux = sym 5626 v.AddArg(ptr) 5627 v.AddArg(idx) 5628 v.AddArg(val) 5629 v.AddArg(mem) 5630 return true 5631 } 5632 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 5633 // cond: 5634 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 5635 for { 5636 c := v.AuxInt 5637 sym := v.Aux 5638 v_0 := v.Args[0] 5639 if v_0.Op != OpAMD64ADDQconst { 5640 break 5641 } 5642 d := v_0.AuxInt 5643 ptr := v_0.Args[0] 5644 idx := v.Args[1] 5645 val := v.Args[2] 5646 mem := v.Args[3] 5647 v.reset(OpAMD64MOVLstoreidx1) 5648 v.AuxInt = c + d 5649 v.Aux = sym 5650 v.AddArg(ptr) 5651 v.AddArg(idx) 5652 v.AddArg(val) 5653 v.AddArg(mem) 5654 return true 5655 } 5656 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 5657 // cond: 5658 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 5659 for { 5660 c := v.AuxInt 5661 sym := v.Aux 5662 ptr := v.Args[0] 5663 v_1 := v.Args[1] 5664 if v_1.Op != OpAMD64ADDQconst { 5665 break 5666 } 5667 d := v_1.AuxInt 5668 idx := v_1.Args[0] 5669 val := v.Args[2] 5670 mem := v.Args[3] 5671 v.reset(OpAMD64MOVLstoreidx1) 5672 v.AuxInt = c + d 5673 v.Aux = sym 5674 v.AddArg(ptr) 5675 v.AddArg(idx) 5676 v.AddArg(val) 5677 v.AddArg(mem) 5678 return true 5679 } 5680 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 5681 // cond: x.Uses == 1 && clobber(x) 5682 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 5683 for { 5684 i := v.AuxInt 5685 s := v.Aux 5686 p := v.Args[0] 5687 idx := v.Args[1] 5688 v_2 := v.Args[2] 5689 if v_2.Op != OpAMD64SHRQconst { 5690 break 5691 } 5692 if v_2.AuxInt != 32 { 5693 break 5694 } 5695 w := v_2.Args[0] 5696 x := v.Args[3] 5697 if x.Op != OpAMD64MOVLstoreidx1 { 5698 break 5699 } 5700 if x.AuxInt != i-4 { 5701 break 5702 } 5703 if x.Aux != s { 5704 break 5705 } 5706 if p != x.Args[0] { 5707 break 5708 } 5709 if idx != x.Args[1] { 5710 break 5711 } 5712 if w != x.Args[2] { 5713 break 5714 } 5715 mem := x.Args[3] 5716 if 
!(x.Uses == 1 && clobber(x)) { 5717 break 5718 } 5719 v.reset(OpAMD64MOVQstoreidx1) 5720 v.AuxInt = i - 4 5721 v.Aux = s 5722 v.AddArg(p) 5723 v.AddArg(idx) 5724 v.AddArg(w) 5725 v.AddArg(mem) 5726 return true 5727 } 5728 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 5729 // cond: x.Uses == 1 && clobber(x) 5730 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 5731 for { 5732 i := v.AuxInt 5733 s := v.Aux 5734 p := v.Args[0] 5735 idx := v.Args[1] 5736 v_2 := v.Args[2] 5737 if v_2.Op != OpAMD64SHRQconst { 5738 break 5739 } 5740 j := v_2.AuxInt 5741 w := v_2.Args[0] 5742 x := v.Args[3] 5743 if x.Op != OpAMD64MOVLstoreidx1 { 5744 break 5745 } 5746 if x.AuxInt != i-4 { 5747 break 5748 } 5749 if x.Aux != s { 5750 break 5751 } 5752 if p != x.Args[0] { 5753 break 5754 } 5755 if idx != x.Args[1] { 5756 break 5757 } 5758 w0 := x.Args[2] 5759 if w0.Op != OpAMD64SHRQconst { 5760 break 5761 } 5762 if w0.AuxInt != j-32 { 5763 break 5764 } 5765 if w != w0.Args[0] { 5766 break 5767 } 5768 mem := x.Args[3] 5769 if !(x.Uses == 1 && clobber(x)) { 5770 break 5771 } 5772 v.reset(OpAMD64MOVQstoreidx1) 5773 v.AuxInt = i - 4 5774 v.Aux = s 5775 v.AddArg(p) 5776 v.AddArg(idx) 5777 v.AddArg(w0) 5778 v.AddArg(mem) 5779 return true 5780 } 5781 return false 5782 } 5783 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { 5784 b := v.Block 5785 _ = b 5786 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 5787 // cond: 5788 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 5789 for { 5790 c := v.AuxInt 5791 sym := v.Aux 5792 v_0 := v.Args[0] 5793 if v_0.Op != OpAMD64ADDQconst { 5794 break 5795 } 5796 d := v_0.AuxInt 5797 ptr := v_0.Args[0] 5798 idx := v.Args[1] 5799 val := v.Args[2] 5800 mem := v.Args[3] 5801 v.reset(OpAMD64MOVLstoreidx4) 5802 v.AuxInt = c + d 5803 v.Aux = sym 5804 v.AddArg(ptr) 5805 v.AddArg(idx) 5806 v.AddArg(val) 5807 v.AddArg(mem) 5808 return true 5809 } 
5810 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 5811 // cond: 5812 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 5813 for { 5814 c := v.AuxInt 5815 sym := v.Aux 5816 ptr := v.Args[0] 5817 v_1 := v.Args[1] 5818 if v_1.Op != OpAMD64ADDQconst { 5819 break 5820 } 5821 d := v_1.AuxInt 5822 idx := v_1.Args[0] 5823 val := v.Args[2] 5824 mem := v.Args[3] 5825 v.reset(OpAMD64MOVLstoreidx4) 5826 v.AuxInt = c + 4*d 5827 v.Aux = sym 5828 v.AddArg(ptr) 5829 v.AddArg(idx) 5830 v.AddArg(val) 5831 v.AddArg(mem) 5832 return true 5833 } 5834 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 5835 // cond: x.Uses == 1 && clobber(x) 5836 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 5837 for { 5838 i := v.AuxInt 5839 s := v.Aux 5840 p := v.Args[0] 5841 idx := v.Args[1] 5842 v_2 := v.Args[2] 5843 if v_2.Op != OpAMD64SHRQconst { 5844 break 5845 } 5846 if v_2.AuxInt != 32 { 5847 break 5848 } 5849 w := v_2.Args[0] 5850 x := v.Args[3] 5851 if x.Op != OpAMD64MOVLstoreidx4 { 5852 break 5853 } 5854 if x.AuxInt != i-4 { 5855 break 5856 } 5857 if x.Aux != s { 5858 break 5859 } 5860 if p != x.Args[0] { 5861 break 5862 } 5863 if idx != x.Args[1] { 5864 break 5865 } 5866 if w != x.Args[2] { 5867 break 5868 } 5869 mem := x.Args[3] 5870 if !(x.Uses == 1 && clobber(x)) { 5871 break 5872 } 5873 v.reset(OpAMD64MOVQstoreidx1) 5874 v.AuxInt = i - 4 5875 v.Aux = s 5876 v.AddArg(p) 5877 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 5878 v0.AuxInt = 2 5879 v0.AddArg(idx) 5880 v.AddArg(v0) 5881 v.AddArg(w) 5882 v.AddArg(mem) 5883 return true 5884 } 5885 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 5886 // cond: x.Uses == 1 && clobber(x) 5887 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 5888 for { 5889 i := v.AuxInt 5890 s := v.Aux 5891 p := v.Args[0] 5892 idx := v.Args[1] 5893 v_2 := 
v.Args[2] 5894 if v_2.Op != OpAMD64SHRQconst { 5895 break 5896 } 5897 j := v_2.AuxInt 5898 w := v_2.Args[0] 5899 x := v.Args[3] 5900 if x.Op != OpAMD64MOVLstoreidx4 { 5901 break 5902 } 5903 if x.AuxInt != i-4 { 5904 break 5905 } 5906 if x.Aux != s { 5907 break 5908 } 5909 if p != x.Args[0] { 5910 break 5911 } 5912 if idx != x.Args[1] { 5913 break 5914 } 5915 w0 := x.Args[2] 5916 if w0.Op != OpAMD64SHRQconst { 5917 break 5918 } 5919 if w0.AuxInt != j-32 { 5920 break 5921 } 5922 if w != w0.Args[0] { 5923 break 5924 } 5925 mem := x.Args[3] 5926 if !(x.Uses == 1 && clobber(x)) { 5927 break 5928 } 5929 v.reset(OpAMD64MOVQstoreidx1) 5930 v.AuxInt = i - 4 5931 v.Aux = s 5932 v.AddArg(p) 5933 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 5934 v0.AuxInt = 2 5935 v0.AddArg(idx) 5936 v.AddArg(v0) 5937 v.AddArg(w0) 5938 v.AddArg(mem) 5939 return true 5940 } 5941 return false 5942 } 5943 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { 5944 b := v.Block 5945 _ = b 5946 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 5947 // cond: is32Bit(off1+off2) 5948 // result: (MOVOload [off1+off2] {sym} ptr mem) 5949 for { 5950 off1 := v.AuxInt 5951 sym := v.Aux 5952 v_0 := v.Args[0] 5953 if v_0.Op != OpAMD64ADDQconst { 5954 break 5955 } 5956 off2 := v_0.AuxInt 5957 ptr := v_0.Args[0] 5958 mem := v.Args[1] 5959 if !(is32Bit(off1 + off2)) { 5960 break 5961 } 5962 v.reset(OpAMD64MOVOload) 5963 v.AuxInt = off1 + off2 5964 v.Aux = sym 5965 v.AddArg(ptr) 5966 v.AddArg(mem) 5967 return true 5968 } 5969 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 5970 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5971 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5972 for { 5973 off1 := v.AuxInt 5974 sym1 := v.Aux 5975 v_0 := v.Args[0] 5976 if v_0.Op != OpAMD64LEAQ { 5977 break 5978 } 5979 off2 := v_0.AuxInt 5980 sym2 := v_0.Aux 5981 base := v_0.Args[0] 5982 mem := v.Args[1] 5983 if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2)) {
			break
		}
		// (continuation of the MOVOload LEAQ-folding rule begun above:
		// merge the LEAQ's offset and symbol into the load itself)
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVOstore applies the generated AMD64 rewrite
// rules for MOVOstore: it folds an ADDQconst or LEAQ address computation in
// the store's address operand into the store's own offset (and merged symbol,
// for LEAQ), provided the combined offset passes is32Bit (presumably so it
// still fits an x86-64 signed 32-bit displacement — confirm against
// gen/AMD64.rules). It reports whether v was rewritten in place.
//
// NOTE(review): this file is autogenerated ("do not edit!"); any change to
// these rules belongs in gen/AMD64.rules, not here.
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b // b is required by rules that allocate new values; unused in this function, kept for generated-code uniformity
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
6061 if v_0.Op != OpAMD64ADDQconst { 6062 break 6063 } 6064 off2 := v_0.AuxInt 6065 ptr := v_0.Args[0] 6066 mem := v.Args[1] 6067 if !(is32Bit(off1 + off2)) { 6068 break 6069 } 6070 v.reset(OpAMD64MOVQatomicload) 6071 v.AuxInt = off1 + off2 6072 v.Aux = sym 6073 v.AddArg(ptr) 6074 v.AddArg(mem) 6075 return true 6076 } 6077 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 6078 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6079 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 6080 for { 6081 off1 := v.AuxInt 6082 sym1 := v.Aux 6083 v_0 := v.Args[0] 6084 if v_0.Op != OpAMD64LEAQ { 6085 break 6086 } 6087 off2 := v_0.AuxInt 6088 sym2 := v_0.Aux 6089 ptr := v_0.Args[0] 6090 mem := v.Args[1] 6091 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6092 break 6093 } 6094 v.reset(OpAMD64MOVQatomicload) 6095 v.AuxInt = off1 + off2 6096 v.Aux = mergeSym(sym1, sym2) 6097 v.AddArg(ptr) 6098 v.AddArg(mem) 6099 return true 6100 } 6101 return false 6102 } 6103 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { 6104 b := v.Block 6105 _ = b 6106 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 6107 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6108 // result: x 6109 for { 6110 off := v.AuxInt 6111 sym := v.Aux 6112 ptr := v.Args[0] 6113 v_1 := v.Args[1] 6114 if v_1.Op != OpAMD64MOVQstore { 6115 break 6116 } 6117 off2 := v_1.AuxInt 6118 sym2 := v_1.Aux 6119 ptr2 := v_1.Args[0] 6120 x := v_1.Args[1] 6121 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6122 break 6123 } 6124 v.reset(OpCopy) 6125 v.Type = x.Type 6126 v.AddArg(x) 6127 return true 6128 } 6129 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 6130 // cond: is32Bit(off1+off2) 6131 // result: (MOVQload [off1+off2] {sym} ptr mem) 6132 for { 6133 off1 := v.AuxInt 6134 sym := v.Aux 6135 v_0 := v.Args[0] 6136 if v_0.Op != OpAMD64ADDQconst { 6137 break 6138 } 6139 off2 := v_0.AuxInt 6140 ptr := 
v_0.Args[0] 6141 mem := v.Args[1] 6142 if !(is32Bit(off1 + off2)) { 6143 break 6144 } 6145 v.reset(OpAMD64MOVQload) 6146 v.AuxInt = off1 + off2 6147 v.Aux = sym 6148 v.AddArg(ptr) 6149 v.AddArg(mem) 6150 return true 6151 } 6152 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6153 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6154 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6155 for { 6156 off1 := v.AuxInt 6157 sym1 := v.Aux 6158 v_0 := v.Args[0] 6159 if v_0.Op != OpAMD64LEAQ { 6160 break 6161 } 6162 off2 := v_0.AuxInt 6163 sym2 := v_0.Aux 6164 base := v_0.Args[0] 6165 mem := v.Args[1] 6166 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6167 break 6168 } 6169 v.reset(OpAMD64MOVQload) 6170 v.AuxInt = off1 + off2 6171 v.Aux = mergeSym(sym1, sym2) 6172 v.AddArg(base) 6173 v.AddArg(mem) 6174 return true 6175 } 6176 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6177 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6178 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6179 for { 6180 off1 := v.AuxInt 6181 sym1 := v.Aux 6182 v_0 := v.Args[0] 6183 if v_0.Op != OpAMD64LEAQ1 { 6184 break 6185 } 6186 off2 := v_0.AuxInt 6187 sym2 := v_0.Aux 6188 ptr := v_0.Args[0] 6189 idx := v_0.Args[1] 6190 mem := v.Args[1] 6191 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6192 break 6193 } 6194 v.reset(OpAMD64MOVQloadidx1) 6195 v.AuxInt = off1 + off2 6196 v.Aux = mergeSym(sym1, sym2) 6197 v.AddArg(ptr) 6198 v.AddArg(idx) 6199 v.AddArg(mem) 6200 return true 6201 } 6202 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 6203 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6204 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6205 for { 6206 off1 := v.AuxInt 6207 sym1 := v.Aux 6208 v_0 := v.Args[0] 6209 if v_0.Op != OpAMD64LEAQ8 { 6210 break 6211 } 6212 off2 := v_0.AuxInt 6213 sym2 := v_0.Aux 6214 ptr := v_0.Args[0] 6215 idx := 
v_0.Args[1] 6216 mem := v.Args[1] 6217 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6218 break 6219 } 6220 v.reset(OpAMD64MOVQloadidx8) 6221 v.AuxInt = off1 + off2 6222 v.Aux = mergeSym(sym1, sym2) 6223 v.AddArg(ptr) 6224 v.AddArg(idx) 6225 v.AddArg(mem) 6226 return true 6227 } 6228 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 6229 // cond: ptr.Op != OpSB 6230 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 6231 for { 6232 off := v.AuxInt 6233 sym := v.Aux 6234 v_0 := v.Args[0] 6235 if v_0.Op != OpAMD64ADDQ { 6236 break 6237 } 6238 ptr := v_0.Args[0] 6239 idx := v_0.Args[1] 6240 mem := v.Args[1] 6241 if !(ptr.Op != OpSB) { 6242 break 6243 } 6244 v.reset(OpAMD64MOVQloadidx1) 6245 v.AuxInt = off 6246 v.Aux = sym 6247 v.AddArg(ptr) 6248 v.AddArg(idx) 6249 v.AddArg(mem) 6250 return true 6251 } 6252 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 6253 // cond: canMergeSym(sym1, sym2) 6254 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6255 for { 6256 off1 := v.AuxInt 6257 sym1 := v.Aux 6258 v_0 := v.Args[0] 6259 if v_0.Op != OpAMD64LEAL { 6260 break 6261 } 6262 off2 := v_0.AuxInt 6263 sym2 := v_0.Aux 6264 base := v_0.Args[0] 6265 mem := v.Args[1] 6266 if !(canMergeSym(sym1, sym2)) { 6267 break 6268 } 6269 v.reset(OpAMD64MOVQload) 6270 v.AuxInt = off1 + off2 6271 v.Aux = mergeSym(sym1, sym2) 6272 v.AddArg(base) 6273 v.AddArg(mem) 6274 return true 6275 } 6276 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 6277 // cond: is32Bit(off1+off2) 6278 // result: (MOVQload [off1+off2] {sym} ptr mem) 6279 for { 6280 off1 := v.AuxInt 6281 sym := v.Aux 6282 v_0 := v.Args[0] 6283 if v_0.Op != OpAMD64ADDLconst { 6284 break 6285 } 6286 off2 := v_0.AuxInt 6287 ptr := v_0.Args[0] 6288 mem := v.Args[1] 6289 if !(is32Bit(off1 + off2)) { 6290 break 6291 } 6292 v.reset(OpAMD64MOVQload) 6293 v.AuxInt = off1 + off2 6294 v.Aux = sym 6295 v.AddArg(ptr) 6296 v.AddArg(mem) 6297 return true 6298 } 6299 return false 6300 } 6301 func 
rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool { 6302 b := v.Block 6303 _ = b 6304 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 6305 // cond: 6306 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 6307 for { 6308 c := v.AuxInt 6309 sym := v.Aux 6310 ptr := v.Args[0] 6311 v_1 := v.Args[1] 6312 if v_1.Op != OpAMD64SHLQconst { 6313 break 6314 } 6315 if v_1.AuxInt != 3 { 6316 break 6317 } 6318 idx := v_1.Args[0] 6319 mem := v.Args[2] 6320 v.reset(OpAMD64MOVQloadidx8) 6321 v.AuxInt = c 6322 v.Aux = sym 6323 v.AddArg(ptr) 6324 v.AddArg(idx) 6325 v.AddArg(mem) 6326 return true 6327 } 6328 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 6329 // cond: 6330 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 6331 for { 6332 c := v.AuxInt 6333 sym := v.Aux 6334 v_0 := v.Args[0] 6335 if v_0.Op != OpAMD64ADDQconst { 6336 break 6337 } 6338 d := v_0.AuxInt 6339 ptr := v_0.Args[0] 6340 idx := v.Args[1] 6341 mem := v.Args[2] 6342 v.reset(OpAMD64MOVQloadidx1) 6343 v.AuxInt = c + d 6344 v.Aux = sym 6345 v.AddArg(ptr) 6346 v.AddArg(idx) 6347 v.AddArg(mem) 6348 return true 6349 } 6350 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 6351 // cond: 6352 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 6353 for { 6354 c := v.AuxInt 6355 sym := v.Aux 6356 ptr := v.Args[0] 6357 v_1 := v.Args[1] 6358 if v_1.Op != OpAMD64ADDQconst { 6359 break 6360 } 6361 d := v_1.AuxInt 6362 idx := v_1.Args[0] 6363 mem := v.Args[2] 6364 v.reset(OpAMD64MOVQloadidx1) 6365 v.AuxInt = c + d 6366 v.Aux = sym 6367 v.AddArg(ptr) 6368 v.AddArg(idx) 6369 v.AddArg(mem) 6370 return true 6371 } 6372 return false 6373 } 6374 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { 6375 b := v.Block 6376 _ = b 6377 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 6378 // cond: 6379 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 6380 for { 6381 c := v.AuxInt 6382 sym := v.Aux 6383 v_0 := v.Args[0] 6384 if v_0.Op != 
OpAMD64ADDQconst { 6385 break 6386 } 6387 d := v_0.AuxInt 6388 ptr := v_0.Args[0] 6389 idx := v.Args[1] 6390 mem := v.Args[2] 6391 v.reset(OpAMD64MOVQloadidx8) 6392 v.AuxInt = c + d 6393 v.Aux = sym 6394 v.AddArg(ptr) 6395 v.AddArg(idx) 6396 v.AddArg(mem) 6397 return true 6398 } 6399 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 6400 // cond: 6401 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 6402 for { 6403 c := v.AuxInt 6404 sym := v.Aux 6405 ptr := v.Args[0] 6406 v_1 := v.Args[1] 6407 if v_1.Op != OpAMD64ADDQconst { 6408 break 6409 } 6410 d := v_1.AuxInt 6411 idx := v_1.Args[0] 6412 mem := v.Args[2] 6413 v.reset(OpAMD64MOVQloadidx8) 6414 v.AuxInt = c + 8*d 6415 v.Aux = sym 6416 v.AddArg(ptr) 6417 v.AddArg(idx) 6418 v.AddArg(mem) 6419 return true 6420 } 6421 return false 6422 } 6423 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { 6424 b := v.Block 6425 _ = b 6426 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6427 // cond: is32Bit(off1+off2) 6428 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 6429 for { 6430 off1 := v.AuxInt 6431 sym := v.Aux 6432 v_0 := v.Args[0] 6433 if v_0.Op != OpAMD64ADDQconst { 6434 break 6435 } 6436 off2 := v_0.AuxInt 6437 ptr := v_0.Args[0] 6438 val := v.Args[1] 6439 mem := v.Args[2] 6440 if !(is32Bit(off1 + off2)) { 6441 break 6442 } 6443 v.reset(OpAMD64MOVQstore) 6444 v.AuxInt = off1 + off2 6445 v.Aux = sym 6446 v.AddArg(ptr) 6447 v.AddArg(val) 6448 v.AddArg(mem) 6449 return true 6450 } 6451 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 6452 // cond: validValAndOff(c,off) 6453 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 6454 for { 6455 off := v.AuxInt 6456 sym := v.Aux 6457 ptr := v.Args[0] 6458 v_1 := v.Args[1] 6459 if v_1.Op != OpAMD64MOVQconst { 6460 break 6461 } 6462 c := v_1.AuxInt 6463 mem := v.Args[2] 6464 if !(validValAndOff(c, off)) { 6465 break 6466 } 6467 v.reset(OpAMD64MOVQstoreconst) 6468 v.AuxInt = makeValAndOff(c, 
off) 6469 v.Aux = sym 6470 v.AddArg(ptr) 6471 v.AddArg(mem) 6472 return true 6473 } 6474 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6475 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6476 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6477 for { 6478 off1 := v.AuxInt 6479 sym1 := v.Aux 6480 v_0 := v.Args[0] 6481 if v_0.Op != OpAMD64LEAQ { 6482 break 6483 } 6484 off2 := v_0.AuxInt 6485 sym2 := v_0.Aux 6486 base := v_0.Args[0] 6487 val := v.Args[1] 6488 mem := v.Args[2] 6489 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6490 break 6491 } 6492 v.reset(OpAMD64MOVQstore) 6493 v.AuxInt = off1 + off2 6494 v.Aux = mergeSym(sym1, sym2) 6495 v.AddArg(base) 6496 v.AddArg(val) 6497 v.AddArg(mem) 6498 return true 6499 } 6500 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 6501 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6502 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6503 for { 6504 off1 := v.AuxInt 6505 sym1 := v.Aux 6506 v_0 := v.Args[0] 6507 if v_0.Op != OpAMD64LEAQ1 { 6508 break 6509 } 6510 off2 := v_0.AuxInt 6511 sym2 := v_0.Aux 6512 ptr := v_0.Args[0] 6513 idx := v_0.Args[1] 6514 val := v.Args[1] 6515 mem := v.Args[2] 6516 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6517 break 6518 } 6519 v.reset(OpAMD64MOVQstoreidx1) 6520 v.AuxInt = off1 + off2 6521 v.Aux = mergeSym(sym1, sym2) 6522 v.AddArg(ptr) 6523 v.AddArg(idx) 6524 v.AddArg(val) 6525 v.AddArg(mem) 6526 return true 6527 } 6528 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 6529 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6530 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6531 for { 6532 off1 := v.AuxInt 6533 sym1 := v.Aux 6534 v_0 := v.Args[0] 6535 if v_0.Op != OpAMD64LEAQ8 { 6536 break 6537 } 6538 off2 := v_0.AuxInt 6539 sym2 := v_0.Aux 6540 ptr := v_0.Args[0] 6541 idx := v_0.Args[1] 6542 val := 
v.Args[1] 6543 mem := v.Args[2] 6544 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6545 break 6546 } 6547 v.reset(OpAMD64MOVQstoreidx8) 6548 v.AuxInt = off1 + off2 6549 v.Aux = mergeSym(sym1, sym2) 6550 v.AddArg(ptr) 6551 v.AddArg(idx) 6552 v.AddArg(val) 6553 v.AddArg(mem) 6554 return true 6555 } 6556 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 6557 // cond: ptr.Op != OpSB 6558 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 6559 for { 6560 off := v.AuxInt 6561 sym := v.Aux 6562 v_0 := v.Args[0] 6563 if v_0.Op != OpAMD64ADDQ { 6564 break 6565 } 6566 ptr := v_0.Args[0] 6567 idx := v_0.Args[1] 6568 val := v.Args[1] 6569 mem := v.Args[2] 6570 if !(ptr.Op != OpSB) { 6571 break 6572 } 6573 v.reset(OpAMD64MOVQstoreidx1) 6574 v.AuxInt = off 6575 v.Aux = sym 6576 v.AddArg(ptr) 6577 v.AddArg(idx) 6578 v.AddArg(val) 6579 v.AddArg(mem) 6580 return true 6581 } 6582 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 6583 // cond: canMergeSym(sym1, sym2) 6584 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6585 for { 6586 off1 := v.AuxInt 6587 sym1 := v.Aux 6588 v_0 := v.Args[0] 6589 if v_0.Op != OpAMD64LEAL { 6590 break 6591 } 6592 off2 := v_0.AuxInt 6593 sym2 := v_0.Aux 6594 base := v_0.Args[0] 6595 val := v.Args[1] 6596 mem := v.Args[2] 6597 if !(canMergeSym(sym1, sym2)) { 6598 break 6599 } 6600 v.reset(OpAMD64MOVQstore) 6601 v.AuxInt = off1 + off2 6602 v.Aux = mergeSym(sym1, sym2) 6603 v.AddArg(base) 6604 v.AddArg(val) 6605 v.AddArg(mem) 6606 return true 6607 } 6608 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 6609 // cond: is32Bit(off1+off2) 6610 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 6611 for { 6612 off1 := v.AuxInt 6613 sym := v.Aux 6614 v_0 := v.Args[0] 6615 if v_0.Op != OpAMD64ADDLconst { 6616 break 6617 } 6618 off2 := v_0.AuxInt 6619 ptr := v_0.Args[0] 6620 val := v.Args[1] 6621 mem := v.Args[2] 6622 if !(is32Bit(off1 + off2)) { 6623 break 6624 } 6625 
v.reset(OpAMD64MOVQstore) 6626 v.AuxInt = off1 + off2 6627 v.Aux = sym 6628 v.AddArg(ptr) 6629 v.AddArg(val) 6630 v.AddArg(mem) 6631 return true 6632 } 6633 return false 6634 } 6635 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { 6636 b := v.Block 6637 _ = b 6638 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 6639 // cond: ValAndOff(sc).canAdd(off) 6640 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6641 for { 6642 sc := v.AuxInt 6643 s := v.Aux 6644 v_0 := v.Args[0] 6645 if v_0.Op != OpAMD64ADDQconst { 6646 break 6647 } 6648 off := v_0.AuxInt 6649 ptr := v_0.Args[0] 6650 mem := v.Args[1] 6651 if !(ValAndOff(sc).canAdd(off)) { 6652 break 6653 } 6654 v.reset(OpAMD64MOVQstoreconst) 6655 v.AuxInt = ValAndOff(sc).add(off) 6656 v.Aux = s 6657 v.AddArg(ptr) 6658 v.AddArg(mem) 6659 return true 6660 } 6661 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 6662 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6663 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6664 for { 6665 sc := v.AuxInt 6666 sym1 := v.Aux 6667 v_0 := v.Args[0] 6668 if v_0.Op != OpAMD64LEAQ { 6669 break 6670 } 6671 off := v_0.AuxInt 6672 sym2 := v_0.Aux 6673 ptr := v_0.Args[0] 6674 mem := v.Args[1] 6675 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6676 break 6677 } 6678 v.reset(OpAMD64MOVQstoreconst) 6679 v.AuxInt = ValAndOff(sc).add(off) 6680 v.Aux = mergeSym(sym1, sym2) 6681 v.AddArg(ptr) 6682 v.AddArg(mem) 6683 return true 6684 } 6685 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 6686 // cond: canMergeSym(sym1, sym2) 6687 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6688 for { 6689 x := v.AuxInt 6690 sym1 := v.Aux 6691 v_0 := v.Args[0] 6692 if v_0.Op != OpAMD64LEAQ1 { 6693 break 6694 } 6695 off := v_0.AuxInt 6696 sym2 := v_0.Aux 6697 ptr := v_0.Args[0] 6698 idx := v_0.Args[1] 6699 mem := 
v.Args[1] 6700 if !(canMergeSym(sym1, sym2)) { 6701 break 6702 } 6703 v.reset(OpAMD64MOVQstoreconstidx1) 6704 v.AuxInt = ValAndOff(x).add(off) 6705 v.Aux = mergeSym(sym1, sym2) 6706 v.AddArg(ptr) 6707 v.AddArg(idx) 6708 v.AddArg(mem) 6709 return true 6710 } 6711 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 6712 // cond: canMergeSym(sym1, sym2) 6713 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6714 for { 6715 x := v.AuxInt 6716 sym1 := v.Aux 6717 v_0 := v.Args[0] 6718 if v_0.Op != OpAMD64LEAQ8 { 6719 break 6720 } 6721 off := v_0.AuxInt 6722 sym2 := v_0.Aux 6723 ptr := v_0.Args[0] 6724 idx := v_0.Args[1] 6725 mem := v.Args[1] 6726 if !(canMergeSym(sym1, sym2)) { 6727 break 6728 } 6729 v.reset(OpAMD64MOVQstoreconstidx8) 6730 v.AuxInt = ValAndOff(x).add(off) 6731 v.Aux = mergeSym(sym1, sym2) 6732 v.AddArg(ptr) 6733 v.AddArg(idx) 6734 v.AddArg(mem) 6735 return true 6736 } 6737 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 6738 // cond: 6739 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 6740 for { 6741 x := v.AuxInt 6742 sym := v.Aux 6743 v_0 := v.Args[0] 6744 if v_0.Op != OpAMD64ADDQ { 6745 break 6746 } 6747 ptr := v_0.Args[0] 6748 idx := v_0.Args[1] 6749 mem := v.Args[1] 6750 v.reset(OpAMD64MOVQstoreconstidx1) 6751 v.AuxInt = x 6752 v.Aux = sym 6753 v.AddArg(ptr) 6754 v.AddArg(idx) 6755 v.AddArg(mem) 6756 return true 6757 } 6758 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 6759 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6760 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6761 for { 6762 sc := v.AuxInt 6763 sym1 := v.Aux 6764 v_0 := v.Args[0] 6765 if v_0.Op != OpAMD64LEAL { 6766 break 6767 } 6768 off := v_0.AuxInt 6769 sym2 := v_0.Aux 6770 ptr := v_0.Args[0] 6771 mem := v.Args[1] 6772 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6773 break 6774 } 6775 v.reset(OpAMD64MOVQstoreconst) 6776 
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVQstoreconstidx1 strengthens a 1-scaled
// constant store: a <<3 index becomes the 8-scaled form, and ADDQconst on
// either address operand is folded into the ValAndOff offset.
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVQstoreconstidx8 folds ADDQconst on either
// address operand of an 8-scaled constant store; an index-side constant is
// scaled by 8 before being added to the offset.
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		// index is scaled by 8, so the folded displacement is 8*c.
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVQstoreidx1 strengthens a 1-scaled indexed
// store: a <<3 index becomes MOVQstoreidx8, and ADDQconst on either address
// operand folds into the displacement.
func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVQstoreidx8 folds ADDQconst on either address
// operand of an 8-scaled indexed store into the displacement.
func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSDload folds constant offsets, mergeable
// symbols, and LEAQ1/LEAQ8/ADDQ address forms into MOVSDload and its
// indexed variants.
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		// combined displacement must fit a signed 32-bit immediate.
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		// SB (static base) cannot be used as the base of an indexed address.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSDloadidx1 folds ADDQconst on either address
// operand of a 1-scaled MOVSD load into the displacement.
func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSDloadidx8 folds ADDQconst on either address
// operand of an 8-scaled MOVSD load; an index-side constant is scaled by 8.
func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		// index is scaled by 8, so the folded displacement is 8*d.
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSDstore folds constant offsets, mergeable
// symbols, and LEAQ1/LEAQ8/ADDQ address forms into MOVSDstore and its
// indexed variants.
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		// SB (static base) cannot be used as the base of an indexed address.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSDstoreidx1 folds ADDQconst on either address
// operand of a 1-scaled MOVSD store into the displacement.
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSDstoreidx8 folds ADDQconst on either address
// operand of an 8-scaled MOVSD store; an index-side constant is scaled by 8.
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		// index is scaled by 8, so the folded displacement is 8*d.
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSSload folds constant offsets, mergeable
// symbols, and LEAQ1/LEAQ4/ADDQ address forms into MOVSSload and its
// indexed variants (4-byte scale for single-precision).
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		// SB (static base) cannot be used as the base of an indexed address.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSSloadidx1 folds ADDQconst on either address
// operand of a 1-scaled MOVSS load into the displacement.
func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSSloadidx4 folds ADDQconst on either address
// operand of a 4-scaled MOVSS load; an index-side constant is scaled by 4.
func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		// index is scaled by 4, so the folded displacement is 4*d.
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSSstore folds constant offsets, mergeable
// symbols, and LEAQ1/LEAQ4/ADDQ address forms into MOVSSstore and its
// indexed variants.
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		// SB (static base) cannot be used as the base of an indexed address.
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSSstoreidx1 folds ADDQconst on either address
// operand of a 1-scaled MOVSS store into the displacement.
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVSSstoreidx4 folds ADDQconst on either address
// operand of a 4-scaled MOVSS store; an index-side constant is scaled by 4.
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		// index is scaled by 4, so the folded displacement is 4*d.
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVWQSX simplifies a sign-extending word move:
// it fuses a single-use MOVWload into MOVWQSXload, and drops the extension
// entirely when an ANDLconst mask already clears the sign bit.
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		// Only fuse when the load has no other users; clobber marks it dead.
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Emit the replacement in the load's block (@x.Block in the rule).
		b = x.Block
		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		// With bit 15 clear the masked value is already non-negative,
		// so sign extension is a no-op.
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64MOVWQSXload folds a LEAQ base (offset plus
// mergeable symbol) into the sign-extending word load.
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2) 8072 v.AddArg(base) 8073 v.AddArg(mem) 8074 return true 8075 } 8076 return false 8077 } 8078 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { 8079 b := v.Block 8080 _ = b 8081 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 8082 // cond: x.Uses == 1 && clobber(x) 8083 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 8084 for { 8085 x := v.Args[0] 8086 if x.Op != OpAMD64MOVWload { 8087 break 8088 } 8089 off := x.AuxInt 8090 sym := x.Aux 8091 ptr := x.Args[0] 8092 mem := x.Args[1] 8093 if !(x.Uses == 1 && clobber(x)) { 8094 break 8095 } 8096 b = x.Block 8097 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type) 8098 v.reset(OpCopy) 8099 v.AddArg(v0) 8100 v0.AuxInt = off 8101 v0.Aux = sym 8102 v0.AddArg(ptr) 8103 v0.AddArg(mem) 8104 return true 8105 } 8106 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 8107 // cond: x.Uses == 1 && clobber(x) 8108 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 8109 for { 8110 x := v.Args[0] 8111 if x.Op != OpAMD64MOVWloadidx1 { 8112 break 8113 } 8114 off := x.AuxInt 8115 sym := x.Aux 8116 ptr := x.Args[0] 8117 idx := x.Args[1] 8118 mem := x.Args[2] 8119 if !(x.Uses == 1 && clobber(x)) { 8120 break 8121 } 8122 b = x.Block 8123 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 8124 v.reset(OpCopy) 8125 v.AddArg(v0) 8126 v0.AuxInt = off 8127 v0.Aux = sym 8128 v0.AddArg(ptr) 8129 v0.AddArg(idx) 8130 v0.AddArg(mem) 8131 return true 8132 } 8133 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 8134 // cond: x.Uses == 1 && clobber(x) 8135 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 8136 for { 8137 x := v.Args[0] 8138 if x.Op != OpAMD64MOVWloadidx2 { 8139 break 8140 } 8141 off := x.AuxInt 8142 sym := x.Aux 8143 ptr := x.Args[0] 8144 idx := x.Args[1] 8145 mem := x.Args[2] 8146 if !(x.Uses == 1 && clobber(x)) { 8147 break 8148 } 8149 b = x.Block 8150 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type) 8151 
// rewriteValueAMD64_OpAMD64MOVWload applies the generated AMD64.rules
// rewrites for MOVWload (16-bit load): it forwards a value just stored to the
// same address (store-to-load forwarding), folds constant address arithmetic
// (ADDQconst/ADDLconst) and symbol offsets (LEAQ/LEAL) into the load, and
// converts loads through LEAQ1/LEAQ2/ADDQ addresses into the corresponding
// indexed load forms.  It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		// Same address as the pending store: use the stored value directly.
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWloadidx1 applies the generated AMD64.rules
// rewrites for MOVWloadidx1 (16-bit load with unscaled index): an index that
// is (SHLQconst [1] idx) becomes a scale-2 indexed load, and ADDQconst on
// either the pointer or the index is folded into the displacement.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWloadidx2 applies the generated AMD64.rules
// rewrites for MOVWloadidx2 (16-bit load with index scaled by 2): ADDQconst
// on the pointer adds d to the displacement; ADDQconst on the index adds 2*d
// because the index is scaled.  It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		// The index is scaled by 2, so a constant added to it contributes
		// twice to the displacement.
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstore applies the generated AMD64.rules
// rewrites for MOVWstore (16-bit store): it drops redundant 16-bit
// sign/zero-extensions of the stored value, folds constant address arithmetic
// and symbol offsets into the store, converts LEAQ1/LEAQ2/ADDQ addresses into
// indexed store forms, turns a stored MOVLconst into MOVWstoreconst, and
// merges two adjacent 16-bit stores of the low and high halves of one value
// into a single 32-bit MOVLstore.  It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		// Only the low 16 bits are stored, so the extension is irrelevant.
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		// The constant is truncated to 16 bits and packed with the offset.
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Two adjacent 16-bit stores of w's low and high halves become one
		// 32-bit store at the lower address.
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconst applies the generated AMD64.rules
// rewrites for MOVWstoreconst (store of a 16-bit constant, value and offset
// packed in a ValAndOff AuxInt): it folds address arithmetic into the packed
// offset, converts LEAQ1/LEAQ2/ADDQ addresses to indexed constant stores, and
// merges two adjacent 16-bit constant stores into one 32-bit MOVLstoreconst.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		// Combine the two 16-bit constants into one 32-bit constant store
		// at the lower offset.
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconstidx1 applies the generated
// AMD64.rules rewrites for MOVWstoreconstidx1 (16-bit constant store with
// unscaled index): a (SHLQconst [1] idx) index becomes the scale-2 form,
// ADDQconst on pointer or index is folded into the packed offset, and two
// adjacent 16-bit constant stores are merged into one MOVLstoreconstidx1.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconstidx2 applies the generated
// AMD64.rules rewrites for MOVWstoreconstidx2 (16-bit constant store, index
// scaled by 2): ADDQconst on the pointer adds c to the packed offset, on the
// index adds 2*c, and two adjacent 16-bit constant stores are merged into a
// MOVLstoreconstidx1 whose index is the old index shifted left by 1 (MOVL has
// no dedicated ×2 constant-store form here, so the scale is made explicit).
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		// Index is scaled by 2, so the folded constant is doubled.
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		// Re-express the ×2 scale as an explicit shift of the index.
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreidx1 applies the generated AMD64.rules
// rewrites for MOVWstoreidx1 (16-bit store with unscaled index): a
// (SHLQconst [1] idx) index becomes the scale-2 form, ADDQconst on pointer or
// index is folded into the displacement, and two adjacent 16-bit stores of
// halves of one value are merged into a single MOVLstoreidx1.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreidx2 applies the generated AMD64.rules
// rewrites for MOVWstoreidx2 (16-bit store, index scaled by 2): ADDQconst on
// the pointer adds d to the displacement, on the index adds 2*d, and two
// adjacent 16-bit stores of halves of one value are merged into a
// MOVLstoreidx1 whose index is the old index shifted left by 1 (the ×2 scale
// made explicit).  It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		// Index is scaled by 2, so the folded constant is doubled.
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		// Re-express the ×2 scale as an explicit shift of the index.
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
if w0.AuxInt != j-16 { 9553 break 9554 } 9555 if w != w0.Args[0] { 9556 break 9557 } 9558 mem := x.Args[3] 9559 if !(x.Uses == 1 && clobber(x)) { 9560 break 9561 } 9562 v.reset(OpAMD64MOVLstoreidx1) 9563 v.AuxInt = i - 2 9564 v.Aux = s 9565 v.AddArg(p) 9566 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 9567 v0.AuxInt = 1 9568 v0.AddArg(idx) 9569 v.AddArg(v0) 9570 v.AddArg(w0) 9571 v.AddArg(mem) 9572 return true 9573 } 9574 return false 9575 } 9576 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 9577 b := v.Block 9578 _ = b 9579 // match: (MULL x (MOVLconst [c])) 9580 // cond: 9581 // result: (MULLconst [c] x) 9582 for { 9583 x := v.Args[0] 9584 v_1 := v.Args[1] 9585 if v_1.Op != OpAMD64MOVLconst { 9586 break 9587 } 9588 c := v_1.AuxInt 9589 v.reset(OpAMD64MULLconst) 9590 v.AuxInt = c 9591 v.AddArg(x) 9592 return true 9593 } 9594 // match: (MULL (MOVLconst [c]) x) 9595 // cond: 9596 // result: (MULLconst [c] x) 9597 for { 9598 v_0 := v.Args[0] 9599 if v_0.Op != OpAMD64MOVLconst { 9600 break 9601 } 9602 c := v_0.AuxInt 9603 x := v.Args[1] 9604 v.reset(OpAMD64MULLconst) 9605 v.AuxInt = c 9606 v.AddArg(x) 9607 return true 9608 } 9609 return false 9610 } 9611 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 9612 b := v.Block 9613 _ = b 9614 // match: (MULLconst [c] (MULLconst [d] x)) 9615 // cond: 9616 // result: (MULLconst [int64(int32(c * d))] x) 9617 for { 9618 c := v.AuxInt 9619 v_0 := v.Args[0] 9620 if v_0.Op != OpAMD64MULLconst { 9621 break 9622 } 9623 d := v_0.AuxInt 9624 x := v_0.Args[0] 9625 v.reset(OpAMD64MULLconst) 9626 v.AuxInt = int64(int32(c * d)) 9627 v.AddArg(x) 9628 return true 9629 } 9630 // match: (MULLconst [c] (MOVLconst [d])) 9631 // cond: 9632 // result: (MOVLconst [int64(int32(c*d))]) 9633 for { 9634 c := v.AuxInt 9635 v_0 := v.Args[0] 9636 if v_0.Op != OpAMD64MOVLconst { 9637 break 9638 } 9639 d := v_0.AuxInt 9640 v.reset(OpAMD64MOVLconst) 9641 v.AuxInt = int64(int32(c * d)) 9642 return true 
9643 } 9644 return false 9645 } 9646 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 9647 b := v.Block 9648 _ = b 9649 // match: (MULQ x (MOVQconst [c])) 9650 // cond: is32Bit(c) 9651 // result: (MULQconst [c] x) 9652 for { 9653 x := v.Args[0] 9654 v_1 := v.Args[1] 9655 if v_1.Op != OpAMD64MOVQconst { 9656 break 9657 } 9658 c := v_1.AuxInt 9659 if !(is32Bit(c)) { 9660 break 9661 } 9662 v.reset(OpAMD64MULQconst) 9663 v.AuxInt = c 9664 v.AddArg(x) 9665 return true 9666 } 9667 // match: (MULQ (MOVQconst [c]) x) 9668 // cond: is32Bit(c) 9669 // result: (MULQconst [c] x) 9670 for { 9671 v_0 := v.Args[0] 9672 if v_0.Op != OpAMD64MOVQconst { 9673 break 9674 } 9675 c := v_0.AuxInt 9676 x := v.Args[1] 9677 if !(is32Bit(c)) { 9678 break 9679 } 9680 v.reset(OpAMD64MULQconst) 9681 v.AuxInt = c 9682 v.AddArg(x) 9683 return true 9684 } 9685 return false 9686 } 9687 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 9688 b := v.Block 9689 _ = b 9690 // match: (MULQconst [c] (MULQconst [d] x)) 9691 // cond: is32Bit(c*d) 9692 // result: (MULQconst [c * d] x) 9693 for { 9694 c := v.AuxInt 9695 v_0 := v.Args[0] 9696 if v_0.Op != OpAMD64MULQconst { 9697 break 9698 } 9699 d := v_0.AuxInt 9700 x := v_0.Args[0] 9701 if !(is32Bit(c * d)) { 9702 break 9703 } 9704 v.reset(OpAMD64MULQconst) 9705 v.AuxInt = c * d 9706 v.AddArg(x) 9707 return true 9708 } 9709 // match: (MULQconst [-1] x) 9710 // cond: 9711 // result: (NEGQ x) 9712 for { 9713 if v.AuxInt != -1 { 9714 break 9715 } 9716 x := v.Args[0] 9717 v.reset(OpAMD64NEGQ) 9718 v.AddArg(x) 9719 return true 9720 } 9721 // match: (MULQconst [0] _) 9722 // cond: 9723 // result: (MOVQconst [0]) 9724 for { 9725 if v.AuxInt != 0 { 9726 break 9727 } 9728 v.reset(OpAMD64MOVQconst) 9729 v.AuxInt = 0 9730 return true 9731 } 9732 // match: (MULQconst [1] x) 9733 // cond: 9734 // result: x 9735 for { 9736 if v.AuxInt != 1 { 9737 break 9738 } 9739 x := v.Args[0] 9740 v.reset(OpCopy) 9741 v.Type = x.Type 9742 
v.AddArg(x) 9743 return true 9744 } 9745 // match: (MULQconst [3] x) 9746 // cond: 9747 // result: (LEAQ2 x x) 9748 for { 9749 if v.AuxInt != 3 { 9750 break 9751 } 9752 x := v.Args[0] 9753 v.reset(OpAMD64LEAQ2) 9754 v.AddArg(x) 9755 v.AddArg(x) 9756 return true 9757 } 9758 // match: (MULQconst [5] x) 9759 // cond: 9760 // result: (LEAQ4 x x) 9761 for { 9762 if v.AuxInt != 5 { 9763 break 9764 } 9765 x := v.Args[0] 9766 v.reset(OpAMD64LEAQ4) 9767 v.AddArg(x) 9768 v.AddArg(x) 9769 return true 9770 } 9771 // match: (MULQconst [7] x) 9772 // cond: 9773 // result: (LEAQ8 (NEGQ <v.Type> x) x) 9774 for { 9775 if v.AuxInt != 7 { 9776 break 9777 } 9778 x := v.Args[0] 9779 v.reset(OpAMD64LEAQ8) 9780 v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type) 9781 v0.AddArg(x) 9782 v.AddArg(v0) 9783 v.AddArg(x) 9784 return true 9785 } 9786 // match: (MULQconst [9] x) 9787 // cond: 9788 // result: (LEAQ8 x x) 9789 for { 9790 if v.AuxInt != 9 { 9791 break 9792 } 9793 x := v.Args[0] 9794 v.reset(OpAMD64LEAQ8) 9795 v.AddArg(x) 9796 v.AddArg(x) 9797 return true 9798 } 9799 // match: (MULQconst [11] x) 9800 // cond: 9801 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 9802 for { 9803 if v.AuxInt != 11 { 9804 break 9805 } 9806 x := v.Args[0] 9807 v.reset(OpAMD64LEAQ2) 9808 v.AddArg(x) 9809 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 9810 v0.AddArg(x) 9811 v0.AddArg(x) 9812 v.AddArg(v0) 9813 return true 9814 } 9815 // match: (MULQconst [13] x) 9816 // cond: 9817 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 9818 for { 9819 if v.AuxInt != 13 { 9820 break 9821 } 9822 x := v.Args[0] 9823 v.reset(OpAMD64LEAQ4) 9824 v.AddArg(x) 9825 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 9826 v0.AddArg(x) 9827 v0.AddArg(x) 9828 v.AddArg(v0) 9829 return true 9830 } 9831 // match: (MULQconst [21] x) 9832 // cond: 9833 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 9834 for { 9835 if v.AuxInt != 21 { 9836 break 9837 } 9838 x := v.Args[0] 9839 v.reset(OpAMD64LEAQ4) 9840 v.AddArg(x) 9841 v0 := b.NewValue0(v.Line, 
OpAMD64LEAQ4, v.Type) 9842 v0.AddArg(x) 9843 v0.AddArg(x) 9844 v.AddArg(v0) 9845 return true 9846 } 9847 // match: (MULQconst [25] x) 9848 // cond: 9849 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 9850 for { 9851 if v.AuxInt != 25 { 9852 break 9853 } 9854 x := v.Args[0] 9855 v.reset(OpAMD64LEAQ8) 9856 v.AddArg(x) 9857 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 9858 v0.AddArg(x) 9859 v0.AddArg(x) 9860 v.AddArg(v0) 9861 return true 9862 } 9863 // match: (MULQconst [37] x) 9864 // cond: 9865 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 9866 for { 9867 if v.AuxInt != 37 { 9868 break 9869 } 9870 x := v.Args[0] 9871 v.reset(OpAMD64LEAQ4) 9872 v.AddArg(x) 9873 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 9874 v0.AddArg(x) 9875 v0.AddArg(x) 9876 v.AddArg(v0) 9877 return true 9878 } 9879 // match: (MULQconst [41] x) 9880 // cond: 9881 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 9882 for { 9883 if v.AuxInt != 41 { 9884 break 9885 } 9886 x := v.Args[0] 9887 v.reset(OpAMD64LEAQ8) 9888 v.AddArg(x) 9889 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 9890 v0.AddArg(x) 9891 v0.AddArg(x) 9892 v.AddArg(v0) 9893 return true 9894 } 9895 // match: (MULQconst [73] x) 9896 // cond: 9897 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 9898 for { 9899 if v.AuxInt != 73 { 9900 break 9901 } 9902 x := v.Args[0] 9903 v.reset(OpAMD64LEAQ8) 9904 v.AddArg(x) 9905 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 9906 v0.AddArg(x) 9907 v0.AddArg(x) 9908 v.AddArg(v0) 9909 return true 9910 } 9911 // match: (MULQconst [c] x) 9912 // cond: isPowerOfTwo(c) 9913 // result: (SHLQconst [log2(c)] x) 9914 for { 9915 c := v.AuxInt 9916 x := v.Args[0] 9917 if !(isPowerOfTwo(c)) { 9918 break 9919 } 9920 v.reset(OpAMD64SHLQconst) 9921 v.AuxInt = log2(c) 9922 v.AddArg(x) 9923 return true 9924 } 9925 // match: (MULQconst [c] x) 9926 // cond: isPowerOfTwo(c+1) && c >= 15 9927 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 9928 for { 9929 c := v.AuxInt 9930 x := v.Args[0] 9931 if !(isPowerOfTwo(c+1) && c >= 15) { 
9932 break 9933 } 9934 v.reset(OpAMD64SUBQ) 9935 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 9936 v0.AuxInt = log2(c + 1) 9937 v0.AddArg(x) 9938 v.AddArg(v0) 9939 v.AddArg(x) 9940 return true 9941 } 9942 // match: (MULQconst [c] x) 9943 // cond: isPowerOfTwo(c-1) && c >= 17 9944 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 9945 for { 9946 c := v.AuxInt 9947 x := v.Args[0] 9948 if !(isPowerOfTwo(c-1) && c >= 17) { 9949 break 9950 } 9951 v.reset(OpAMD64LEAQ1) 9952 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 9953 v0.AuxInt = log2(c - 1) 9954 v0.AddArg(x) 9955 v.AddArg(v0) 9956 v.AddArg(x) 9957 return true 9958 } 9959 // match: (MULQconst [c] x) 9960 // cond: isPowerOfTwo(c-2) && c >= 34 9961 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 9962 for { 9963 c := v.AuxInt 9964 x := v.Args[0] 9965 if !(isPowerOfTwo(c-2) && c >= 34) { 9966 break 9967 } 9968 v.reset(OpAMD64LEAQ2) 9969 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 9970 v0.AuxInt = log2(c - 2) 9971 v0.AddArg(x) 9972 v.AddArg(v0) 9973 v.AddArg(x) 9974 return true 9975 } 9976 // match: (MULQconst [c] x) 9977 // cond: isPowerOfTwo(c-4) && c >= 68 9978 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 9979 for { 9980 c := v.AuxInt 9981 x := v.Args[0] 9982 if !(isPowerOfTwo(c-4) && c >= 68) { 9983 break 9984 } 9985 v.reset(OpAMD64LEAQ4) 9986 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 9987 v0.AuxInt = log2(c - 4) 9988 v0.AddArg(x) 9989 v.AddArg(v0) 9990 v.AddArg(x) 9991 return true 9992 } 9993 // match: (MULQconst [c] x) 9994 // cond: isPowerOfTwo(c-8) && c >= 136 9995 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 9996 for { 9997 c := v.AuxInt 9998 x := v.Args[0] 9999 if !(isPowerOfTwo(c-8) && c >= 136) { 10000 break 10001 } 10002 v.reset(OpAMD64LEAQ8) 10003 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10004 v0.AuxInt = log2(c - 8) 10005 v0.AddArg(x) 10006 v.AddArg(v0) 10007 v.AddArg(x) 10008 return true 10009 } 10010 // match: (MULQconst [c] 
x) 10011 // cond: c%3 == 0 && isPowerOfTwo(c/3) 10012 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 10013 for { 10014 c := v.AuxInt 10015 x := v.Args[0] 10016 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 10017 break 10018 } 10019 v.reset(OpAMD64SHLQconst) 10020 v.AuxInt = log2(c / 3) 10021 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10022 v0.AddArg(x) 10023 v0.AddArg(x) 10024 v.AddArg(v0) 10025 return true 10026 } 10027 // match: (MULQconst [c] x) 10028 // cond: c%5 == 0 && isPowerOfTwo(c/5) 10029 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 10030 for { 10031 c := v.AuxInt 10032 x := v.Args[0] 10033 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 10034 break 10035 } 10036 v.reset(OpAMD64SHLQconst) 10037 v.AuxInt = log2(c / 5) 10038 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10039 v0.AddArg(x) 10040 v0.AddArg(x) 10041 v.AddArg(v0) 10042 return true 10043 } 10044 // match: (MULQconst [c] x) 10045 // cond: c%9 == 0 && isPowerOfTwo(c/9) 10046 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 10047 for { 10048 c := v.AuxInt 10049 x := v.Args[0] 10050 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 10051 break 10052 } 10053 v.reset(OpAMD64SHLQconst) 10054 v.AuxInt = log2(c / 9) 10055 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 10056 v0.AddArg(x) 10057 v0.AddArg(x) 10058 v.AddArg(v0) 10059 return true 10060 } 10061 // match: (MULQconst [c] (MOVQconst [d])) 10062 // cond: 10063 // result: (MOVQconst [c*d]) 10064 for { 10065 c := v.AuxInt 10066 v_0 := v.Args[0] 10067 if v_0.Op != OpAMD64MOVQconst { 10068 break 10069 } 10070 d := v_0.AuxInt 10071 v.reset(OpAMD64MOVQconst) 10072 v.AuxInt = c * d 10073 return true 10074 } 10075 return false 10076 } 10077 func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { 10078 b := v.Block 10079 _ = b 10080 // match: (NEGL (MOVLconst [c])) 10081 // cond: 10082 // result: (MOVLconst [int64(int32(-c))]) 10083 for { 10084 v_0 := v.Args[0] 10085 if v_0.Op != OpAMD64MOVLconst { 10086 break 10087 } 10088 c := v_0.AuxInt 
10089 v.reset(OpAMD64MOVLconst) 10090 v.AuxInt = int64(int32(-c)) 10091 return true 10092 } 10093 return false 10094 } 10095 func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { 10096 b := v.Block 10097 _ = b 10098 // match: (NEGQ (MOVQconst [c])) 10099 // cond: 10100 // result: (MOVQconst [-c]) 10101 for { 10102 v_0 := v.Args[0] 10103 if v_0.Op != OpAMD64MOVQconst { 10104 break 10105 } 10106 c := v_0.AuxInt 10107 v.reset(OpAMD64MOVQconst) 10108 v.AuxInt = -c 10109 return true 10110 } 10111 return false 10112 } 10113 func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { 10114 b := v.Block 10115 _ = b 10116 // match: (NOTL (MOVLconst [c])) 10117 // cond: 10118 // result: (MOVLconst [^c]) 10119 for { 10120 v_0 := v.Args[0] 10121 if v_0.Op != OpAMD64MOVLconst { 10122 break 10123 } 10124 c := v_0.AuxInt 10125 v.reset(OpAMD64MOVLconst) 10126 v.AuxInt = ^c 10127 return true 10128 } 10129 return false 10130 } 10131 func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { 10132 b := v.Block 10133 _ = b 10134 // match: (NOTQ (MOVQconst [c])) 10135 // cond: 10136 // result: (MOVQconst [^c]) 10137 for { 10138 v_0 := v.Args[0] 10139 if v_0.Op != OpAMD64MOVQconst { 10140 break 10141 } 10142 c := v_0.AuxInt 10143 v.reset(OpAMD64MOVQconst) 10144 v.AuxInt = ^c 10145 return true 10146 } 10147 return false 10148 } 10149 func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { 10150 b := v.Block 10151 _ = b 10152 // match: (ORL x (MOVLconst [c])) 10153 // cond: 10154 // result: (ORLconst [c] x) 10155 for { 10156 x := v.Args[0] 10157 v_1 := v.Args[1] 10158 if v_1.Op != OpAMD64MOVLconst { 10159 break 10160 } 10161 c := v_1.AuxInt 10162 v.reset(OpAMD64ORLconst) 10163 v.AuxInt = c 10164 v.AddArg(x) 10165 return true 10166 } 10167 // match: (ORL (MOVLconst [c]) x) 10168 // cond: 10169 // result: (ORLconst [c] x) 10170 for { 10171 v_0 := v.Args[0] 10172 if v_0.Op != OpAMD64MOVLconst { 10173 break 10174 } 10175 c := v_0.AuxInt 10176 x := 
v.Args[1] 10177 v.reset(OpAMD64ORLconst) 10178 v.AuxInt = c 10179 v.AddArg(x) 10180 return true 10181 } 10182 // match: (ORL x x) 10183 // cond: 10184 // result: x 10185 for { 10186 x := v.Args[0] 10187 if x != v.Args[1] { 10188 break 10189 } 10190 v.reset(OpCopy) 10191 v.Type = x.Type 10192 v.AddArg(x) 10193 return true 10194 } 10195 // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) 10196 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 10197 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) 10198 for { 10199 x0 := v.Args[0] 10200 if x0.Op != OpAMD64MOVBload { 10201 break 10202 } 10203 i := x0.AuxInt 10204 s := x0.Aux 10205 p := x0.Args[0] 10206 mem := x0.Args[1] 10207 s0 := v.Args[1] 10208 if s0.Op != OpAMD64SHLLconst { 10209 break 10210 } 10211 if s0.AuxInt != 8 { 10212 break 10213 } 10214 x1 := s0.Args[0] 10215 if x1.Op != OpAMD64MOVBload { 10216 break 10217 } 10218 if x1.AuxInt != i+1 { 10219 break 10220 } 10221 if x1.Aux != s { 10222 break 10223 } 10224 if p != x1.Args[0] { 10225 break 10226 } 10227 if mem != x1.Args[1] { 10228 break 10229 } 10230 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 10231 break 10232 } 10233 b = mergePoint(b, x0, x1) 10234 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 10235 v.reset(OpCopy) 10236 v.AddArg(v0) 10237 v0.AuxInt = i 10238 v0.Aux = s 10239 v0.AddArg(p) 10240 v0.AddArg(mem) 10241 return true 10242 } 10243 // match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) 10244 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && 
mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 10245 // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) 10246 for { 10247 o0 := v.Args[0] 10248 if o0.Op != OpAMD64ORL { 10249 break 10250 } 10251 o1 := o0.Args[0] 10252 if o1.Op != OpAMD64ORL { 10253 break 10254 } 10255 x0 := o1.Args[0] 10256 if x0.Op != OpAMD64MOVBload { 10257 break 10258 } 10259 i := x0.AuxInt 10260 s := x0.Aux 10261 p := x0.Args[0] 10262 mem := x0.Args[1] 10263 s0 := o1.Args[1] 10264 if s0.Op != OpAMD64SHLLconst { 10265 break 10266 } 10267 if s0.AuxInt != 8 { 10268 break 10269 } 10270 x1 := s0.Args[0] 10271 if x1.Op != OpAMD64MOVBload { 10272 break 10273 } 10274 if x1.AuxInt != i+1 { 10275 break 10276 } 10277 if x1.Aux != s { 10278 break 10279 } 10280 if p != x1.Args[0] { 10281 break 10282 } 10283 if mem != x1.Args[1] { 10284 break 10285 } 10286 s1 := o0.Args[1] 10287 if s1.Op != OpAMD64SHLLconst { 10288 break 10289 } 10290 if s1.AuxInt != 16 { 10291 break 10292 } 10293 x2 := s1.Args[0] 10294 if x2.Op != OpAMD64MOVBload { 10295 break 10296 } 10297 if x2.AuxInt != i+2 { 10298 break 10299 } 10300 if x2.Aux != s { 10301 break 10302 } 10303 if p != x2.Args[0] { 10304 break 10305 } 10306 if mem != x2.Args[1] { 10307 break 10308 } 10309 s2 := v.Args[1] 10310 if s2.Op != OpAMD64SHLLconst { 10311 break 10312 } 10313 if s2.AuxInt != 24 { 10314 break 10315 } 10316 x3 := s2.Args[0] 10317 if x3.Op != OpAMD64MOVBload { 10318 break 10319 } 10320 if x3.AuxInt != i+3 { 10321 break 10322 } 10323 if x3.Aux != s { 10324 break 10325 } 10326 if p != x3.Args[0] { 10327 break 10328 } 10329 if mem != x3.Args[1] { 10330 break 10331 } 10332 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 10333 break 10334 } 10335 b = mergePoint(b, x0, x1, x2, x3) 10336 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 10337 v.reset(OpCopy) 10338 v.AddArg(v0) 10339 v0.AuxInt = i 10340 v0.Aux = s 10341 v0.AddArg(p) 10342 v0.AddArg(mem) 10343 return true 10344 } 10345 // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) 10346 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 10347 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem) 10348 for { 10349 x0 := v.Args[0] 10350 if x0.Op != OpAMD64MOVBloadidx1 { 10351 break 10352 } 10353 i := x0.AuxInt 10354 s := x0.Aux 10355 p := x0.Args[0] 10356 idx := x0.Args[1] 10357 mem := x0.Args[2] 10358 s0 := v.Args[1] 10359 if s0.Op != OpAMD64SHLLconst { 10360 break 10361 } 10362 if s0.AuxInt != 8 { 10363 break 10364 } 10365 x1 := s0.Args[0] 10366 if x1.Op != OpAMD64MOVBloadidx1 { 10367 break 10368 } 10369 if x1.AuxInt != i+1 { 10370 break 10371 } 10372 if x1.Aux != s { 10373 break 10374 } 10375 if p != x1.Args[0] { 10376 break 10377 } 10378 if idx != x1.Args[1] { 10379 break 10380 } 10381 if mem != x1.Args[2] { 10382 break 10383 } 10384 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 10385 break 10386 } 10387 b = mergePoint(b, x0, x1) 10388 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 10389 v.reset(OpCopy) 10390 v.AddArg(v0) 10391 v0.AuxInt = i 10392 v0.Aux = s 10393 v0.AddArg(p) 10394 v0.AddArg(idx) 10395 v0.AddArg(mem) 10396 return true 10397 } 10398 // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) 10399 
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 10400 // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem) 10401 for { 10402 o0 := v.Args[0] 10403 if o0.Op != OpAMD64ORL { 10404 break 10405 } 10406 o1 := o0.Args[0] 10407 if o1.Op != OpAMD64ORL { 10408 break 10409 } 10410 x0 := o1.Args[0] 10411 if x0.Op != OpAMD64MOVBloadidx1 { 10412 break 10413 } 10414 i := x0.AuxInt 10415 s := x0.Aux 10416 p := x0.Args[0] 10417 idx := x0.Args[1] 10418 mem := x0.Args[2] 10419 s0 := o1.Args[1] 10420 if s0.Op != OpAMD64SHLLconst { 10421 break 10422 } 10423 if s0.AuxInt != 8 { 10424 break 10425 } 10426 x1 := s0.Args[0] 10427 if x1.Op != OpAMD64MOVBloadidx1 { 10428 break 10429 } 10430 if x1.AuxInt != i+1 { 10431 break 10432 } 10433 if x1.Aux != s { 10434 break 10435 } 10436 if p != x1.Args[0] { 10437 break 10438 } 10439 if idx != x1.Args[1] { 10440 break 10441 } 10442 if mem != x1.Args[2] { 10443 break 10444 } 10445 s1 := o0.Args[1] 10446 if s1.Op != OpAMD64SHLLconst { 10447 break 10448 } 10449 if s1.AuxInt != 16 { 10450 break 10451 } 10452 x2 := s1.Args[0] 10453 if x2.Op != OpAMD64MOVBloadidx1 { 10454 break 10455 } 10456 if x2.AuxInt != i+2 { 10457 break 10458 } 10459 if x2.Aux != s { 10460 break 10461 } 10462 if p != x2.Args[0] { 10463 break 10464 } 10465 if idx != x2.Args[1] { 10466 break 10467 } 10468 if mem != x2.Args[2] { 10469 break 10470 } 10471 s2 := v.Args[1] 10472 if s2.Op != OpAMD64SHLLconst { 10473 break 10474 } 10475 if s2.AuxInt != 24 { 10476 break 10477 } 10478 x3 := s2.Args[0] 10479 if x3.Op != OpAMD64MOVBloadidx1 { 10480 break 10481 } 10482 if x3.AuxInt != i+3 { 10483 break 10484 } 10485 if x3.Aux != s { 10486 break 10487 } 10488 if p != x3.Args[0] { 10489 break 10490 } 
10491 if idx != x3.Args[1] { 10492 break 10493 } 10494 if mem != x3.Args[2] { 10495 break 10496 } 10497 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 10498 break 10499 } 10500 b = mergePoint(b, x0, x1, x2, x3) 10501 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 10502 v.reset(OpCopy) 10503 v.AddArg(v0) 10504 v0.AuxInt = i 10505 v0.Aux = s 10506 v0.AddArg(p) 10507 v0.AddArg(idx) 10508 v0.AddArg(mem) 10509 return true 10510 } 10511 return false 10512 } 10513 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { 10514 b := v.Block 10515 _ = b 10516 // match: (ORLconst [c] x) 10517 // cond: int32(c)==0 10518 // result: x 10519 for { 10520 c := v.AuxInt 10521 x := v.Args[0] 10522 if !(int32(c) == 0) { 10523 break 10524 } 10525 v.reset(OpCopy) 10526 v.Type = x.Type 10527 v.AddArg(x) 10528 return true 10529 } 10530 // match: (ORLconst [c] _) 10531 // cond: int32(c)==-1 10532 // result: (MOVLconst [-1]) 10533 for { 10534 c := v.AuxInt 10535 if !(int32(c) == -1) { 10536 break 10537 } 10538 v.reset(OpAMD64MOVLconst) 10539 v.AuxInt = -1 10540 return true 10541 } 10542 // match: (ORLconst [c] (MOVLconst [d])) 10543 // cond: 10544 // result: (MOVLconst [c|d]) 10545 for { 10546 c := v.AuxInt 10547 v_0 := v.Args[0] 10548 if v_0.Op != OpAMD64MOVLconst { 10549 break 10550 } 10551 d := v_0.AuxInt 10552 v.reset(OpAMD64MOVLconst) 10553 v.AuxInt = c | d 10554 return true 10555 } 10556 return false 10557 } 10558 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { 10559 b := v.Block 10560 _ = b 10561 // match: (ORQ x (MOVQconst [c])) 10562 // cond: is32Bit(c) 10563 // result: (ORQconst [c] x) 10564 for { 10565 x := v.Args[0] 10566 v_1 := v.Args[1] 10567 if v_1.Op != 
OpAMD64MOVQconst { 10568 break 10569 } 10570 c := v_1.AuxInt 10571 if !(is32Bit(c)) { 10572 break 10573 } 10574 v.reset(OpAMD64ORQconst) 10575 v.AuxInt = c 10576 v.AddArg(x) 10577 return true 10578 } 10579 // match: (ORQ (MOVQconst [c]) x) 10580 // cond: is32Bit(c) 10581 // result: (ORQconst [c] x) 10582 for { 10583 v_0 := v.Args[0] 10584 if v_0.Op != OpAMD64MOVQconst { 10585 break 10586 } 10587 c := v_0.AuxInt 10588 x := v.Args[1] 10589 if !(is32Bit(c)) { 10590 break 10591 } 10592 v.reset(OpAMD64ORQconst) 10593 v.AuxInt = c 10594 v.AddArg(x) 10595 return true 10596 } 10597 // match: (ORQ x x) 10598 // cond: 10599 // result: x 10600 for { 10601 x := v.Args[0] 10602 if x != v.Args[1] { 10603 break 10604 } 10605 v.reset(OpCopy) 10606 v.Type = x.Type 10607 v.AddArg(x) 10608 return true 10609 } 10610 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) 10611 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 10612 // result: 
@mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) 10613 for { 10614 o0 := v.Args[0] 10615 if o0.Op != OpAMD64ORQ { 10616 break 10617 } 10618 o1 := o0.Args[0] 10619 if o1.Op != OpAMD64ORQ { 10620 break 10621 } 10622 o2 := o1.Args[0] 10623 if o2.Op != OpAMD64ORQ { 10624 break 10625 } 10626 o3 := o2.Args[0] 10627 if o3.Op != OpAMD64ORQ { 10628 break 10629 } 10630 o4 := o3.Args[0] 10631 if o4.Op != OpAMD64ORQ { 10632 break 10633 } 10634 o5 := o4.Args[0] 10635 if o5.Op != OpAMD64ORQ { 10636 break 10637 } 10638 x0 := o5.Args[0] 10639 if x0.Op != OpAMD64MOVBload { 10640 break 10641 } 10642 i := x0.AuxInt 10643 s := x0.Aux 10644 p := x0.Args[0] 10645 mem := x0.Args[1] 10646 s0 := o5.Args[1] 10647 if s0.Op != OpAMD64SHLQconst { 10648 break 10649 } 10650 if s0.AuxInt != 8 { 10651 break 10652 } 10653 x1 := s0.Args[0] 10654 if x1.Op != OpAMD64MOVBload { 10655 break 10656 } 10657 if x1.AuxInt != i+1 { 10658 break 10659 } 10660 if x1.Aux != s { 10661 break 10662 } 10663 if p != x1.Args[0] { 10664 break 10665 } 10666 if mem != x1.Args[1] { 10667 break 10668 } 10669 s1 := o4.Args[1] 10670 if s1.Op != OpAMD64SHLQconst { 10671 break 10672 } 10673 if s1.AuxInt != 16 { 10674 break 10675 } 10676 x2 := s1.Args[0] 10677 if x2.Op != OpAMD64MOVBload { 10678 break 10679 } 10680 if x2.AuxInt != i+2 { 10681 break 10682 } 10683 if x2.Aux != s { 10684 break 10685 } 10686 if p != x2.Args[0] { 10687 break 10688 } 10689 if mem != x2.Args[1] { 10690 break 10691 } 10692 s2 := o3.Args[1] 10693 if s2.Op != OpAMD64SHLQconst { 10694 break 10695 } 10696 if s2.AuxInt != 24 { 10697 break 10698 } 10699 x3 := s2.Args[0] 10700 if x3.Op != OpAMD64MOVBload { 10701 break 10702 } 10703 if x3.AuxInt != i+3 { 10704 break 10705 } 10706 if x3.Aux != s { 10707 break 10708 } 10709 if p != x3.Args[0] { 10710 break 10711 } 10712 if mem != x3.Args[1] { 10713 break 10714 } 10715 s3 := o2.Args[1] 10716 if s3.Op != OpAMD64SHLQconst { 10717 break 10718 } 10719 if s3.AuxInt != 32 { 10720 break 10721 } 10722 x4 := 
s3.Args[0] 10723 if x4.Op != OpAMD64MOVBload { 10724 break 10725 } 10726 if x4.AuxInt != i+4 { 10727 break 10728 } 10729 if x4.Aux != s { 10730 break 10731 } 10732 if p != x4.Args[0] { 10733 break 10734 } 10735 if mem != x4.Args[1] { 10736 break 10737 } 10738 s4 := o1.Args[1] 10739 if s4.Op != OpAMD64SHLQconst { 10740 break 10741 } 10742 if s4.AuxInt != 40 { 10743 break 10744 } 10745 x5 := s4.Args[0] 10746 if x5.Op != OpAMD64MOVBload { 10747 break 10748 } 10749 if x5.AuxInt != i+5 { 10750 break 10751 } 10752 if x5.Aux != s { 10753 break 10754 } 10755 if p != x5.Args[0] { 10756 break 10757 } 10758 if mem != x5.Args[1] { 10759 break 10760 } 10761 s5 := o0.Args[1] 10762 if s5.Op != OpAMD64SHLQconst { 10763 break 10764 } 10765 if s5.AuxInt != 48 { 10766 break 10767 } 10768 x6 := s5.Args[0] 10769 if x6.Op != OpAMD64MOVBload { 10770 break 10771 } 10772 if x6.AuxInt != i+6 { 10773 break 10774 } 10775 if x6.Aux != s { 10776 break 10777 } 10778 if p != x6.Args[0] { 10779 break 10780 } 10781 if mem != x6.Args[1] { 10782 break 10783 } 10784 s6 := v.Args[1] 10785 if s6.Op != OpAMD64SHLQconst { 10786 break 10787 } 10788 if s6.AuxInt != 56 { 10789 break 10790 } 10791 x7 := s6.Args[0] 10792 if x7.Op != OpAMD64MOVBload { 10793 break 10794 } 10795 if x7.AuxInt != i+7 { 10796 break 10797 } 10798 if x7.Aux != s { 10799 break 10800 } 10801 if p != x7.Args[0] { 10802 break 10803 } 10804 if mem != x7.Args[1] { 10805 break 10806 } 10807 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && 
clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 10808 break 10809 } 10810 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 10811 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 10812 v.reset(OpCopy) 10813 v.AddArg(v0) 10814 v0.AuxInt = i 10815 v0.Aux = s 10816 v0.AddArg(p) 10817 v0.AddArg(mem) 10818 return true 10819 } 10820 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) 10821 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 10822 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem) 10823 for { 10824 o0 := v.Args[0] 10825 if o0.Op != OpAMD64ORQ { 10826 break 10827 } 10828 o1 := o0.Args[0] 10829 if o1.Op != OpAMD64ORQ { 10830 break 10831 } 10832 o2 := o1.Args[0] 10833 if o2.Op != OpAMD64ORQ { 10834 break 10835 
} 10836 o3 := o2.Args[0] 10837 if o3.Op != OpAMD64ORQ { 10838 break 10839 } 10840 o4 := o3.Args[0] 10841 if o4.Op != OpAMD64ORQ { 10842 break 10843 } 10844 o5 := o4.Args[0] 10845 if o5.Op != OpAMD64ORQ { 10846 break 10847 } 10848 x0 := o5.Args[0] 10849 if x0.Op != OpAMD64MOVBloadidx1 { 10850 break 10851 } 10852 i := x0.AuxInt 10853 s := x0.Aux 10854 p := x0.Args[0] 10855 idx := x0.Args[1] 10856 mem := x0.Args[2] 10857 s0 := o5.Args[1] 10858 if s0.Op != OpAMD64SHLQconst { 10859 break 10860 } 10861 if s0.AuxInt != 8 { 10862 break 10863 } 10864 x1 := s0.Args[0] 10865 if x1.Op != OpAMD64MOVBloadidx1 { 10866 break 10867 } 10868 if x1.AuxInt != i+1 { 10869 break 10870 } 10871 if x1.Aux != s { 10872 break 10873 } 10874 if p != x1.Args[0] { 10875 break 10876 } 10877 if idx != x1.Args[1] { 10878 break 10879 } 10880 if mem != x1.Args[2] { 10881 break 10882 } 10883 s1 := o4.Args[1] 10884 if s1.Op != OpAMD64SHLQconst { 10885 break 10886 } 10887 if s1.AuxInt != 16 { 10888 break 10889 } 10890 x2 := s1.Args[0] 10891 if x2.Op != OpAMD64MOVBloadidx1 { 10892 break 10893 } 10894 if x2.AuxInt != i+2 { 10895 break 10896 } 10897 if x2.Aux != s { 10898 break 10899 } 10900 if p != x2.Args[0] { 10901 break 10902 } 10903 if idx != x2.Args[1] { 10904 break 10905 } 10906 if mem != x2.Args[2] { 10907 break 10908 } 10909 s2 := o3.Args[1] 10910 if s2.Op != OpAMD64SHLQconst { 10911 break 10912 } 10913 if s2.AuxInt != 24 { 10914 break 10915 } 10916 x3 := s2.Args[0] 10917 if x3.Op != OpAMD64MOVBloadidx1 { 10918 break 10919 } 10920 if x3.AuxInt != i+3 { 10921 break 10922 } 10923 if x3.Aux != s { 10924 break 10925 } 10926 if p != x3.Args[0] { 10927 break 10928 } 10929 if idx != x3.Args[1] { 10930 break 10931 } 10932 if mem != x3.Args[2] { 10933 break 10934 } 10935 s3 := o2.Args[1] 10936 if s3.Op != OpAMD64SHLQconst { 10937 break 10938 } 10939 if s3.AuxInt != 32 { 10940 break 10941 } 10942 x4 := s3.Args[0] 10943 if x4.Op != OpAMD64MOVBloadidx1 { 10944 break 10945 } 10946 if x4.AuxInt != i+4 { 10947 
break 10948 } 10949 if x4.Aux != s { 10950 break 10951 } 10952 if p != x4.Args[0] { 10953 break 10954 } 10955 if idx != x4.Args[1] { 10956 break 10957 } 10958 if mem != x4.Args[2] { 10959 break 10960 } 10961 s4 := o1.Args[1] 10962 if s4.Op != OpAMD64SHLQconst { 10963 break 10964 } 10965 if s4.AuxInt != 40 { 10966 break 10967 } 10968 x5 := s4.Args[0] 10969 if x5.Op != OpAMD64MOVBloadidx1 { 10970 break 10971 } 10972 if x5.AuxInt != i+5 { 10973 break 10974 } 10975 if x5.Aux != s { 10976 break 10977 } 10978 if p != x5.Args[0] { 10979 break 10980 } 10981 if idx != x5.Args[1] { 10982 break 10983 } 10984 if mem != x5.Args[2] { 10985 break 10986 } 10987 s5 := o0.Args[1] 10988 if s5.Op != OpAMD64SHLQconst { 10989 break 10990 } 10991 if s5.AuxInt != 48 { 10992 break 10993 } 10994 x6 := s5.Args[0] 10995 if x6.Op != OpAMD64MOVBloadidx1 { 10996 break 10997 } 10998 if x6.AuxInt != i+6 { 10999 break 11000 } 11001 if x6.Aux != s { 11002 break 11003 } 11004 if p != x6.Args[0] { 11005 break 11006 } 11007 if idx != x6.Args[1] { 11008 break 11009 } 11010 if mem != x6.Args[2] { 11011 break 11012 } 11013 s6 := v.Args[1] 11014 if s6.Op != OpAMD64SHLQconst { 11015 break 11016 } 11017 if s6.AuxInt != 56 { 11018 break 11019 } 11020 x7 := s6.Args[0] 11021 if x7.Op != OpAMD64MOVBloadidx1 { 11022 break 11023 } 11024 if x7.AuxInt != i+7 { 11025 break 11026 } 11027 if x7.Aux != s { 11028 break 11029 } 11030 if p != x7.Args[0] { 11031 break 11032 } 11033 if idx != x7.Args[1] { 11034 break 11035 } 11036 if mem != x7.Args[2] { 11037 break 11038 } 11039 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 
&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 11040 break 11041 } 11042 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 11043 v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type) 11044 v.reset(OpCopy) 11045 v.AddArg(v0) 11046 v0.AuxInt = i 11047 v0.Aux = s 11048 v0.AddArg(p) 11049 v0.AddArg(idx) 11050 v0.AddArg(mem) 11051 return true 11052 } 11053 return false 11054 } 11055 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { 11056 b := v.Block 11057 _ = b 11058 // match: (ORQconst [0] x) 11059 // cond: 11060 // result: x 11061 for { 11062 if v.AuxInt != 0 { 11063 break 11064 } 11065 x := v.Args[0] 11066 v.reset(OpCopy) 11067 v.Type = x.Type 11068 v.AddArg(x) 11069 return true 11070 } 11071 // match: (ORQconst [-1] _) 11072 // cond: 11073 // result: (MOVQconst [-1]) 11074 for { 11075 if v.AuxInt != -1 { 11076 break 11077 } 11078 v.reset(OpAMD64MOVQconst) 11079 v.AuxInt = -1 11080 return true 11081 } 11082 // match: (ORQconst [c] (MOVQconst [d])) 11083 // cond: 11084 // result: (MOVQconst [c|d]) 11085 for { 11086 c := v.AuxInt 11087 v_0 := v.Args[0] 11088 if v_0.Op != OpAMD64MOVQconst { 11089 break 11090 } 11091 d := v_0.AuxInt 11092 v.reset(OpAMD64MOVQconst) 11093 v.AuxInt = c | d 11094 return true 11095 } 11096 return false 11097 } 11098 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool { 11099 b := v.Block 11100 _ = b 11101 // match: (ROLBconst [c] (ROLBconst [d] x)) 11102 // cond: 11103 // result: (ROLBconst [(c+d)& 7] x) 11104 for { 11105 c := v.AuxInt 11106 v_0 := v.Args[0] 11107 if v_0.Op != OpAMD64ROLBconst { 11108 break 11109 } 11110 d := v_0.AuxInt 11111 x := v_0.Args[0] 11112 v.reset(OpAMD64ROLBconst) 11113 v.AuxInt = (c + d) & 7 11114 v.AddArg(x) 11115 return true 11116 } 11117 // match: (ROLBconst [0] 
x) 11118 // cond: 11119 // result: x 11120 for { 11121 if v.AuxInt != 0 { 11122 break 11123 } 11124 x := v.Args[0] 11125 v.reset(OpCopy) 11126 v.Type = x.Type 11127 v.AddArg(x) 11128 return true 11129 } 11130 return false 11131 } 11132 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool { 11133 b := v.Block 11134 _ = b 11135 // match: (ROLLconst [c] (ROLLconst [d] x)) 11136 // cond: 11137 // result: (ROLLconst [(c+d)&31] x) 11138 for { 11139 c := v.AuxInt 11140 v_0 := v.Args[0] 11141 if v_0.Op != OpAMD64ROLLconst { 11142 break 11143 } 11144 d := v_0.AuxInt 11145 x := v_0.Args[0] 11146 v.reset(OpAMD64ROLLconst) 11147 v.AuxInt = (c + d) & 31 11148 v.AddArg(x) 11149 return true 11150 } 11151 // match: (ROLLconst [0] x) 11152 // cond: 11153 // result: x 11154 for { 11155 if v.AuxInt != 0 { 11156 break 11157 } 11158 x := v.Args[0] 11159 v.reset(OpCopy) 11160 v.Type = x.Type 11161 v.AddArg(x) 11162 return true 11163 } 11164 return false 11165 } 11166 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool { 11167 b := v.Block 11168 _ = b 11169 // match: (ROLQconst [c] (ROLQconst [d] x)) 11170 // cond: 11171 // result: (ROLQconst [(c+d)&63] x) 11172 for { 11173 c := v.AuxInt 11174 v_0 := v.Args[0] 11175 if v_0.Op != OpAMD64ROLQconst { 11176 break 11177 } 11178 d := v_0.AuxInt 11179 x := v_0.Args[0] 11180 v.reset(OpAMD64ROLQconst) 11181 v.AuxInt = (c + d) & 63 11182 v.AddArg(x) 11183 return true 11184 } 11185 // match: (ROLQconst [0] x) 11186 // cond: 11187 // result: x 11188 for { 11189 if v.AuxInt != 0 { 11190 break 11191 } 11192 x := v.Args[0] 11193 v.reset(OpCopy) 11194 v.Type = x.Type 11195 v.AddArg(x) 11196 return true 11197 } 11198 return false 11199 } 11200 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool { 11201 b := v.Block 11202 _ = b 11203 // match: (ROLWconst [c] (ROLWconst [d] x)) 11204 // cond: 11205 // result: (ROLWconst [(c+d)&15] x) 11206 for { 11207 c := v.AuxInt 11208 v_0 := v.Args[0] 11209 if 
v_0.Op != OpAMD64ROLWconst { 11210 break 11211 } 11212 d := v_0.AuxInt 11213 x := v_0.Args[0] 11214 v.reset(OpAMD64ROLWconst) 11215 v.AuxInt = (c + d) & 15 11216 v.AddArg(x) 11217 return true 11218 } 11219 // match: (ROLWconst [0] x) 11220 // cond: 11221 // result: x 11222 for { 11223 if v.AuxInt != 0 { 11224 break 11225 } 11226 x := v.Args[0] 11227 v.reset(OpCopy) 11228 v.Type = x.Type 11229 v.AddArg(x) 11230 return true 11231 } 11232 return false 11233 } 11234 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { 11235 b := v.Block 11236 _ = b 11237 // match: (SARB x (MOVQconst [c])) 11238 // cond: 11239 // result: (SARBconst [c&31] x) 11240 for { 11241 x := v.Args[0] 11242 v_1 := v.Args[1] 11243 if v_1.Op != OpAMD64MOVQconst { 11244 break 11245 } 11246 c := v_1.AuxInt 11247 v.reset(OpAMD64SARBconst) 11248 v.AuxInt = c & 31 11249 v.AddArg(x) 11250 return true 11251 } 11252 // match: (SARB x (MOVLconst [c])) 11253 // cond: 11254 // result: (SARBconst [c&31] x) 11255 for { 11256 x := v.Args[0] 11257 v_1 := v.Args[1] 11258 if v_1.Op != OpAMD64MOVLconst { 11259 break 11260 } 11261 c := v_1.AuxInt 11262 v.reset(OpAMD64SARBconst) 11263 v.AuxInt = c & 31 11264 v.AddArg(x) 11265 return true 11266 } 11267 return false 11268 } 11269 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { 11270 b := v.Block 11271 _ = b 11272 // match: (SARBconst [c] (MOVQconst [d])) 11273 // cond: 11274 // result: (MOVQconst [d>>uint64(c)]) 11275 for { 11276 c := v.AuxInt 11277 v_0 := v.Args[0] 11278 if v_0.Op != OpAMD64MOVQconst { 11279 break 11280 } 11281 d := v_0.AuxInt 11282 v.reset(OpAMD64MOVQconst) 11283 v.AuxInt = d >> uint64(c) 11284 return true 11285 } 11286 return false 11287 } 11288 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { 11289 b := v.Block 11290 _ = b 11291 // match: (SARL x (MOVQconst [c])) 11292 // cond: 11293 // result: (SARLconst [c&31] x) 11294 for { 11295 x := v.Args[0] 11296 v_1 := v.Args[1] 11297 if v_1.Op != 
OpAMD64MOVQconst { 11298 break 11299 } 11300 c := v_1.AuxInt 11301 v.reset(OpAMD64SARLconst) 11302 v.AuxInt = c & 31 11303 v.AddArg(x) 11304 return true 11305 } 11306 // match: (SARL x (MOVLconst [c])) 11307 // cond: 11308 // result: (SARLconst [c&31] x) 11309 for { 11310 x := v.Args[0] 11311 v_1 := v.Args[1] 11312 if v_1.Op != OpAMD64MOVLconst { 11313 break 11314 } 11315 c := v_1.AuxInt 11316 v.reset(OpAMD64SARLconst) 11317 v.AuxInt = c & 31 11318 v.AddArg(x) 11319 return true 11320 } 11321 // match: (SARL x (ANDLconst [31] y)) 11322 // cond: 11323 // result: (SARL x y) 11324 for { 11325 x := v.Args[0] 11326 v_1 := v.Args[1] 11327 if v_1.Op != OpAMD64ANDLconst { 11328 break 11329 } 11330 if v_1.AuxInt != 31 { 11331 break 11332 } 11333 y := v_1.Args[0] 11334 v.reset(OpAMD64SARL) 11335 v.AddArg(x) 11336 v.AddArg(y) 11337 return true 11338 } 11339 return false 11340 } 11341 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { 11342 b := v.Block 11343 _ = b 11344 // match: (SARLconst [c] (MOVQconst [d])) 11345 // cond: 11346 // result: (MOVQconst [d>>uint64(c)]) 11347 for { 11348 c := v.AuxInt 11349 v_0 := v.Args[0] 11350 if v_0.Op != OpAMD64MOVQconst { 11351 break 11352 } 11353 d := v_0.AuxInt 11354 v.reset(OpAMD64MOVQconst) 11355 v.AuxInt = d >> uint64(c) 11356 return true 11357 } 11358 return false 11359 } 11360 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { 11361 b := v.Block 11362 _ = b 11363 // match: (SARQ x (MOVQconst [c])) 11364 // cond: 11365 // result: (SARQconst [c&63] x) 11366 for { 11367 x := v.Args[0] 11368 v_1 := v.Args[1] 11369 if v_1.Op != OpAMD64MOVQconst { 11370 break 11371 } 11372 c := v_1.AuxInt 11373 v.reset(OpAMD64SARQconst) 11374 v.AuxInt = c & 63 11375 v.AddArg(x) 11376 return true 11377 } 11378 // match: (SARQ x (MOVLconst [c])) 11379 // cond: 11380 // result: (SARQconst [c&63] x) 11381 for { 11382 x := v.Args[0] 11383 v_1 := v.Args[1] 11384 if v_1.Op != OpAMD64MOVLconst { 11385 break 11386 } 11387 c 
:= v_1.AuxInt 11388 v.reset(OpAMD64SARQconst) 11389 v.AuxInt = c & 63 11390 v.AddArg(x) 11391 return true 11392 } 11393 // match: (SARQ x (ANDQconst [63] y)) 11394 // cond: 11395 // result: (SARQ x y) 11396 for { 11397 x := v.Args[0] 11398 v_1 := v.Args[1] 11399 if v_1.Op != OpAMD64ANDQconst { 11400 break 11401 } 11402 if v_1.AuxInt != 63 { 11403 break 11404 } 11405 y := v_1.Args[0] 11406 v.reset(OpAMD64SARQ) 11407 v.AddArg(x) 11408 v.AddArg(y) 11409 return true 11410 } 11411 return false 11412 } 11413 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { 11414 b := v.Block 11415 _ = b 11416 // match: (SARQconst [c] (MOVQconst [d])) 11417 // cond: 11418 // result: (MOVQconst [d>>uint64(c)]) 11419 for { 11420 c := v.AuxInt 11421 v_0 := v.Args[0] 11422 if v_0.Op != OpAMD64MOVQconst { 11423 break 11424 } 11425 d := v_0.AuxInt 11426 v.reset(OpAMD64MOVQconst) 11427 v.AuxInt = d >> uint64(c) 11428 return true 11429 } 11430 return false 11431 } 11432 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { 11433 b := v.Block 11434 _ = b 11435 // match: (SARW x (MOVQconst [c])) 11436 // cond: 11437 // result: (SARWconst [c&31] x) 11438 for { 11439 x := v.Args[0] 11440 v_1 := v.Args[1] 11441 if v_1.Op != OpAMD64MOVQconst { 11442 break 11443 } 11444 c := v_1.AuxInt 11445 v.reset(OpAMD64SARWconst) 11446 v.AuxInt = c & 31 11447 v.AddArg(x) 11448 return true 11449 } 11450 // match: (SARW x (MOVLconst [c])) 11451 // cond: 11452 // result: (SARWconst [c&31] x) 11453 for { 11454 x := v.Args[0] 11455 v_1 := v.Args[1] 11456 if v_1.Op != OpAMD64MOVLconst { 11457 break 11458 } 11459 c := v_1.AuxInt 11460 v.reset(OpAMD64SARWconst) 11461 v.AuxInt = c & 31 11462 v.AddArg(x) 11463 return true 11464 } 11465 return false 11466 } 11467 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { 11468 b := v.Block 11469 _ = b 11470 // match: (SARWconst [c] (MOVQconst [d])) 11471 // cond: 11472 // result: (MOVQconst [d>>uint64(c)]) 11473 for { 11474 c 
:= v.AuxInt 11475 v_0 := v.Args[0] 11476 if v_0.Op != OpAMD64MOVQconst { 11477 break 11478 } 11479 d := v_0.AuxInt 11480 v.reset(OpAMD64MOVQconst) 11481 v.AuxInt = d >> uint64(c) 11482 return true 11483 } 11484 return false 11485 } 11486 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { 11487 b := v.Block 11488 _ = b 11489 // match: (SBBLcarrymask (FlagEQ)) 11490 // cond: 11491 // result: (MOVLconst [0]) 11492 for { 11493 v_0 := v.Args[0] 11494 if v_0.Op != OpAMD64FlagEQ { 11495 break 11496 } 11497 v.reset(OpAMD64MOVLconst) 11498 v.AuxInt = 0 11499 return true 11500 } 11501 // match: (SBBLcarrymask (FlagLT_ULT)) 11502 // cond: 11503 // result: (MOVLconst [-1]) 11504 for { 11505 v_0 := v.Args[0] 11506 if v_0.Op != OpAMD64FlagLT_ULT { 11507 break 11508 } 11509 v.reset(OpAMD64MOVLconst) 11510 v.AuxInt = -1 11511 return true 11512 } 11513 // match: (SBBLcarrymask (FlagLT_UGT)) 11514 // cond: 11515 // result: (MOVLconst [0]) 11516 for { 11517 v_0 := v.Args[0] 11518 if v_0.Op != OpAMD64FlagLT_UGT { 11519 break 11520 } 11521 v.reset(OpAMD64MOVLconst) 11522 v.AuxInt = 0 11523 return true 11524 } 11525 // match: (SBBLcarrymask (FlagGT_ULT)) 11526 // cond: 11527 // result: (MOVLconst [-1]) 11528 for { 11529 v_0 := v.Args[0] 11530 if v_0.Op != OpAMD64FlagGT_ULT { 11531 break 11532 } 11533 v.reset(OpAMD64MOVLconst) 11534 v.AuxInt = -1 11535 return true 11536 } 11537 // match: (SBBLcarrymask (FlagGT_UGT)) 11538 // cond: 11539 // result: (MOVLconst [0]) 11540 for { 11541 v_0 := v.Args[0] 11542 if v_0.Op != OpAMD64FlagGT_UGT { 11543 break 11544 } 11545 v.reset(OpAMD64MOVLconst) 11546 v.AuxInt = 0 11547 return true 11548 } 11549 return false 11550 } 11551 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { 11552 b := v.Block 11553 _ = b 11554 // match: (SBBQcarrymask (FlagEQ)) 11555 // cond: 11556 // result: (MOVQconst [0]) 11557 for { 11558 v_0 := v.Args[0] 11559 if v_0.Op != OpAMD64FlagEQ { 11560 break 11561 } 11562 
v.reset(OpAMD64MOVQconst) 11563 v.AuxInt = 0 11564 return true 11565 } 11566 // match: (SBBQcarrymask (FlagLT_ULT)) 11567 // cond: 11568 // result: (MOVQconst [-1]) 11569 for { 11570 v_0 := v.Args[0] 11571 if v_0.Op != OpAMD64FlagLT_ULT { 11572 break 11573 } 11574 v.reset(OpAMD64MOVQconst) 11575 v.AuxInt = -1 11576 return true 11577 } 11578 // match: (SBBQcarrymask (FlagLT_UGT)) 11579 // cond: 11580 // result: (MOVQconst [0]) 11581 for { 11582 v_0 := v.Args[0] 11583 if v_0.Op != OpAMD64FlagLT_UGT { 11584 break 11585 } 11586 v.reset(OpAMD64MOVQconst) 11587 v.AuxInt = 0 11588 return true 11589 } 11590 // match: (SBBQcarrymask (FlagGT_ULT)) 11591 // cond: 11592 // result: (MOVQconst [-1]) 11593 for { 11594 v_0 := v.Args[0] 11595 if v_0.Op != OpAMD64FlagGT_ULT { 11596 break 11597 } 11598 v.reset(OpAMD64MOVQconst) 11599 v.AuxInt = -1 11600 return true 11601 } 11602 // match: (SBBQcarrymask (FlagGT_UGT)) 11603 // cond: 11604 // result: (MOVQconst [0]) 11605 for { 11606 v_0 := v.Args[0] 11607 if v_0.Op != OpAMD64FlagGT_UGT { 11608 break 11609 } 11610 v.reset(OpAMD64MOVQconst) 11611 v.AuxInt = 0 11612 return true 11613 } 11614 return false 11615 } 11616 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { 11617 b := v.Block 11618 _ = b 11619 // match: (SETA (InvertFlags x)) 11620 // cond: 11621 // result: (SETB x) 11622 for { 11623 v_0 := v.Args[0] 11624 if v_0.Op != OpAMD64InvertFlags { 11625 break 11626 } 11627 x := v_0.Args[0] 11628 v.reset(OpAMD64SETB) 11629 v.AddArg(x) 11630 return true 11631 } 11632 // match: (SETA (FlagEQ)) 11633 // cond: 11634 // result: (MOVLconst [0]) 11635 for { 11636 v_0 := v.Args[0] 11637 if v_0.Op != OpAMD64FlagEQ { 11638 break 11639 } 11640 v.reset(OpAMD64MOVLconst) 11641 v.AuxInt = 0 11642 return true 11643 } 11644 // match: (SETA (FlagLT_ULT)) 11645 // cond: 11646 // result: (MOVLconst [0]) 11647 for { 11648 v_0 := v.Args[0] 11649 if v_0.Op != OpAMD64FlagLT_ULT { 11650 break 11651 } 11652 v.reset(OpAMD64MOVLconst) 11653 
v.AuxInt = 0 11654 return true 11655 } 11656 // match: (SETA (FlagLT_UGT)) 11657 // cond: 11658 // result: (MOVLconst [1]) 11659 for { 11660 v_0 := v.Args[0] 11661 if v_0.Op != OpAMD64FlagLT_UGT { 11662 break 11663 } 11664 v.reset(OpAMD64MOVLconst) 11665 v.AuxInt = 1 11666 return true 11667 } 11668 // match: (SETA (FlagGT_ULT)) 11669 // cond: 11670 // result: (MOVLconst [0]) 11671 for { 11672 v_0 := v.Args[0] 11673 if v_0.Op != OpAMD64FlagGT_ULT { 11674 break 11675 } 11676 v.reset(OpAMD64MOVLconst) 11677 v.AuxInt = 0 11678 return true 11679 } 11680 // match: (SETA (FlagGT_UGT)) 11681 // cond: 11682 // result: (MOVLconst [1]) 11683 for { 11684 v_0 := v.Args[0] 11685 if v_0.Op != OpAMD64FlagGT_UGT { 11686 break 11687 } 11688 v.reset(OpAMD64MOVLconst) 11689 v.AuxInt = 1 11690 return true 11691 } 11692 return false 11693 } 11694 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { 11695 b := v.Block 11696 _ = b 11697 // match: (SETAE (InvertFlags x)) 11698 // cond: 11699 // result: (SETBE x) 11700 for { 11701 v_0 := v.Args[0] 11702 if v_0.Op != OpAMD64InvertFlags { 11703 break 11704 } 11705 x := v_0.Args[0] 11706 v.reset(OpAMD64SETBE) 11707 v.AddArg(x) 11708 return true 11709 } 11710 // match: (SETAE (FlagEQ)) 11711 // cond: 11712 // result: (MOVLconst [1]) 11713 for { 11714 v_0 := v.Args[0] 11715 if v_0.Op != OpAMD64FlagEQ { 11716 break 11717 } 11718 v.reset(OpAMD64MOVLconst) 11719 v.AuxInt = 1 11720 return true 11721 } 11722 // match: (SETAE (FlagLT_ULT)) 11723 // cond: 11724 // result: (MOVLconst [0]) 11725 for { 11726 v_0 := v.Args[0] 11727 if v_0.Op != OpAMD64FlagLT_ULT { 11728 break 11729 } 11730 v.reset(OpAMD64MOVLconst) 11731 v.AuxInt = 0 11732 return true 11733 } 11734 // match: (SETAE (FlagLT_UGT)) 11735 // cond: 11736 // result: (MOVLconst [1]) 11737 for { 11738 v_0 := v.Args[0] 11739 if v_0.Op != OpAMD64FlagLT_UGT { 11740 break 11741 } 11742 v.reset(OpAMD64MOVLconst) 11743 v.AuxInt = 1 11744 return true 11745 } 11746 // match: (SETAE 
(FlagGT_ULT)) 11747 // cond: 11748 // result: (MOVLconst [0]) 11749 for { 11750 v_0 := v.Args[0] 11751 if v_0.Op != OpAMD64FlagGT_ULT { 11752 break 11753 } 11754 v.reset(OpAMD64MOVLconst) 11755 v.AuxInt = 0 11756 return true 11757 } 11758 // match: (SETAE (FlagGT_UGT)) 11759 // cond: 11760 // result: (MOVLconst [1]) 11761 for { 11762 v_0 := v.Args[0] 11763 if v_0.Op != OpAMD64FlagGT_UGT { 11764 break 11765 } 11766 v.reset(OpAMD64MOVLconst) 11767 v.AuxInt = 1 11768 return true 11769 } 11770 return false 11771 } 11772 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { 11773 b := v.Block 11774 _ = b 11775 // match: (SETB (InvertFlags x)) 11776 // cond: 11777 // result: (SETA x) 11778 for { 11779 v_0 := v.Args[0] 11780 if v_0.Op != OpAMD64InvertFlags { 11781 break 11782 } 11783 x := v_0.Args[0] 11784 v.reset(OpAMD64SETA) 11785 v.AddArg(x) 11786 return true 11787 } 11788 // match: (SETB (FlagEQ)) 11789 // cond: 11790 // result: (MOVLconst [0]) 11791 for { 11792 v_0 := v.Args[0] 11793 if v_0.Op != OpAMD64FlagEQ { 11794 break 11795 } 11796 v.reset(OpAMD64MOVLconst) 11797 v.AuxInt = 0 11798 return true 11799 } 11800 // match: (SETB (FlagLT_ULT)) 11801 // cond: 11802 // result: (MOVLconst [1]) 11803 for { 11804 v_0 := v.Args[0] 11805 if v_0.Op != OpAMD64FlagLT_ULT { 11806 break 11807 } 11808 v.reset(OpAMD64MOVLconst) 11809 v.AuxInt = 1 11810 return true 11811 } 11812 // match: (SETB (FlagLT_UGT)) 11813 // cond: 11814 // result: (MOVLconst [0]) 11815 for { 11816 v_0 := v.Args[0] 11817 if v_0.Op != OpAMD64FlagLT_UGT { 11818 break 11819 } 11820 v.reset(OpAMD64MOVLconst) 11821 v.AuxInt = 0 11822 return true 11823 } 11824 // match: (SETB (FlagGT_ULT)) 11825 // cond: 11826 // result: (MOVLconst [1]) 11827 for { 11828 v_0 := v.Args[0] 11829 if v_0.Op != OpAMD64FlagGT_ULT { 11830 break 11831 } 11832 v.reset(OpAMD64MOVLconst) 11833 v.AuxInt = 1 11834 return true 11835 } 11836 // match: (SETB (FlagGT_UGT)) 11837 // cond: 11838 // result: (MOVLconst [0]) 11839 for { 
11840 v_0 := v.Args[0] 11841 if v_0.Op != OpAMD64FlagGT_UGT { 11842 break 11843 } 11844 v.reset(OpAMD64MOVLconst) 11845 v.AuxInt = 0 11846 return true 11847 } 11848 return false 11849 } 11850 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { 11851 b := v.Block 11852 _ = b 11853 // match: (SETBE (InvertFlags x)) 11854 // cond: 11855 // result: (SETAE x) 11856 for { 11857 v_0 := v.Args[0] 11858 if v_0.Op != OpAMD64InvertFlags { 11859 break 11860 } 11861 x := v_0.Args[0] 11862 v.reset(OpAMD64SETAE) 11863 v.AddArg(x) 11864 return true 11865 } 11866 // match: (SETBE (FlagEQ)) 11867 // cond: 11868 // result: (MOVLconst [1]) 11869 for { 11870 v_0 := v.Args[0] 11871 if v_0.Op != OpAMD64FlagEQ { 11872 break 11873 } 11874 v.reset(OpAMD64MOVLconst) 11875 v.AuxInt = 1 11876 return true 11877 } 11878 // match: (SETBE (FlagLT_ULT)) 11879 // cond: 11880 // result: (MOVLconst [1]) 11881 for { 11882 v_0 := v.Args[0] 11883 if v_0.Op != OpAMD64FlagLT_ULT { 11884 break 11885 } 11886 v.reset(OpAMD64MOVLconst) 11887 v.AuxInt = 1 11888 return true 11889 } 11890 // match: (SETBE (FlagLT_UGT)) 11891 // cond: 11892 // result: (MOVLconst [0]) 11893 for { 11894 v_0 := v.Args[0] 11895 if v_0.Op != OpAMD64FlagLT_UGT { 11896 break 11897 } 11898 v.reset(OpAMD64MOVLconst) 11899 v.AuxInt = 0 11900 return true 11901 } 11902 // match: (SETBE (FlagGT_ULT)) 11903 // cond: 11904 // result: (MOVLconst [1]) 11905 for { 11906 v_0 := v.Args[0] 11907 if v_0.Op != OpAMD64FlagGT_ULT { 11908 break 11909 } 11910 v.reset(OpAMD64MOVLconst) 11911 v.AuxInt = 1 11912 return true 11913 } 11914 // match: (SETBE (FlagGT_UGT)) 11915 // cond: 11916 // result: (MOVLconst [0]) 11917 for { 11918 v_0 := v.Args[0] 11919 if v_0.Op != OpAMD64FlagGT_UGT { 11920 break 11921 } 11922 v.reset(OpAMD64MOVLconst) 11923 v.AuxInt = 0 11924 return true 11925 } 11926 return false 11927 } 11928 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { 11929 b := v.Block 11930 _ = b 11931 // match: (SETEQ 
(InvertFlags x)) 11932 // cond: 11933 // result: (SETEQ x) 11934 for { 11935 v_0 := v.Args[0] 11936 if v_0.Op != OpAMD64InvertFlags { 11937 break 11938 } 11939 x := v_0.Args[0] 11940 v.reset(OpAMD64SETEQ) 11941 v.AddArg(x) 11942 return true 11943 } 11944 // match: (SETEQ (FlagEQ)) 11945 // cond: 11946 // result: (MOVLconst [1]) 11947 for { 11948 v_0 := v.Args[0] 11949 if v_0.Op != OpAMD64FlagEQ { 11950 break 11951 } 11952 v.reset(OpAMD64MOVLconst) 11953 v.AuxInt = 1 11954 return true 11955 } 11956 // match: (SETEQ (FlagLT_ULT)) 11957 // cond: 11958 // result: (MOVLconst [0]) 11959 for { 11960 v_0 := v.Args[0] 11961 if v_0.Op != OpAMD64FlagLT_ULT { 11962 break 11963 } 11964 v.reset(OpAMD64MOVLconst) 11965 v.AuxInt = 0 11966 return true 11967 } 11968 // match: (SETEQ (FlagLT_UGT)) 11969 // cond: 11970 // result: (MOVLconst [0]) 11971 for { 11972 v_0 := v.Args[0] 11973 if v_0.Op != OpAMD64FlagLT_UGT { 11974 break 11975 } 11976 v.reset(OpAMD64MOVLconst) 11977 v.AuxInt = 0 11978 return true 11979 } 11980 // match: (SETEQ (FlagGT_ULT)) 11981 // cond: 11982 // result: (MOVLconst [0]) 11983 for { 11984 v_0 := v.Args[0] 11985 if v_0.Op != OpAMD64FlagGT_ULT { 11986 break 11987 } 11988 v.reset(OpAMD64MOVLconst) 11989 v.AuxInt = 0 11990 return true 11991 } 11992 // match: (SETEQ (FlagGT_UGT)) 11993 // cond: 11994 // result: (MOVLconst [0]) 11995 for { 11996 v_0 := v.Args[0] 11997 if v_0.Op != OpAMD64FlagGT_UGT { 11998 break 11999 } 12000 v.reset(OpAMD64MOVLconst) 12001 v.AuxInt = 0 12002 return true 12003 } 12004 return false 12005 } 12006 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { 12007 b := v.Block 12008 _ = b 12009 // match: (SETG (InvertFlags x)) 12010 // cond: 12011 // result: (SETL x) 12012 for { 12013 v_0 := v.Args[0] 12014 if v_0.Op != OpAMD64InvertFlags { 12015 break 12016 } 12017 x := v_0.Args[0] 12018 v.reset(OpAMD64SETL) 12019 v.AddArg(x) 12020 return true 12021 } 12022 // match: (SETG (FlagEQ)) 12023 // cond: 12024 // result: (MOVLconst 
[0]) 12025 for { 12026 v_0 := v.Args[0] 12027 if v_0.Op != OpAMD64FlagEQ { 12028 break 12029 } 12030 v.reset(OpAMD64MOVLconst) 12031 v.AuxInt = 0 12032 return true 12033 } 12034 // match: (SETG (FlagLT_ULT)) 12035 // cond: 12036 // result: (MOVLconst [0]) 12037 for { 12038 v_0 := v.Args[0] 12039 if v_0.Op != OpAMD64FlagLT_ULT { 12040 break 12041 } 12042 v.reset(OpAMD64MOVLconst) 12043 v.AuxInt = 0 12044 return true 12045 } 12046 // match: (SETG (FlagLT_UGT)) 12047 // cond: 12048 // result: (MOVLconst [0]) 12049 for { 12050 v_0 := v.Args[0] 12051 if v_0.Op != OpAMD64FlagLT_UGT { 12052 break 12053 } 12054 v.reset(OpAMD64MOVLconst) 12055 v.AuxInt = 0 12056 return true 12057 } 12058 // match: (SETG (FlagGT_ULT)) 12059 // cond: 12060 // result: (MOVLconst [1]) 12061 for { 12062 v_0 := v.Args[0] 12063 if v_0.Op != OpAMD64FlagGT_ULT { 12064 break 12065 } 12066 v.reset(OpAMD64MOVLconst) 12067 v.AuxInt = 1 12068 return true 12069 } 12070 // match: (SETG (FlagGT_UGT)) 12071 // cond: 12072 // result: (MOVLconst [1]) 12073 for { 12074 v_0 := v.Args[0] 12075 if v_0.Op != OpAMD64FlagGT_UGT { 12076 break 12077 } 12078 v.reset(OpAMD64MOVLconst) 12079 v.AuxInt = 1 12080 return true 12081 } 12082 return false 12083 } 12084 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { 12085 b := v.Block 12086 _ = b 12087 // match: (SETGE (InvertFlags x)) 12088 // cond: 12089 // result: (SETLE x) 12090 for { 12091 v_0 := v.Args[0] 12092 if v_0.Op != OpAMD64InvertFlags { 12093 break 12094 } 12095 x := v_0.Args[0] 12096 v.reset(OpAMD64SETLE) 12097 v.AddArg(x) 12098 return true 12099 } 12100 // match: (SETGE (FlagEQ)) 12101 // cond: 12102 // result: (MOVLconst [1]) 12103 for { 12104 v_0 := v.Args[0] 12105 if v_0.Op != OpAMD64FlagEQ { 12106 break 12107 } 12108 v.reset(OpAMD64MOVLconst) 12109 v.AuxInt = 1 12110 return true 12111 } 12112 // match: (SETGE (FlagLT_ULT)) 12113 // cond: 12114 // result: (MOVLconst [0]) 12115 for { 12116 v_0 := v.Args[0] 12117 if v_0.Op != 
OpAMD64FlagLT_ULT { 12118 break 12119 } 12120 v.reset(OpAMD64MOVLconst) 12121 v.AuxInt = 0 12122 return true 12123 } 12124 // match: (SETGE (FlagLT_UGT)) 12125 // cond: 12126 // result: (MOVLconst [0]) 12127 for { 12128 v_0 := v.Args[0] 12129 if v_0.Op != OpAMD64FlagLT_UGT { 12130 break 12131 } 12132 v.reset(OpAMD64MOVLconst) 12133 v.AuxInt = 0 12134 return true 12135 } 12136 // match: (SETGE (FlagGT_ULT)) 12137 // cond: 12138 // result: (MOVLconst [1]) 12139 for { 12140 v_0 := v.Args[0] 12141 if v_0.Op != OpAMD64FlagGT_ULT { 12142 break 12143 } 12144 v.reset(OpAMD64MOVLconst) 12145 v.AuxInt = 1 12146 return true 12147 } 12148 // match: (SETGE (FlagGT_UGT)) 12149 // cond: 12150 // result: (MOVLconst [1]) 12151 for { 12152 v_0 := v.Args[0] 12153 if v_0.Op != OpAMD64FlagGT_UGT { 12154 break 12155 } 12156 v.reset(OpAMD64MOVLconst) 12157 v.AuxInt = 1 12158 return true 12159 } 12160 return false 12161 } 12162 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { 12163 b := v.Block 12164 _ = b 12165 // match: (SETL (InvertFlags x)) 12166 // cond: 12167 // result: (SETG x) 12168 for { 12169 v_0 := v.Args[0] 12170 if v_0.Op != OpAMD64InvertFlags { 12171 break 12172 } 12173 x := v_0.Args[0] 12174 v.reset(OpAMD64SETG) 12175 v.AddArg(x) 12176 return true 12177 } 12178 // match: (SETL (FlagEQ)) 12179 // cond: 12180 // result: (MOVLconst [0]) 12181 for { 12182 v_0 := v.Args[0] 12183 if v_0.Op != OpAMD64FlagEQ { 12184 break 12185 } 12186 v.reset(OpAMD64MOVLconst) 12187 v.AuxInt = 0 12188 return true 12189 } 12190 // match: (SETL (FlagLT_ULT)) 12191 // cond: 12192 // result: (MOVLconst [1]) 12193 for { 12194 v_0 := v.Args[0] 12195 if v_0.Op != OpAMD64FlagLT_ULT { 12196 break 12197 } 12198 v.reset(OpAMD64MOVLconst) 12199 v.AuxInt = 1 12200 return true 12201 } 12202 // match: (SETL (FlagLT_UGT)) 12203 // cond: 12204 // result: (MOVLconst [1]) 12205 for { 12206 v_0 := v.Args[0] 12207 if v_0.Op != OpAMD64FlagLT_UGT { 12208 break 12209 } 12210 v.reset(OpAMD64MOVLconst) 
12211 v.AuxInt = 1 12212 return true 12213 } 12214 // match: (SETL (FlagGT_ULT)) 12215 // cond: 12216 // result: (MOVLconst [0]) 12217 for { 12218 v_0 := v.Args[0] 12219 if v_0.Op != OpAMD64FlagGT_ULT { 12220 break 12221 } 12222 v.reset(OpAMD64MOVLconst) 12223 v.AuxInt = 0 12224 return true 12225 } 12226 // match: (SETL (FlagGT_UGT)) 12227 // cond: 12228 // result: (MOVLconst [0]) 12229 for { 12230 v_0 := v.Args[0] 12231 if v_0.Op != OpAMD64FlagGT_UGT { 12232 break 12233 } 12234 v.reset(OpAMD64MOVLconst) 12235 v.AuxInt = 0 12236 return true 12237 } 12238 return false 12239 } 12240 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { 12241 b := v.Block 12242 _ = b 12243 // match: (SETLE (InvertFlags x)) 12244 // cond: 12245 // result: (SETGE x) 12246 for { 12247 v_0 := v.Args[0] 12248 if v_0.Op != OpAMD64InvertFlags { 12249 break 12250 } 12251 x := v_0.Args[0] 12252 v.reset(OpAMD64SETGE) 12253 v.AddArg(x) 12254 return true 12255 } 12256 // match: (SETLE (FlagEQ)) 12257 // cond: 12258 // result: (MOVLconst [1]) 12259 for { 12260 v_0 := v.Args[0] 12261 if v_0.Op != OpAMD64FlagEQ { 12262 break 12263 } 12264 v.reset(OpAMD64MOVLconst) 12265 v.AuxInt = 1 12266 return true 12267 } 12268 // match: (SETLE (FlagLT_ULT)) 12269 // cond: 12270 // result: (MOVLconst [1]) 12271 for { 12272 v_0 := v.Args[0] 12273 if v_0.Op != OpAMD64FlagLT_ULT { 12274 break 12275 } 12276 v.reset(OpAMD64MOVLconst) 12277 v.AuxInt = 1 12278 return true 12279 } 12280 // match: (SETLE (FlagLT_UGT)) 12281 // cond: 12282 // result: (MOVLconst [1]) 12283 for { 12284 v_0 := v.Args[0] 12285 if v_0.Op != OpAMD64FlagLT_UGT { 12286 break 12287 } 12288 v.reset(OpAMD64MOVLconst) 12289 v.AuxInt = 1 12290 return true 12291 } 12292 // match: (SETLE (FlagGT_ULT)) 12293 // cond: 12294 // result: (MOVLconst [0]) 12295 for { 12296 v_0 := v.Args[0] 12297 if v_0.Op != OpAMD64FlagGT_ULT { 12298 break 12299 } 12300 v.reset(OpAMD64MOVLconst) 12301 v.AuxInt = 0 12302 return true 12303 } 12304 // match: (SETLE 
(FlagGT_UGT)) 12305 // cond: 12306 // result: (MOVLconst [0]) 12307 for { 12308 v_0 := v.Args[0] 12309 if v_0.Op != OpAMD64FlagGT_UGT { 12310 break 12311 } 12312 v.reset(OpAMD64MOVLconst) 12313 v.AuxInt = 0 12314 return true 12315 } 12316 return false 12317 } 12318 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { 12319 b := v.Block 12320 _ = b 12321 // match: (SETNE (InvertFlags x)) 12322 // cond: 12323 // result: (SETNE x) 12324 for { 12325 v_0 := v.Args[0] 12326 if v_0.Op != OpAMD64InvertFlags { 12327 break 12328 } 12329 x := v_0.Args[0] 12330 v.reset(OpAMD64SETNE) 12331 v.AddArg(x) 12332 return true 12333 } 12334 // match: (SETNE (FlagEQ)) 12335 // cond: 12336 // result: (MOVLconst [0]) 12337 for { 12338 v_0 := v.Args[0] 12339 if v_0.Op != OpAMD64FlagEQ { 12340 break 12341 } 12342 v.reset(OpAMD64MOVLconst) 12343 v.AuxInt = 0 12344 return true 12345 } 12346 // match: (SETNE (FlagLT_ULT)) 12347 // cond: 12348 // result: (MOVLconst [1]) 12349 for { 12350 v_0 := v.Args[0] 12351 if v_0.Op != OpAMD64FlagLT_ULT { 12352 break 12353 } 12354 v.reset(OpAMD64MOVLconst) 12355 v.AuxInt = 1 12356 return true 12357 } 12358 // match: (SETNE (FlagLT_UGT)) 12359 // cond: 12360 // result: (MOVLconst [1]) 12361 for { 12362 v_0 := v.Args[0] 12363 if v_0.Op != OpAMD64FlagLT_UGT { 12364 break 12365 } 12366 v.reset(OpAMD64MOVLconst) 12367 v.AuxInt = 1 12368 return true 12369 } 12370 // match: (SETNE (FlagGT_ULT)) 12371 // cond: 12372 // result: (MOVLconst [1]) 12373 for { 12374 v_0 := v.Args[0] 12375 if v_0.Op != OpAMD64FlagGT_ULT { 12376 break 12377 } 12378 v.reset(OpAMD64MOVLconst) 12379 v.AuxInt = 1 12380 return true 12381 } 12382 // match: (SETNE (FlagGT_UGT)) 12383 // cond: 12384 // result: (MOVLconst [1]) 12385 for { 12386 v_0 := v.Args[0] 12387 if v_0.Op != OpAMD64FlagGT_UGT { 12388 break 12389 } 12390 v.reset(OpAMD64MOVLconst) 12391 v.AuxInt = 1 12392 return true 12393 } 12394 return false 12395 } 12396 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config 
*Config) bool { 12397 b := v.Block 12398 _ = b 12399 // match: (SHLL x (MOVQconst [c])) 12400 // cond: 12401 // result: (SHLLconst [c&31] x) 12402 for { 12403 x := v.Args[0] 12404 v_1 := v.Args[1] 12405 if v_1.Op != OpAMD64MOVQconst { 12406 break 12407 } 12408 c := v_1.AuxInt 12409 v.reset(OpAMD64SHLLconst) 12410 v.AuxInt = c & 31 12411 v.AddArg(x) 12412 return true 12413 } 12414 // match: (SHLL x (MOVLconst [c])) 12415 // cond: 12416 // result: (SHLLconst [c&31] x) 12417 for { 12418 x := v.Args[0] 12419 v_1 := v.Args[1] 12420 if v_1.Op != OpAMD64MOVLconst { 12421 break 12422 } 12423 c := v_1.AuxInt 12424 v.reset(OpAMD64SHLLconst) 12425 v.AuxInt = c & 31 12426 v.AddArg(x) 12427 return true 12428 } 12429 // match: (SHLL x (ANDLconst [31] y)) 12430 // cond: 12431 // result: (SHLL x y) 12432 for { 12433 x := v.Args[0] 12434 v_1 := v.Args[1] 12435 if v_1.Op != OpAMD64ANDLconst { 12436 break 12437 } 12438 if v_1.AuxInt != 31 { 12439 break 12440 } 12441 y := v_1.Args[0] 12442 v.reset(OpAMD64SHLL) 12443 v.AddArg(x) 12444 v.AddArg(y) 12445 return true 12446 } 12447 return false 12448 } 12449 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { 12450 b := v.Block 12451 _ = b 12452 // match: (SHLQ x (MOVQconst [c])) 12453 // cond: 12454 // result: (SHLQconst [c&63] x) 12455 for { 12456 x := v.Args[0] 12457 v_1 := v.Args[1] 12458 if v_1.Op != OpAMD64MOVQconst { 12459 break 12460 } 12461 c := v_1.AuxInt 12462 v.reset(OpAMD64SHLQconst) 12463 v.AuxInt = c & 63 12464 v.AddArg(x) 12465 return true 12466 } 12467 // match: (SHLQ x (MOVLconst [c])) 12468 // cond: 12469 // result: (SHLQconst [c&63] x) 12470 for { 12471 x := v.Args[0] 12472 v_1 := v.Args[1] 12473 if v_1.Op != OpAMD64MOVLconst { 12474 break 12475 } 12476 c := v_1.AuxInt 12477 v.reset(OpAMD64SHLQconst) 12478 v.AuxInt = c & 63 12479 v.AddArg(x) 12480 return true 12481 } 12482 // match: (SHLQ x (ANDQconst [63] y)) 12483 // cond: 12484 // result: (SHLQ x y) 12485 for { 12486 x := v.Args[0] 12487 v_1 := 
v.Args[1] 12488 if v_1.Op != OpAMD64ANDQconst { 12489 break 12490 } 12491 if v_1.AuxInt != 63 { 12492 break 12493 } 12494 y := v_1.Args[0] 12495 v.reset(OpAMD64SHLQ) 12496 v.AddArg(x) 12497 v.AddArg(y) 12498 return true 12499 } 12500 return false 12501 } 12502 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 12503 b := v.Block 12504 _ = b 12505 // match: (SHRB x (MOVQconst [c])) 12506 // cond: 12507 // result: (SHRBconst [c&31] x) 12508 for { 12509 x := v.Args[0] 12510 v_1 := v.Args[1] 12511 if v_1.Op != OpAMD64MOVQconst { 12512 break 12513 } 12514 c := v_1.AuxInt 12515 v.reset(OpAMD64SHRBconst) 12516 v.AuxInt = c & 31 12517 v.AddArg(x) 12518 return true 12519 } 12520 // match: (SHRB x (MOVLconst [c])) 12521 // cond: 12522 // result: (SHRBconst [c&31] x) 12523 for { 12524 x := v.Args[0] 12525 v_1 := v.Args[1] 12526 if v_1.Op != OpAMD64MOVLconst { 12527 break 12528 } 12529 c := v_1.AuxInt 12530 v.reset(OpAMD64SHRBconst) 12531 v.AuxInt = c & 31 12532 v.AddArg(x) 12533 return true 12534 } 12535 return false 12536 } 12537 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 12538 b := v.Block 12539 _ = b 12540 // match: (SHRL x (MOVQconst [c])) 12541 // cond: 12542 // result: (SHRLconst [c&31] x) 12543 for { 12544 x := v.Args[0] 12545 v_1 := v.Args[1] 12546 if v_1.Op != OpAMD64MOVQconst { 12547 break 12548 } 12549 c := v_1.AuxInt 12550 v.reset(OpAMD64SHRLconst) 12551 v.AuxInt = c & 31 12552 v.AddArg(x) 12553 return true 12554 } 12555 // match: (SHRL x (MOVLconst [c])) 12556 // cond: 12557 // result: (SHRLconst [c&31] x) 12558 for { 12559 x := v.Args[0] 12560 v_1 := v.Args[1] 12561 if v_1.Op != OpAMD64MOVLconst { 12562 break 12563 } 12564 c := v_1.AuxInt 12565 v.reset(OpAMD64SHRLconst) 12566 v.AuxInt = c & 31 12567 v.AddArg(x) 12568 return true 12569 } 12570 // match: (SHRL x (ANDLconst [31] y)) 12571 // cond: 12572 // result: (SHRL x y) 12573 for { 12574 x := v.Args[0] 12575 v_1 := v.Args[1] 12576 if v_1.Op != OpAMD64ANDLconst { 12577 
break 12578 } 12579 if v_1.AuxInt != 31 { 12580 break 12581 } 12582 y := v_1.Args[0] 12583 v.reset(OpAMD64SHRL) 12584 v.AddArg(x) 12585 v.AddArg(y) 12586 return true 12587 } 12588 return false 12589 } 12590 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 12591 b := v.Block 12592 _ = b 12593 // match: (SHRQ x (MOVQconst [c])) 12594 // cond: 12595 // result: (SHRQconst [c&63] x) 12596 for { 12597 x := v.Args[0] 12598 v_1 := v.Args[1] 12599 if v_1.Op != OpAMD64MOVQconst { 12600 break 12601 } 12602 c := v_1.AuxInt 12603 v.reset(OpAMD64SHRQconst) 12604 v.AuxInt = c & 63 12605 v.AddArg(x) 12606 return true 12607 } 12608 // match: (SHRQ x (MOVLconst [c])) 12609 // cond: 12610 // result: (SHRQconst [c&63] x) 12611 for { 12612 x := v.Args[0] 12613 v_1 := v.Args[1] 12614 if v_1.Op != OpAMD64MOVLconst { 12615 break 12616 } 12617 c := v_1.AuxInt 12618 v.reset(OpAMD64SHRQconst) 12619 v.AuxInt = c & 63 12620 v.AddArg(x) 12621 return true 12622 } 12623 // match: (SHRQ x (ANDQconst [63] y)) 12624 // cond: 12625 // result: (SHRQ x y) 12626 for { 12627 x := v.Args[0] 12628 v_1 := v.Args[1] 12629 if v_1.Op != OpAMD64ANDQconst { 12630 break 12631 } 12632 if v_1.AuxInt != 63 { 12633 break 12634 } 12635 y := v_1.Args[0] 12636 v.reset(OpAMD64SHRQ) 12637 v.AddArg(x) 12638 v.AddArg(y) 12639 return true 12640 } 12641 return false 12642 } 12643 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { 12644 b := v.Block 12645 _ = b 12646 // match: (SHRW x (MOVQconst [c])) 12647 // cond: 12648 // result: (SHRWconst [c&31] x) 12649 for { 12650 x := v.Args[0] 12651 v_1 := v.Args[1] 12652 if v_1.Op != OpAMD64MOVQconst { 12653 break 12654 } 12655 c := v_1.AuxInt 12656 v.reset(OpAMD64SHRWconst) 12657 v.AuxInt = c & 31 12658 v.AddArg(x) 12659 return true 12660 } 12661 // match: (SHRW x (MOVLconst [c])) 12662 // cond: 12663 // result: (SHRWconst [c&31] x) 12664 for { 12665 x := v.Args[0] 12666 v_1 := v.Args[1] 12667 if v_1.Op != OpAMD64MOVLconst { 12668 break 12669 } 
12670 c := v_1.AuxInt 12671 v.reset(OpAMD64SHRWconst) 12672 v.AuxInt = c & 31 12673 v.AddArg(x) 12674 return true 12675 } 12676 return false 12677 } 12678 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { 12679 b := v.Block 12680 _ = b 12681 // match: (SUBL x (MOVLconst [c])) 12682 // cond: 12683 // result: (SUBLconst x [c]) 12684 for { 12685 x := v.Args[0] 12686 v_1 := v.Args[1] 12687 if v_1.Op != OpAMD64MOVLconst { 12688 break 12689 } 12690 c := v_1.AuxInt 12691 v.reset(OpAMD64SUBLconst) 12692 v.AuxInt = c 12693 v.AddArg(x) 12694 return true 12695 } 12696 // match: (SUBL (MOVLconst [c]) x) 12697 // cond: 12698 // result: (NEGL (SUBLconst <v.Type> x [c])) 12699 for { 12700 v_0 := v.Args[0] 12701 if v_0.Op != OpAMD64MOVLconst { 12702 break 12703 } 12704 c := v_0.AuxInt 12705 x := v.Args[1] 12706 v.reset(OpAMD64NEGL) 12707 v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type) 12708 v0.AuxInt = c 12709 v0.AddArg(x) 12710 v.AddArg(v0) 12711 return true 12712 } 12713 // match: (SUBL x x) 12714 // cond: 12715 // result: (MOVLconst [0]) 12716 for { 12717 x := v.Args[0] 12718 if x != v.Args[1] { 12719 break 12720 } 12721 v.reset(OpAMD64MOVLconst) 12722 v.AuxInt = 0 12723 return true 12724 } 12725 return false 12726 } 12727 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { 12728 b := v.Block 12729 _ = b 12730 // match: (SUBLconst [c] x) 12731 // cond: int32(c) == 0 12732 // result: x 12733 for { 12734 c := v.AuxInt 12735 x := v.Args[0] 12736 if !(int32(c) == 0) { 12737 break 12738 } 12739 v.reset(OpCopy) 12740 v.Type = x.Type 12741 v.AddArg(x) 12742 return true 12743 } 12744 // match: (SUBLconst [c] x) 12745 // cond: 12746 // result: (ADDLconst [int64(int32(-c))] x) 12747 for { 12748 c := v.AuxInt 12749 x := v.Args[0] 12750 v.reset(OpAMD64ADDLconst) 12751 v.AuxInt = int64(int32(-c)) 12752 v.AddArg(x) 12753 return true 12754 } 12755 } 12756 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { 12757 b := v.Block 12758 _ = 
b 12759 // match: (SUBQ x (MOVQconst [c])) 12760 // cond: is32Bit(c) 12761 // result: (SUBQconst x [c]) 12762 for { 12763 x := v.Args[0] 12764 v_1 := v.Args[1] 12765 if v_1.Op != OpAMD64MOVQconst { 12766 break 12767 } 12768 c := v_1.AuxInt 12769 if !(is32Bit(c)) { 12770 break 12771 } 12772 v.reset(OpAMD64SUBQconst) 12773 v.AuxInt = c 12774 v.AddArg(x) 12775 return true 12776 } 12777 // match: (SUBQ (MOVQconst [c]) x) 12778 // cond: is32Bit(c) 12779 // result: (NEGQ (SUBQconst <v.Type> x [c])) 12780 for { 12781 v_0 := v.Args[0] 12782 if v_0.Op != OpAMD64MOVQconst { 12783 break 12784 } 12785 c := v_0.AuxInt 12786 x := v.Args[1] 12787 if !(is32Bit(c)) { 12788 break 12789 } 12790 v.reset(OpAMD64NEGQ) 12791 v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type) 12792 v0.AuxInt = c 12793 v0.AddArg(x) 12794 v.AddArg(v0) 12795 return true 12796 } 12797 // match: (SUBQ x x) 12798 // cond: 12799 // result: (MOVQconst [0]) 12800 for { 12801 x := v.Args[0] 12802 if x != v.Args[1] { 12803 break 12804 } 12805 v.reset(OpAMD64MOVQconst) 12806 v.AuxInt = 0 12807 return true 12808 } 12809 return false 12810 } 12811 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { 12812 b := v.Block 12813 _ = b 12814 // match: (SUBQconst [0] x) 12815 // cond: 12816 // result: x 12817 for { 12818 if v.AuxInt != 0 { 12819 break 12820 } 12821 x := v.Args[0] 12822 v.reset(OpCopy) 12823 v.Type = x.Type 12824 v.AddArg(x) 12825 return true 12826 } 12827 // match: (SUBQconst [c] x) 12828 // cond: c != -(1<<31) 12829 // result: (ADDQconst [-c] x) 12830 for { 12831 c := v.AuxInt 12832 x := v.Args[0] 12833 if !(c != -(1 << 31)) { 12834 break 12835 } 12836 v.reset(OpAMD64ADDQconst) 12837 v.AuxInt = -c 12838 v.AddArg(x) 12839 return true 12840 } 12841 // match: (SUBQconst (MOVQconst [d]) [c]) 12842 // cond: 12843 // result: (MOVQconst [d-c]) 12844 for { 12845 c := v.AuxInt 12846 v_0 := v.Args[0] 12847 if v_0.Op != OpAMD64MOVQconst { 12848 break 12849 } 12850 d := v_0.AuxInt 12851 
v.reset(OpAMD64MOVQconst) 12852 v.AuxInt = d - c 12853 return true 12854 } 12855 // match: (SUBQconst (SUBQconst x [d]) [c]) 12856 // cond: is32Bit(-c-d) 12857 // result: (ADDQconst [-c-d] x) 12858 for { 12859 c := v.AuxInt 12860 v_0 := v.Args[0] 12861 if v_0.Op != OpAMD64SUBQconst { 12862 break 12863 } 12864 d := v_0.AuxInt 12865 x := v_0.Args[0] 12866 if !(is32Bit(-c - d)) { 12867 break 12868 } 12869 v.reset(OpAMD64ADDQconst) 12870 v.AuxInt = -c - d 12871 v.AddArg(x) 12872 return true 12873 } 12874 return false 12875 } 12876 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool { 12877 b := v.Block 12878 _ = b 12879 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 12880 // cond: is32Bit(off1+off2) 12881 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 12882 for { 12883 off1 := v.AuxInt 12884 sym := v.Aux 12885 val := v.Args[0] 12886 v_1 := v.Args[1] 12887 if v_1.Op != OpAMD64ADDQconst { 12888 break 12889 } 12890 off2 := v_1.AuxInt 12891 ptr := v_1.Args[0] 12892 mem := v.Args[2] 12893 if !(is32Bit(off1 + off2)) { 12894 break 12895 } 12896 v.reset(OpAMD64XADDLlock) 12897 v.AuxInt = off1 + off2 12898 v.Aux = sym 12899 v.AddArg(val) 12900 v.AddArg(ptr) 12901 v.AddArg(mem) 12902 return true 12903 } 12904 return false 12905 } 12906 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool { 12907 b := v.Block 12908 _ = b 12909 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 12910 // cond: is32Bit(off1+off2) 12911 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 12912 for { 12913 off1 := v.AuxInt 12914 sym := v.Aux 12915 val := v.Args[0] 12916 v_1 := v.Args[1] 12917 if v_1.Op != OpAMD64ADDQconst { 12918 break 12919 } 12920 off2 := v_1.AuxInt 12921 ptr := v_1.Args[0] 12922 mem := v.Args[2] 12923 if !(is32Bit(off1 + off2)) { 12924 break 12925 } 12926 v.reset(OpAMD64XADDQlock) 12927 v.AuxInt = off1 + off2 12928 v.Aux = sym 12929 v.AddArg(val) 12930 v.AddArg(ptr) 12931 v.AddArg(mem) 12932 return true 
12933 } 12934 return false 12935 } 12936 func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool { 12937 b := v.Block 12938 _ = b 12939 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 12940 // cond: is32Bit(off1+off2) 12941 // result: (XCHGL [off1+off2] {sym} val ptr mem) 12942 for { 12943 off1 := v.AuxInt 12944 sym := v.Aux 12945 val := v.Args[0] 12946 v_1 := v.Args[1] 12947 if v_1.Op != OpAMD64ADDQconst { 12948 break 12949 } 12950 off2 := v_1.AuxInt 12951 ptr := v_1.Args[0] 12952 mem := v.Args[2] 12953 if !(is32Bit(off1 + off2)) { 12954 break 12955 } 12956 v.reset(OpAMD64XCHGL) 12957 v.AuxInt = off1 + off2 12958 v.Aux = sym 12959 v.AddArg(val) 12960 v.AddArg(ptr) 12961 v.AddArg(mem) 12962 return true 12963 } 12964 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 12965 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 12966 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 12967 for { 12968 off1 := v.AuxInt 12969 sym1 := v.Aux 12970 val := v.Args[0] 12971 v_1 := v.Args[1] 12972 if v_1.Op != OpAMD64LEAQ { 12973 break 12974 } 12975 off2 := v_1.AuxInt 12976 sym2 := v_1.Aux 12977 ptr := v_1.Args[0] 12978 mem := v.Args[2] 12979 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 12980 break 12981 } 12982 v.reset(OpAMD64XCHGL) 12983 v.AuxInt = off1 + off2 12984 v.Aux = mergeSym(sym1, sym2) 12985 v.AddArg(val) 12986 v.AddArg(ptr) 12987 v.AddArg(mem) 12988 return true 12989 } 12990 return false 12991 } 12992 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool { 12993 b := v.Block 12994 _ = b 12995 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 12996 // cond: is32Bit(off1+off2) 12997 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 12998 for { 12999 off1 := v.AuxInt 13000 sym := v.Aux 13001 val := v.Args[0] 13002 v_1 := v.Args[1] 13003 if v_1.Op != OpAMD64ADDQconst { 13004 break 13005 } 13006 off2 := v_1.AuxInt 13007 ptr := v_1.Args[0] 13008 mem 
:= v.Args[2] 13009 if !(is32Bit(off1 + off2)) { 13010 break 13011 } 13012 v.reset(OpAMD64XCHGQ) 13013 v.AuxInt = off1 + off2 13014 v.Aux = sym 13015 v.AddArg(val) 13016 v.AddArg(ptr) 13017 v.AddArg(mem) 13018 return true 13019 } 13020 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 13021 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 13022 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 13023 for { 13024 off1 := v.AuxInt 13025 sym1 := v.Aux 13026 val := v.Args[0] 13027 v_1 := v.Args[1] 13028 if v_1.Op != OpAMD64LEAQ { 13029 break 13030 } 13031 off2 := v_1.AuxInt 13032 sym2 := v_1.Aux 13033 ptr := v_1.Args[0] 13034 mem := v.Args[2] 13035 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 13036 break 13037 } 13038 v.reset(OpAMD64XCHGQ) 13039 v.AuxInt = off1 + off2 13040 v.Aux = mergeSym(sym1, sym2) 13041 v.AddArg(val) 13042 v.AddArg(ptr) 13043 v.AddArg(mem) 13044 return true 13045 } 13046 return false 13047 } 13048 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { 13049 b := v.Block 13050 _ = b 13051 // match: (XORL x (MOVLconst [c])) 13052 // cond: 13053 // result: (XORLconst [c] x) 13054 for { 13055 x := v.Args[0] 13056 v_1 := v.Args[1] 13057 if v_1.Op != OpAMD64MOVLconst { 13058 break 13059 } 13060 c := v_1.AuxInt 13061 v.reset(OpAMD64XORLconst) 13062 v.AuxInt = c 13063 v.AddArg(x) 13064 return true 13065 } 13066 // match: (XORL (MOVLconst [c]) x) 13067 // cond: 13068 // result: (XORLconst [c] x) 13069 for { 13070 v_0 := v.Args[0] 13071 if v_0.Op != OpAMD64MOVLconst { 13072 break 13073 } 13074 c := v_0.AuxInt 13075 x := v.Args[1] 13076 v.reset(OpAMD64XORLconst) 13077 v.AuxInt = c 13078 v.AddArg(x) 13079 return true 13080 } 13081 // match: (XORL x x) 13082 // cond: 13083 // result: (MOVLconst [0]) 13084 for { 13085 x := v.Args[0] 13086 if x != v.Args[1] { 13087 break 13088 } 13089 v.reset(OpAMD64MOVLconst) 13090 v.AuxInt = 0 13091 return true 13092 } 13093 return 
false 13094 } 13095 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { 13096 b := v.Block 13097 _ = b 13098 // match: (XORLconst [c] (XORLconst [d] x)) 13099 // cond: 13100 // result: (XORLconst [c ^ d] x) 13101 for { 13102 c := v.AuxInt 13103 v_0 := v.Args[0] 13104 if v_0.Op != OpAMD64XORLconst { 13105 break 13106 } 13107 d := v_0.AuxInt 13108 x := v_0.Args[0] 13109 v.reset(OpAMD64XORLconst) 13110 v.AuxInt = c ^ d 13111 v.AddArg(x) 13112 return true 13113 } 13114 // match: (XORLconst [c] x) 13115 // cond: int32(c)==0 13116 // result: x 13117 for { 13118 c := v.AuxInt 13119 x := v.Args[0] 13120 if !(int32(c) == 0) { 13121 break 13122 } 13123 v.reset(OpCopy) 13124 v.Type = x.Type 13125 v.AddArg(x) 13126 return true 13127 } 13128 // match: (XORLconst [c] (MOVLconst [d])) 13129 // cond: 13130 // result: (MOVLconst [c^d]) 13131 for { 13132 c := v.AuxInt 13133 v_0 := v.Args[0] 13134 if v_0.Op != OpAMD64MOVLconst { 13135 break 13136 } 13137 d := v_0.AuxInt 13138 v.reset(OpAMD64MOVLconst) 13139 v.AuxInt = c ^ d 13140 return true 13141 } 13142 return false 13143 } 13144 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { 13145 b := v.Block 13146 _ = b 13147 // match: (XORQ x (MOVQconst [c])) 13148 // cond: is32Bit(c) 13149 // result: (XORQconst [c] x) 13150 for { 13151 x := v.Args[0] 13152 v_1 := v.Args[1] 13153 if v_1.Op != OpAMD64MOVQconst { 13154 break 13155 } 13156 c := v_1.AuxInt 13157 if !(is32Bit(c)) { 13158 break 13159 } 13160 v.reset(OpAMD64XORQconst) 13161 v.AuxInt = c 13162 v.AddArg(x) 13163 return true 13164 } 13165 // match: (XORQ (MOVQconst [c]) x) 13166 // cond: is32Bit(c) 13167 // result: (XORQconst [c] x) 13168 for { 13169 v_0 := v.Args[0] 13170 if v_0.Op != OpAMD64MOVQconst { 13171 break 13172 } 13173 c := v_0.AuxInt 13174 x := v.Args[1] 13175 if !(is32Bit(c)) { 13176 break 13177 } 13178 v.reset(OpAMD64XORQconst) 13179 v.AuxInt = c 13180 v.AddArg(x) 13181 return true 13182 } 13183 // match: (XORQ x x) 13184 // cond: 
13185 // result: (MOVQconst [0]) 13186 for { 13187 x := v.Args[0] 13188 if x != v.Args[1] { 13189 break 13190 } 13191 v.reset(OpAMD64MOVQconst) 13192 v.AuxInt = 0 13193 return true 13194 } 13195 return false 13196 } 13197 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { 13198 b := v.Block 13199 _ = b 13200 // match: (XORQconst [c] (XORQconst [d] x)) 13201 // cond: 13202 // result: (XORQconst [c ^ d] x) 13203 for { 13204 c := v.AuxInt 13205 v_0 := v.Args[0] 13206 if v_0.Op != OpAMD64XORQconst { 13207 break 13208 } 13209 d := v_0.AuxInt 13210 x := v_0.Args[0] 13211 v.reset(OpAMD64XORQconst) 13212 v.AuxInt = c ^ d 13213 v.AddArg(x) 13214 return true 13215 } 13216 // match: (XORQconst [0] x) 13217 // cond: 13218 // result: x 13219 for { 13220 if v.AuxInt != 0 { 13221 break 13222 } 13223 x := v.Args[0] 13224 v.reset(OpCopy) 13225 v.Type = x.Type 13226 v.AddArg(x) 13227 return true 13228 } 13229 // match: (XORQconst [c] (MOVQconst [d])) 13230 // cond: 13231 // result: (MOVQconst [c^d]) 13232 for { 13233 c := v.AuxInt 13234 v_0 := v.Args[0] 13235 if v_0.Op != OpAMD64MOVQconst { 13236 break 13237 } 13238 d := v_0.AuxInt 13239 v.reset(OpAMD64MOVQconst) 13240 v.AuxInt = c ^ d 13241 return true 13242 } 13243 return false 13244 } 13245 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { 13246 b := v.Block 13247 _ = b 13248 // match: (Add16 x y) 13249 // cond: 13250 // result: (ADDL x y) 13251 for { 13252 x := v.Args[0] 13253 y := v.Args[1] 13254 v.reset(OpAMD64ADDL) 13255 v.AddArg(x) 13256 v.AddArg(y) 13257 return true 13258 } 13259 } 13260 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool { 13261 b := v.Block 13262 _ = b 13263 // match: (Add32 x y) 13264 // cond: 13265 // result: (ADDL x y) 13266 for { 13267 x := v.Args[0] 13268 y := v.Args[1] 13269 v.reset(OpAMD64ADDL) 13270 v.AddArg(x) 13271 v.AddArg(y) 13272 return true 13273 } 13274 } 13275 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool { 13276 b := v.Block 
13277 _ = b 13278 // match: (Add32F x y) 13279 // cond: 13280 // result: (ADDSS x y) 13281 for { 13282 x := v.Args[0] 13283 y := v.Args[1] 13284 v.reset(OpAMD64ADDSS) 13285 v.AddArg(x) 13286 v.AddArg(y) 13287 return true 13288 } 13289 } 13290 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool { 13291 b := v.Block 13292 _ = b 13293 // match: (Add64 x y) 13294 // cond: 13295 // result: (ADDQ x y) 13296 for { 13297 x := v.Args[0] 13298 y := v.Args[1] 13299 v.reset(OpAMD64ADDQ) 13300 v.AddArg(x) 13301 v.AddArg(y) 13302 return true 13303 } 13304 } 13305 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool { 13306 b := v.Block 13307 _ = b 13308 // match: (Add64F x y) 13309 // cond: 13310 // result: (ADDSD x y) 13311 for { 13312 x := v.Args[0] 13313 y := v.Args[1] 13314 v.reset(OpAMD64ADDSD) 13315 v.AddArg(x) 13316 v.AddArg(y) 13317 return true 13318 } 13319 } 13320 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { 13321 b := v.Block 13322 _ = b 13323 // match: (Add8 x y) 13324 // cond: 13325 // result: (ADDL x y) 13326 for { 13327 x := v.Args[0] 13328 y := v.Args[1] 13329 v.reset(OpAMD64ADDL) 13330 v.AddArg(x) 13331 v.AddArg(y) 13332 return true 13333 } 13334 } 13335 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool { 13336 b := v.Block 13337 _ = b 13338 // match: (AddPtr x y) 13339 // cond: config.PtrSize == 8 13340 // result: (ADDQ x y) 13341 for { 13342 x := v.Args[0] 13343 y := v.Args[1] 13344 if !(config.PtrSize == 8) { 13345 break 13346 } 13347 v.reset(OpAMD64ADDQ) 13348 v.AddArg(x) 13349 v.AddArg(y) 13350 return true 13351 } 13352 // match: (AddPtr x y) 13353 // cond: config.PtrSize == 4 13354 // result: (ADDL x y) 13355 for { 13356 x := v.Args[0] 13357 y := v.Args[1] 13358 if !(config.PtrSize == 4) { 13359 break 13360 } 13361 v.reset(OpAMD64ADDL) 13362 v.AddArg(x) 13363 v.AddArg(y) 13364 return true 13365 } 13366 return false 13367 } 13368 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool { 13369 b := 
v.Block 13370 _ = b 13371 // match: (Addr {sym} base) 13372 // cond: config.PtrSize == 8 13373 // result: (LEAQ {sym} base) 13374 for { 13375 sym := v.Aux 13376 base := v.Args[0] 13377 if !(config.PtrSize == 8) { 13378 break 13379 } 13380 v.reset(OpAMD64LEAQ) 13381 v.Aux = sym 13382 v.AddArg(base) 13383 return true 13384 } 13385 // match: (Addr {sym} base) 13386 // cond: config.PtrSize == 4 13387 // result: (LEAL {sym} base) 13388 for { 13389 sym := v.Aux 13390 base := v.Args[0] 13391 if !(config.PtrSize == 4) { 13392 break 13393 } 13394 v.reset(OpAMD64LEAL) 13395 v.Aux = sym 13396 v.AddArg(base) 13397 return true 13398 } 13399 return false 13400 } 13401 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { 13402 b := v.Block 13403 _ = b 13404 // match: (And16 x y) 13405 // cond: 13406 // result: (ANDL x y) 13407 for { 13408 x := v.Args[0] 13409 y := v.Args[1] 13410 v.reset(OpAMD64ANDL) 13411 v.AddArg(x) 13412 v.AddArg(y) 13413 return true 13414 } 13415 } 13416 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool { 13417 b := v.Block 13418 _ = b 13419 // match: (And32 x y) 13420 // cond: 13421 // result: (ANDL x y) 13422 for { 13423 x := v.Args[0] 13424 y := v.Args[1] 13425 v.reset(OpAMD64ANDL) 13426 v.AddArg(x) 13427 v.AddArg(y) 13428 return true 13429 } 13430 } 13431 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool { 13432 b := v.Block 13433 _ = b 13434 // match: (And64 x y) 13435 // cond: 13436 // result: (ANDQ x y) 13437 for { 13438 x := v.Args[0] 13439 y := v.Args[1] 13440 v.reset(OpAMD64ANDQ) 13441 v.AddArg(x) 13442 v.AddArg(y) 13443 return true 13444 } 13445 } 13446 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { 13447 b := v.Block 13448 _ = b 13449 // match: (And8 x y) 13450 // cond: 13451 // result: (ANDL x y) 13452 for { 13453 x := v.Args[0] 13454 y := v.Args[1] 13455 v.reset(OpAMD64ANDL) 13456 v.AddArg(x) 13457 v.AddArg(y) 13458 return true 13459 } 13460 } 13461 func rewriteValueAMD64_OpAndB(v *Value, config 
*Config) bool { 13462 b := v.Block 13463 _ = b 13464 // match: (AndB x y) 13465 // cond: 13466 // result: (ANDL x y) 13467 for { 13468 x := v.Args[0] 13469 y := v.Args[1] 13470 v.reset(OpAMD64ANDL) 13471 v.AddArg(x) 13472 v.AddArg(y) 13473 return true 13474 } 13475 } 13476 func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool { 13477 b := v.Block 13478 _ = b 13479 // match: (AtomicAdd32 ptr val mem) 13480 // cond: 13481 // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) 13482 for { 13483 ptr := v.Args[0] 13484 val := v.Args[1] 13485 mem := v.Args[2] 13486 v.reset(OpAMD64AddTupleFirst32) 13487 v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem)) 13488 v0.AddArg(val) 13489 v0.AddArg(ptr) 13490 v0.AddArg(mem) 13491 v.AddArg(v0) 13492 v.AddArg(val) 13493 return true 13494 } 13495 } 13496 func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool { 13497 b := v.Block 13498 _ = b 13499 // match: (AtomicAdd64 ptr val mem) 13500 // cond: 13501 // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) 13502 for { 13503 ptr := v.Args[0] 13504 val := v.Args[1] 13505 mem := v.Args[2] 13506 v.reset(OpAMD64AddTupleFirst64) 13507 v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem)) 13508 v0.AddArg(val) 13509 v0.AddArg(ptr) 13510 v0.AddArg(mem) 13511 v.AddArg(v0) 13512 v.AddArg(val) 13513 return true 13514 } 13515 } 13516 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool { 13517 b := v.Block 13518 _ = b 13519 // match: (AtomicAnd8 ptr val mem) 13520 // cond: 13521 // result: (ANDBlock ptr val mem) 13522 for { 13523 ptr := v.Args[0] 13524 val := v.Args[1] 13525 mem := v.Args[2] 13526 v.reset(OpAMD64ANDBlock) 13527 v.AddArg(ptr) 13528 v.AddArg(val) 13529 v.AddArg(mem) 13530 return true 13531 } 13532 } 13533 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool { 13534 b := v.Block 13535 _ = b 13536 // match: (AtomicCompareAndSwap32 ptr old new_ 
mem) 13537 // cond: 13538 // result: (CMPXCHGLlock ptr old new_ mem) 13539 for { 13540 ptr := v.Args[0] 13541 old := v.Args[1] 13542 new_ := v.Args[2] 13543 mem := v.Args[3] 13544 v.reset(OpAMD64CMPXCHGLlock) 13545 v.AddArg(ptr) 13546 v.AddArg(old) 13547 v.AddArg(new_) 13548 v.AddArg(mem) 13549 return true 13550 } 13551 } 13552 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool { 13553 b := v.Block 13554 _ = b 13555 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 13556 // cond: 13557 // result: (CMPXCHGQlock ptr old new_ mem) 13558 for { 13559 ptr := v.Args[0] 13560 old := v.Args[1] 13561 new_ := v.Args[2] 13562 mem := v.Args[3] 13563 v.reset(OpAMD64CMPXCHGQlock) 13564 v.AddArg(ptr) 13565 v.AddArg(old) 13566 v.AddArg(new_) 13567 v.AddArg(mem) 13568 return true 13569 } 13570 } 13571 func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool { 13572 b := v.Block 13573 _ = b 13574 // match: (AtomicExchange32 ptr val mem) 13575 // cond: 13576 // result: (XCHGL val ptr mem) 13577 for { 13578 ptr := v.Args[0] 13579 val := v.Args[1] 13580 mem := v.Args[2] 13581 v.reset(OpAMD64XCHGL) 13582 v.AddArg(val) 13583 v.AddArg(ptr) 13584 v.AddArg(mem) 13585 return true 13586 } 13587 } 13588 func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool { 13589 b := v.Block 13590 _ = b 13591 // match: (AtomicExchange64 ptr val mem) 13592 // cond: 13593 // result: (XCHGQ val ptr mem) 13594 for { 13595 ptr := v.Args[0] 13596 val := v.Args[1] 13597 mem := v.Args[2] 13598 v.reset(OpAMD64XCHGQ) 13599 v.AddArg(val) 13600 v.AddArg(ptr) 13601 v.AddArg(mem) 13602 return true 13603 } 13604 } 13605 func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool { 13606 b := v.Block 13607 _ = b 13608 // match: (AtomicLoad32 ptr mem) 13609 // cond: 13610 // result: (MOVLatomicload ptr mem) 13611 for { 13612 ptr := v.Args[0] 13613 mem := v.Args[1] 13614 v.reset(OpAMD64MOVLatomicload) 13615 v.AddArg(ptr) 13616 v.AddArg(mem) 13617 return 
true 13618 } 13619 } 13620 func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool { 13621 b := v.Block 13622 _ = b 13623 // match: (AtomicLoad64 ptr mem) 13624 // cond: 13625 // result: (MOVQatomicload ptr mem) 13626 for { 13627 ptr := v.Args[0] 13628 mem := v.Args[1] 13629 v.reset(OpAMD64MOVQatomicload) 13630 v.AddArg(ptr) 13631 v.AddArg(mem) 13632 return true 13633 } 13634 } 13635 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool { 13636 b := v.Block 13637 _ = b 13638 // match: (AtomicLoadPtr ptr mem) 13639 // cond: config.PtrSize == 8 13640 // result: (MOVQatomicload ptr mem) 13641 for { 13642 ptr := v.Args[0] 13643 mem := v.Args[1] 13644 if !(config.PtrSize == 8) { 13645 break 13646 } 13647 v.reset(OpAMD64MOVQatomicload) 13648 v.AddArg(ptr) 13649 v.AddArg(mem) 13650 return true 13651 } 13652 // match: (AtomicLoadPtr ptr mem) 13653 // cond: config.PtrSize == 4 13654 // result: (MOVLatomicload ptr mem) 13655 for { 13656 ptr := v.Args[0] 13657 mem := v.Args[1] 13658 if !(config.PtrSize == 4) { 13659 break 13660 } 13661 v.reset(OpAMD64MOVLatomicload) 13662 v.AddArg(ptr) 13663 v.AddArg(mem) 13664 return true 13665 } 13666 return false 13667 } 13668 func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool { 13669 b := v.Block 13670 _ = b 13671 // match: (AtomicOr8 ptr val mem) 13672 // cond: 13673 // result: (ORBlock ptr val mem) 13674 for { 13675 ptr := v.Args[0] 13676 val := v.Args[1] 13677 mem := v.Args[2] 13678 v.reset(OpAMD64ORBlock) 13679 v.AddArg(ptr) 13680 v.AddArg(val) 13681 v.AddArg(mem) 13682 return true 13683 } 13684 } 13685 func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool { 13686 b := v.Block 13687 _ = b 13688 // match: (AtomicStore32 ptr val mem) 13689 // cond: 13690 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem)) 13691 for { 13692 ptr := v.Args[0] 13693 val := v.Args[1] 13694 mem := v.Args[2] 13695 v.reset(OpSelect1) 13696 v0 := 
b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem)) 13697 v0.AddArg(val) 13698 v0.AddArg(ptr) 13699 v0.AddArg(mem) 13700 v.AddArg(v0) 13701 return true 13702 } 13703 } 13704 func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool { 13705 b := v.Block 13706 _ = b 13707 // match: (AtomicStore64 ptr val mem) 13708 // cond: 13709 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem)) 13710 for { 13711 ptr := v.Args[0] 13712 val := v.Args[1] 13713 mem := v.Args[2] 13714 v.reset(OpSelect1) 13715 v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem)) 13716 v0.AddArg(val) 13717 v0.AddArg(ptr) 13718 v0.AddArg(mem) 13719 v.AddArg(v0) 13720 return true 13721 } 13722 } 13723 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool { 13724 b := v.Block 13725 _ = b 13726 // match: (AtomicStorePtrNoWB ptr val mem) 13727 // cond: config.PtrSize == 8 13728 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 13729 for { 13730 ptr := v.Args[0] 13731 val := v.Args[1] 13732 mem := v.Args[2] 13733 if !(config.PtrSize == 8) { 13734 break 13735 } 13736 v.reset(OpSelect1) 13737 v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 13738 v0.AddArg(val) 13739 v0.AddArg(ptr) 13740 v0.AddArg(mem) 13741 v.AddArg(v0) 13742 return true 13743 } 13744 // match: (AtomicStorePtrNoWB ptr val mem) 13745 // cond: config.PtrSize == 4 13746 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 13747 for { 13748 ptr := v.Args[0] 13749 val := v.Args[1] 13750 mem := v.Args[2] 13751 if !(config.PtrSize == 4) { 13752 break 13753 } 13754 v.reset(OpSelect1) 13755 v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 13756 v0.AddArg(val) 13757 v0.AddArg(ptr) 13758 v0.AddArg(mem) 13759 v.AddArg(v0) 13760 return true 
13761 } 13762 return false 13763 } 13764 func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { 13765 b := v.Block 13766 _ = b 13767 // match: (Avg64u x y) 13768 // cond: 13769 // result: (AVGQU x y) 13770 for { 13771 x := v.Args[0] 13772 y := v.Args[1] 13773 v.reset(OpAMD64AVGQU) 13774 v.AddArg(x) 13775 v.AddArg(y) 13776 return true 13777 } 13778 } 13779 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool { 13780 b := v.Block 13781 _ = b 13782 // match: (Bswap32 x) 13783 // cond: 13784 // result: (BSWAPL x) 13785 for { 13786 x := v.Args[0] 13787 v.reset(OpAMD64BSWAPL) 13788 v.AddArg(x) 13789 return true 13790 } 13791 } 13792 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool { 13793 b := v.Block 13794 _ = b 13795 // match: (Bswap64 x) 13796 // cond: 13797 // result: (BSWAPQ x) 13798 for { 13799 x := v.Args[0] 13800 v.reset(OpAMD64BSWAPQ) 13801 v.AddArg(x) 13802 return true 13803 } 13804 } 13805 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool { 13806 b := v.Block 13807 _ = b 13808 // match: (ClosureCall [argwid] entry closure mem) 13809 // cond: 13810 // result: (CALLclosure [argwid] entry closure mem) 13811 for { 13812 argwid := v.AuxInt 13813 entry := v.Args[0] 13814 closure := v.Args[1] 13815 mem := v.Args[2] 13816 v.reset(OpAMD64CALLclosure) 13817 v.AuxInt = argwid 13818 v.AddArg(entry) 13819 v.AddArg(closure) 13820 v.AddArg(mem) 13821 return true 13822 } 13823 } 13824 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { 13825 b := v.Block 13826 _ = b 13827 // match: (Com16 x) 13828 // cond: 13829 // result: (NOTL x) 13830 for { 13831 x := v.Args[0] 13832 v.reset(OpAMD64NOTL) 13833 v.AddArg(x) 13834 return true 13835 } 13836 } 13837 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool { 13838 b := v.Block 13839 _ = b 13840 // match: (Com32 x) 13841 // cond: 13842 // result: (NOTL x) 13843 for { 13844 x := v.Args[0] 13845 v.reset(OpAMD64NOTL) 13846 v.AddArg(x) 13847 return true 13848 } 
13849 } 13850 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool { 13851 b := v.Block 13852 _ = b 13853 // match: (Com64 x) 13854 // cond: 13855 // result: (NOTQ x) 13856 for { 13857 x := v.Args[0] 13858 v.reset(OpAMD64NOTQ) 13859 v.AddArg(x) 13860 return true 13861 } 13862 } 13863 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { 13864 b := v.Block 13865 _ = b 13866 // match: (Com8 x) 13867 // cond: 13868 // result: (NOTL x) 13869 for { 13870 x := v.Args[0] 13871 v.reset(OpAMD64NOTL) 13872 v.AddArg(x) 13873 return true 13874 } 13875 } 13876 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { 13877 b := v.Block 13878 _ = b 13879 // match: (Const16 [val]) 13880 // cond: 13881 // result: (MOVLconst [val]) 13882 for { 13883 val := v.AuxInt 13884 v.reset(OpAMD64MOVLconst) 13885 v.AuxInt = val 13886 return true 13887 } 13888 } 13889 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool { 13890 b := v.Block 13891 _ = b 13892 // match: (Const32 [val]) 13893 // cond: 13894 // result: (MOVLconst [val]) 13895 for { 13896 val := v.AuxInt 13897 v.reset(OpAMD64MOVLconst) 13898 v.AuxInt = val 13899 return true 13900 } 13901 } 13902 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool { 13903 b := v.Block 13904 _ = b 13905 // match: (Const32F [val]) 13906 // cond: 13907 // result: (MOVSSconst [val]) 13908 for { 13909 val := v.AuxInt 13910 v.reset(OpAMD64MOVSSconst) 13911 v.AuxInt = val 13912 return true 13913 } 13914 } 13915 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool { 13916 b := v.Block 13917 _ = b 13918 // match: (Const64 [val]) 13919 // cond: 13920 // result: (MOVQconst [val]) 13921 for { 13922 val := v.AuxInt 13923 v.reset(OpAMD64MOVQconst) 13924 v.AuxInt = val 13925 return true 13926 } 13927 } 13928 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool { 13929 b := v.Block 13930 _ = b 13931 // match: (Const64F [val]) 13932 // cond: 13933 // result: (MOVSDconst [val]) 13934 for { 13935 
val := v.AuxInt 13936 v.reset(OpAMD64MOVSDconst) 13937 v.AuxInt = val 13938 return true 13939 } 13940 } 13941 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { 13942 b := v.Block 13943 _ = b 13944 // match: (Const8 [val]) 13945 // cond: 13946 // result: (MOVLconst [val]) 13947 for { 13948 val := v.AuxInt 13949 v.reset(OpAMD64MOVLconst) 13950 v.AuxInt = val 13951 return true 13952 } 13953 } 13954 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { 13955 b := v.Block 13956 _ = b 13957 // match: (ConstBool [b]) 13958 // cond: 13959 // result: (MOVLconst [b]) 13960 for { 13961 b := v.AuxInt 13962 v.reset(OpAMD64MOVLconst) 13963 v.AuxInt = b 13964 return true 13965 } 13966 } 13967 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool { 13968 b := v.Block 13969 _ = b 13970 // match: (ConstNil) 13971 // cond: config.PtrSize == 8 13972 // result: (MOVQconst [0]) 13973 for { 13974 if !(config.PtrSize == 8) { 13975 break 13976 } 13977 v.reset(OpAMD64MOVQconst) 13978 v.AuxInt = 0 13979 return true 13980 } 13981 // match: (ConstNil) 13982 // cond: config.PtrSize == 4 13983 // result: (MOVLconst [0]) 13984 for { 13985 if !(config.PtrSize == 4) { 13986 break 13987 } 13988 v.reset(OpAMD64MOVLconst) 13989 v.AuxInt = 0 13990 return true 13991 } 13992 return false 13993 } 13994 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool { 13995 b := v.Block 13996 _ = b 13997 // match: (Convert <t> x mem) 13998 // cond: config.PtrSize == 8 13999 // result: (MOVQconvert <t> x mem) 14000 for { 14001 t := v.Type 14002 x := v.Args[0] 14003 mem := v.Args[1] 14004 if !(config.PtrSize == 8) { 14005 break 14006 } 14007 v.reset(OpAMD64MOVQconvert) 14008 v.Type = t 14009 v.AddArg(x) 14010 v.AddArg(mem) 14011 return true 14012 } 14013 // match: (Convert <t> x mem) 14014 // cond: config.PtrSize == 4 14015 // result: (MOVLconvert <t> x mem) 14016 for { 14017 t := v.Type 14018 x := v.Args[0] 14019 mem := v.Args[1] 14020 if !(config.PtrSize == 4) { 
14021 break 14022 } 14023 v.reset(OpAMD64MOVLconvert) 14024 v.Type = t 14025 v.AddArg(x) 14026 v.AddArg(mem) 14027 return true 14028 } 14029 return false 14030 } 14031 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool { 14032 b := v.Block 14033 _ = b 14034 // match: (Ctz32 <t> x) 14035 // cond: 14036 // result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x))) 14037 for { 14038 t := v.Type 14039 x := v.Args[0] 14040 v.reset(OpAMD64CMOVLEQ) 14041 v0 := b.NewValue0(v.Line, OpSelect0, t) 14042 v1 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 14043 v1.AddArg(x) 14044 v0.AddArg(v1) 14045 v.AddArg(v0) 14046 v2 := b.NewValue0(v.Line, OpAMD64MOVLconst, t) 14047 v2.AuxInt = 32 14048 v.AddArg(v2) 14049 v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags) 14050 v4 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 14051 v4.AddArg(x) 14052 v3.AddArg(v4) 14053 v.AddArg(v3) 14054 return true 14055 } 14056 } 14057 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool { 14058 b := v.Block 14059 _ = b 14060 // match: (Ctz64 <t> x) 14061 // cond: 14062 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x))) 14063 for { 14064 t := v.Type 14065 x := v.Args[0] 14066 v.reset(OpAMD64CMOVQEQ) 14067 v0 := b.NewValue0(v.Line, OpSelect0, t) 14068 v1 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 14069 v1.AddArg(x) 14070 v0.AddArg(v1) 14071 v.AddArg(v0) 14072 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, t) 14073 v2.AuxInt = 64 14074 v.AddArg(v2) 14075 v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags) 14076 v4 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 14077 v4.AddArg(x) 14078 v3.AddArg(v4) 14079 v.AddArg(v3) 14080 return true 14081 } 14082 } 14083 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool { 14084 b := v.Block 14085 _ = b 14086 // match: 
(Cvt32Fto32 x) 14087 // cond: 14088 // result: (CVTTSS2SL x) 14089 for { 14090 x := v.Args[0] 14091 v.reset(OpAMD64CVTTSS2SL) 14092 v.AddArg(x) 14093 return true 14094 } 14095 } 14096 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool { 14097 b := v.Block 14098 _ = b 14099 // match: (Cvt32Fto64 x) 14100 // cond: 14101 // result: (CVTTSS2SQ x) 14102 for { 14103 x := v.Args[0] 14104 v.reset(OpAMD64CVTTSS2SQ) 14105 v.AddArg(x) 14106 return true 14107 } 14108 } 14109 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool { 14110 b := v.Block 14111 _ = b 14112 // match: (Cvt32Fto64F x) 14113 // cond: 14114 // result: (CVTSS2SD x) 14115 for { 14116 x := v.Args[0] 14117 v.reset(OpAMD64CVTSS2SD) 14118 v.AddArg(x) 14119 return true 14120 } 14121 } 14122 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool { 14123 b := v.Block 14124 _ = b 14125 // match: (Cvt32to32F x) 14126 // cond: 14127 // result: (CVTSL2SS x) 14128 for { 14129 x := v.Args[0] 14130 v.reset(OpAMD64CVTSL2SS) 14131 v.AddArg(x) 14132 return true 14133 } 14134 } 14135 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool { 14136 b := v.Block 14137 _ = b 14138 // match: (Cvt32to64F x) 14139 // cond: 14140 // result: (CVTSL2SD x) 14141 for { 14142 x := v.Args[0] 14143 v.reset(OpAMD64CVTSL2SD) 14144 v.AddArg(x) 14145 return true 14146 } 14147 } 14148 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool { 14149 b := v.Block 14150 _ = b 14151 // match: (Cvt64Fto32 x) 14152 // cond: 14153 // result: (CVTTSD2SL x) 14154 for { 14155 x := v.Args[0] 14156 v.reset(OpAMD64CVTTSD2SL) 14157 v.AddArg(x) 14158 return true 14159 } 14160 } 14161 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool { 14162 b := v.Block 14163 _ = b 14164 // match: (Cvt64Fto32F x) 14165 // cond: 14166 // result: (CVTSD2SS x) 14167 for { 14168 x := v.Args[0] 14169 v.reset(OpAMD64CVTSD2SS) 14170 v.AddArg(x) 14171 return true 14172 } 14173 } 14174 func 
rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool { 14175 b := v.Block 14176 _ = b 14177 // match: (Cvt64Fto64 x) 14178 // cond: 14179 // result: (CVTTSD2SQ x) 14180 for { 14181 x := v.Args[0] 14182 v.reset(OpAMD64CVTTSD2SQ) 14183 v.AddArg(x) 14184 return true 14185 } 14186 } 14187 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool { 14188 b := v.Block 14189 _ = b 14190 // match: (Cvt64to32F x) 14191 // cond: 14192 // result: (CVTSQ2SS x) 14193 for { 14194 x := v.Args[0] 14195 v.reset(OpAMD64CVTSQ2SS) 14196 v.AddArg(x) 14197 return true 14198 } 14199 } 14200 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool { 14201 b := v.Block 14202 _ = b 14203 // match: (Cvt64to64F x) 14204 // cond: 14205 // result: (CVTSQ2SD x) 14206 for { 14207 x := v.Args[0] 14208 v.reset(OpAMD64CVTSQ2SD) 14209 v.AddArg(x) 14210 return true 14211 } 14212 } 14213 func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool { 14214 b := v.Block 14215 _ = b 14216 // match: (DeferCall [argwid] mem) 14217 // cond: 14218 // result: (CALLdefer [argwid] mem) 14219 for { 14220 argwid := v.AuxInt 14221 mem := v.Args[0] 14222 v.reset(OpAMD64CALLdefer) 14223 v.AuxInt = argwid 14224 v.AddArg(mem) 14225 return true 14226 } 14227 } 14228 func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool { 14229 b := v.Block 14230 _ = b 14231 // match: (Div16 x y) 14232 // cond: 14233 // result: (Select0 (DIVW x y)) 14234 for { 14235 x := v.Args[0] 14236 y := v.Args[1] 14237 v.reset(OpSelect0) 14238 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 14239 v0.AddArg(x) 14240 v0.AddArg(y) 14241 v.AddArg(v0) 14242 return true 14243 } 14244 } 14245 func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool { 14246 b := v.Block 14247 _ = b 14248 // match: (Div16u x y) 14249 // cond: 14250 // result: (Select0 (DIVWU x y)) 14251 for { 14252 x := v.Args[0] 14253 y := v.Args[1] 14254 v.reset(OpSelect0) 14255 v0 := 
b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 14256 v0.AddArg(x) 14257 v0.AddArg(y) 14258 v.AddArg(v0) 14259 return true 14260 } 14261 } 14262 func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool { 14263 b := v.Block 14264 _ = b 14265 // match: (Div32 x y) 14266 // cond: 14267 // result: (Select0 (DIVL x y)) 14268 for { 14269 x := v.Args[0] 14270 y := v.Args[1] 14271 v.reset(OpSelect0) 14272 v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 14273 v0.AddArg(x) 14274 v0.AddArg(y) 14275 v.AddArg(v0) 14276 return true 14277 } 14278 } 14279 func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool { 14280 b := v.Block 14281 _ = b 14282 // match: (Div32F x y) 14283 // cond: 14284 // result: (DIVSS x y) 14285 for { 14286 x := v.Args[0] 14287 y := v.Args[1] 14288 v.reset(OpAMD64DIVSS) 14289 v.AddArg(x) 14290 v.AddArg(y) 14291 return true 14292 } 14293 } 14294 func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool { 14295 b := v.Block 14296 _ = b 14297 // match: (Div32u x y) 14298 // cond: 14299 // result: (Select0 (DIVLU x y)) 14300 for { 14301 x := v.Args[0] 14302 y := v.Args[1] 14303 v.reset(OpSelect0) 14304 v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 14305 v0.AddArg(x) 14306 v0.AddArg(y) 14307 v.AddArg(v0) 14308 return true 14309 } 14310 } 14311 func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool { 14312 b := v.Block 14313 _ = b 14314 // match: (Div64 x y) 14315 // cond: 14316 // result: (Select0 (DIVQ x y)) 14317 for { 14318 x := v.Args[0] 14319 y := v.Args[1] 14320 v.reset(OpSelect0) 14321 v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 14322 v0.AddArg(x) 14323 v0.AddArg(y) 14324 v.AddArg(v0) 14325 return true 14326 } 14327 } 14328 func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool { 14329 b := v.Block 14330 _ = b 14331 // match: 
(Div64F x y) 14332 // cond: 14333 // result: (DIVSD x y) 14334 for { 14335 x := v.Args[0] 14336 y := v.Args[1] 14337 v.reset(OpAMD64DIVSD) 14338 v.AddArg(x) 14339 v.AddArg(y) 14340 return true 14341 } 14342 } 14343 func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool { 14344 b := v.Block 14345 _ = b 14346 // match: (Div64u x y) 14347 // cond: 14348 // result: (Select0 (DIVQU x y)) 14349 for { 14350 x := v.Args[0] 14351 y := v.Args[1] 14352 v.reset(OpSelect0) 14353 v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 14354 v0.AddArg(x) 14355 v0.AddArg(y) 14356 v.AddArg(v0) 14357 return true 14358 } 14359 } 14360 func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool { 14361 b := v.Block 14362 _ = b 14363 // match: (Div8 x y) 14364 // cond: 14365 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 14366 for { 14367 x := v.Args[0] 14368 y := v.Args[1] 14369 v.reset(OpSelect0) 14370 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 14371 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 14372 v1.AddArg(x) 14373 v0.AddArg(v1) 14374 v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 14375 v2.AddArg(y) 14376 v0.AddArg(v2) 14377 v.AddArg(v0) 14378 return true 14379 } 14380 } 14381 func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool { 14382 b := v.Block 14383 _ = b 14384 // match: (Div8u x y) 14385 // cond: 14386 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 14387 for { 14388 x := v.Args[0] 14389 y := v.Args[1] 14390 v.reset(OpSelect0) 14391 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 14392 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 14393 v1.AddArg(x) 14394 v0.AddArg(v1) 14395 v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 14396 v2.AddArg(y) 14397 v0.AddArg(v2) 14398 v.AddArg(v0) 14399 return true 14400 } 
14401 } 14402 func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool { 14403 b := v.Block 14404 _ = b 14405 // match: (Eq16 x y) 14406 // cond: 14407 // result: (SETEQ (CMPW x y)) 14408 for { 14409 x := v.Args[0] 14410 y := v.Args[1] 14411 v.reset(OpAMD64SETEQ) 14412 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14413 v0.AddArg(x) 14414 v0.AddArg(y) 14415 v.AddArg(v0) 14416 return true 14417 } 14418 } 14419 func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool { 14420 b := v.Block 14421 _ = b 14422 // match: (Eq32 x y) 14423 // cond: 14424 // result: (SETEQ (CMPL x y)) 14425 for { 14426 x := v.Args[0] 14427 y := v.Args[1] 14428 v.reset(OpAMD64SETEQ) 14429 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14430 v0.AddArg(x) 14431 v0.AddArg(y) 14432 v.AddArg(v0) 14433 return true 14434 } 14435 } 14436 func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool { 14437 b := v.Block 14438 _ = b 14439 // match: (Eq32F x y) 14440 // cond: 14441 // result: (SETEQF (UCOMISS x y)) 14442 for { 14443 x := v.Args[0] 14444 y := v.Args[1] 14445 v.reset(OpAMD64SETEQF) 14446 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 14447 v0.AddArg(x) 14448 v0.AddArg(y) 14449 v.AddArg(v0) 14450 return true 14451 } 14452 } 14453 func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool { 14454 b := v.Block 14455 _ = b 14456 // match: (Eq64 x y) 14457 // cond: 14458 // result: (SETEQ (CMPQ x y)) 14459 for { 14460 x := v.Args[0] 14461 y := v.Args[1] 14462 v.reset(OpAMD64SETEQ) 14463 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14464 v0.AddArg(x) 14465 v0.AddArg(y) 14466 v.AddArg(v0) 14467 return true 14468 } 14469 } 14470 func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool { 14471 b := v.Block 14472 _ = b 14473 // match: (Eq64F x y) 14474 // cond: 14475 // result: (SETEQF (UCOMISD x y)) 14476 for { 14477 x := v.Args[0] 14478 y := v.Args[1] 14479 v.reset(OpAMD64SETEQF) 14480 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 14481 v0.AddArg(x) 14482 
v0.AddArg(y) 14483 v.AddArg(v0) 14484 return true 14485 } 14486 } 14487 func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { 14488 b := v.Block 14489 _ = b 14490 // match: (Eq8 x y) 14491 // cond: 14492 // result: (SETEQ (CMPB x y)) 14493 for { 14494 x := v.Args[0] 14495 y := v.Args[1] 14496 v.reset(OpAMD64SETEQ) 14497 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14498 v0.AddArg(x) 14499 v0.AddArg(y) 14500 v.AddArg(v0) 14501 return true 14502 } 14503 } 14504 func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool { 14505 b := v.Block 14506 _ = b 14507 // match: (EqB x y) 14508 // cond: 14509 // result: (SETEQ (CMPB x y)) 14510 for { 14511 x := v.Args[0] 14512 y := v.Args[1] 14513 v.reset(OpAMD64SETEQ) 14514 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14515 v0.AddArg(x) 14516 v0.AddArg(y) 14517 v.AddArg(v0) 14518 return true 14519 } 14520 } 14521 func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { 14522 b := v.Block 14523 _ = b 14524 // match: (EqPtr x y) 14525 // cond: config.PtrSize == 8 14526 // result: (SETEQ (CMPQ x y)) 14527 for { 14528 x := v.Args[0] 14529 y := v.Args[1] 14530 if !(config.PtrSize == 8) { 14531 break 14532 } 14533 v.reset(OpAMD64SETEQ) 14534 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14535 v0.AddArg(x) 14536 v0.AddArg(y) 14537 v.AddArg(v0) 14538 return true 14539 } 14540 // match: (EqPtr x y) 14541 // cond: config.PtrSize == 4 14542 // result: (SETEQ (CMPL x y)) 14543 for { 14544 x := v.Args[0] 14545 y := v.Args[1] 14546 if !(config.PtrSize == 4) { 14547 break 14548 } 14549 v.reset(OpAMD64SETEQ) 14550 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14551 v0.AddArg(x) 14552 v0.AddArg(y) 14553 v.AddArg(v0) 14554 return true 14555 } 14556 return false 14557 } 14558 func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool { 14559 b := v.Block 14560 _ = b 14561 // match: (Geq16 x y) 14562 // cond: 14563 // result: (SETGE (CMPW x y)) 14564 for { 14565 x := v.Args[0] 14566 y := v.Args[1] 14567 
v.reset(OpAMD64SETGE) 14568 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14569 v0.AddArg(x) 14570 v0.AddArg(y) 14571 v.AddArg(v0) 14572 return true 14573 } 14574 } 14575 func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool { 14576 b := v.Block 14577 _ = b 14578 // match: (Geq16U x y) 14579 // cond: 14580 // result: (SETAE (CMPW x y)) 14581 for { 14582 x := v.Args[0] 14583 y := v.Args[1] 14584 v.reset(OpAMD64SETAE) 14585 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14586 v0.AddArg(x) 14587 v0.AddArg(y) 14588 v.AddArg(v0) 14589 return true 14590 } 14591 } 14592 func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool { 14593 b := v.Block 14594 _ = b 14595 // match: (Geq32 x y) 14596 // cond: 14597 // result: (SETGE (CMPL x y)) 14598 for { 14599 x := v.Args[0] 14600 y := v.Args[1] 14601 v.reset(OpAMD64SETGE) 14602 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14603 v0.AddArg(x) 14604 v0.AddArg(y) 14605 v.AddArg(v0) 14606 return true 14607 } 14608 } 14609 func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool { 14610 b := v.Block 14611 _ = b 14612 // match: (Geq32F x y) 14613 // cond: 14614 // result: (SETGEF (UCOMISS x y)) 14615 for { 14616 x := v.Args[0] 14617 y := v.Args[1] 14618 v.reset(OpAMD64SETGEF) 14619 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 14620 v0.AddArg(x) 14621 v0.AddArg(y) 14622 v.AddArg(v0) 14623 return true 14624 } 14625 } 14626 func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool { 14627 b := v.Block 14628 _ = b 14629 // match: (Geq32U x y) 14630 // cond: 14631 // result: (SETAE (CMPL x y)) 14632 for { 14633 x := v.Args[0] 14634 y := v.Args[1] 14635 v.reset(OpAMD64SETAE) 14636 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14637 v0.AddArg(x) 14638 v0.AddArg(y) 14639 v.AddArg(v0) 14640 return true 14641 } 14642 } 14643 func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool { 14644 b := v.Block 14645 _ = b 14646 // match: (Geq64 x y) 14647 // cond: 14648 // result: (SETGE (CMPQ x y)) 
14649 for { 14650 x := v.Args[0] 14651 y := v.Args[1] 14652 v.reset(OpAMD64SETGE) 14653 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14654 v0.AddArg(x) 14655 v0.AddArg(y) 14656 v.AddArg(v0) 14657 return true 14658 } 14659 } 14660 func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool { 14661 b := v.Block 14662 _ = b 14663 // match: (Geq64F x y) 14664 // cond: 14665 // result: (SETGEF (UCOMISD x y)) 14666 for { 14667 x := v.Args[0] 14668 y := v.Args[1] 14669 v.reset(OpAMD64SETGEF) 14670 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 14671 v0.AddArg(x) 14672 v0.AddArg(y) 14673 v.AddArg(v0) 14674 return true 14675 } 14676 } 14677 func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool { 14678 b := v.Block 14679 _ = b 14680 // match: (Geq64U x y) 14681 // cond: 14682 // result: (SETAE (CMPQ x y)) 14683 for { 14684 x := v.Args[0] 14685 y := v.Args[1] 14686 v.reset(OpAMD64SETAE) 14687 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14688 v0.AddArg(x) 14689 v0.AddArg(y) 14690 v.AddArg(v0) 14691 return true 14692 } 14693 } 14694 func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool { 14695 b := v.Block 14696 _ = b 14697 // match: (Geq8 x y) 14698 // cond: 14699 // result: (SETGE (CMPB x y)) 14700 for { 14701 x := v.Args[0] 14702 y := v.Args[1] 14703 v.reset(OpAMD64SETGE) 14704 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14705 v0.AddArg(x) 14706 v0.AddArg(y) 14707 v.AddArg(v0) 14708 return true 14709 } 14710 } 14711 func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool { 14712 b := v.Block 14713 _ = b 14714 // match: (Geq8U x y) 14715 // cond: 14716 // result: (SETAE (CMPB x y)) 14717 for { 14718 x := v.Args[0] 14719 y := v.Args[1] 14720 v.reset(OpAMD64SETAE) 14721 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14722 v0.AddArg(x) 14723 v0.AddArg(y) 14724 v.AddArg(v0) 14725 return true 14726 } 14727 } 14728 func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool { 14729 b := v.Block 14730 _ = b 14731 // match: 
(GetClosurePtr) 14732 // cond: 14733 // result: (LoweredGetClosurePtr) 14734 for { 14735 v.reset(OpAMD64LoweredGetClosurePtr) 14736 return true 14737 } 14738 } 14739 func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool { 14740 b := v.Block 14741 _ = b 14742 // match: (GetG mem) 14743 // cond: 14744 // result: (LoweredGetG mem) 14745 for { 14746 mem := v.Args[0] 14747 v.reset(OpAMD64LoweredGetG) 14748 v.AddArg(mem) 14749 return true 14750 } 14751 } 14752 func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool { 14753 b := v.Block 14754 _ = b 14755 // match: (GoCall [argwid] mem) 14756 // cond: 14757 // result: (CALLgo [argwid] mem) 14758 for { 14759 argwid := v.AuxInt 14760 mem := v.Args[0] 14761 v.reset(OpAMD64CALLgo) 14762 v.AuxInt = argwid 14763 v.AddArg(mem) 14764 return true 14765 } 14766 } 14767 func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool { 14768 b := v.Block 14769 _ = b 14770 // match: (Greater16 x y) 14771 // cond: 14772 // result: (SETG (CMPW x y)) 14773 for { 14774 x := v.Args[0] 14775 y := v.Args[1] 14776 v.reset(OpAMD64SETG) 14777 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14778 v0.AddArg(x) 14779 v0.AddArg(y) 14780 v.AddArg(v0) 14781 return true 14782 } 14783 } 14784 func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool { 14785 b := v.Block 14786 _ = b 14787 // match: (Greater16U x y) 14788 // cond: 14789 // result: (SETA (CMPW x y)) 14790 for { 14791 x := v.Args[0] 14792 y := v.Args[1] 14793 v.reset(OpAMD64SETA) 14794 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14795 v0.AddArg(x) 14796 v0.AddArg(y) 14797 v.AddArg(v0) 14798 return true 14799 } 14800 } 14801 func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool { 14802 b := v.Block 14803 _ = b 14804 // match: (Greater32 x y) 14805 // cond: 14806 // result: (SETG (CMPL x y)) 14807 for { 14808 x := v.Args[0] 14809 y := v.Args[1] 14810 v.reset(OpAMD64SETG) 14811 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14812 v0.AddArg(x) 
14813 v0.AddArg(y) 14814 v.AddArg(v0) 14815 return true 14816 } 14817 } 14818 func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool { 14819 b := v.Block 14820 _ = b 14821 // match: (Greater32F x y) 14822 // cond: 14823 // result: (SETGF (UCOMISS x y)) 14824 for { 14825 x := v.Args[0] 14826 y := v.Args[1] 14827 v.reset(OpAMD64SETGF) 14828 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 14829 v0.AddArg(x) 14830 v0.AddArg(y) 14831 v.AddArg(v0) 14832 return true 14833 } 14834 } 14835 func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool { 14836 b := v.Block 14837 _ = b 14838 // match: (Greater32U x y) 14839 // cond: 14840 // result: (SETA (CMPL x y)) 14841 for { 14842 x := v.Args[0] 14843 y := v.Args[1] 14844 v.reset(OpAMD64SETA) 14845 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14846 v0.AddArg(x) 14847 v0.AddArg(y) 14848 v.AddArg(v0) 14849 return true 14850 } 14851 } 14852 func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool { 14853 b := v.Block 14854 _ = b 14855 // match: (Greater64 x y) 14856 // cond: 14857 // result: (SETG (CMPQ x y)) 14858 for { 14859 x := v.Args[0] 14860 y := v.Args[1] 14861 v.reset(OpAMD64SETG) 14862 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14863 v0.AddArg(x) 14864 v0.AddArg(y) 14865 v.AddArg(v0) 14866 return true 14867 } 14868 } 14869 func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool { 14870 b := v.Block 14871 _ = b 14872 // match: (Greater64F x y) 14873 // cond: 14874 // result: (SETGF (UCOMISD x y)) 14875 for { 14876 x := v.Args[0] 14877 y := v.Args[1] 14878 v.reset(OpAMD64SETGF) 14879 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 14880 v0.AddArg(x) 14881 v0.AddArg(y) 14882 v.AddArg(v0) 14883 return true 14884 } 14885 } 14886 func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool { 14887 b := v.Block 14888 _ = b 14889 // match: (Greater64U x y) 14890 // cond: 14891 // result: (SETA (CMPQ x y)) 14892 for { 14893 x := v.Args[0] 14894 y := v.Args[1] 
14895 v.reset(OpAMD64SETA) 14896 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14897 v0.AddArg(x) 14898 v0.AddArg(y) 14899 v.AddArg(v0) 14900 return true 14901 } 14902 } 14903 func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 14904 b := v.Block 14905 _ = b 14906 // match: (Greater8 x y) 14907 // cond: 14908 // result: (SETG (CMPB x y)) 14909 for { 14910 x := v.Args[0] 14911 y := v.Args[1] 14912 v.reset(OpAMD64SETG) 14913 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14914 v0.AddArg(x) 14915 v0.AddArg(y) 14916 v.AddArg(v0) 14917 return true 14918 } 14919 } 14920 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 14921 b := v.Block 14922 _ = b 14923 // match: (Greater8U x y) 14924 // cond: 14925 // result: (SETA (CMPB x y)) 14926 for { 14927 x := v.Args[0] 14928 y := v.Args[1] 14929 v.reset(OpAMD64SETA) 14930 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14931 v0.AddArg(x) 14932 v0.AddArg(y) 14933 v.AddArg(v0) 14934 return true 14935 } 14936 } 14937 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 14938 b := v.Block 14939 _ = b 14940 // match: (Hmul16 x y) 14941 // cond: 14942 // result: (HMULW x y) 14943 for { 14944 x := v.Args[0] 14945 y := v.Args[1] 14946 v.reset(OpAMD64HMULW) 14947 v.AddArg(x) 14948 v.AddArg(y) 14949 return true 14950 } 14951 } 14952 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 14953 b := v.Block 14954 _ = b 14955 // match: (Hmul16u x y) 14956 // cond: 14957 // result: (HMULWU x y) 14958 for { 14959 x := v.Args[0] 14960 y := v.Args[1] 14961 v.reset(OpAMD64HMULWU) 14962 v.AddArg(x) 14963 v.AddArg(y) 14964 return true 14965 } 14966 } 14967 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 14968 b := v.Block 14969 _ = b 14970 // match: (Hmul32 x y) 14971 // cond: 14972 // result: (HMULL x y) 14973 for { 14974 x := v.Args[0] 14975 y := v.Args[1] 14976 v.reset(OpAMD64HMULL) 14977 v.AddArg(x) 14978 v.AddArg(y) 14979 return true 14980 } 14981 } 14982 func 
rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 14983 b := v.Block 14984 _ = b 14985 // match: (Hmul32u x y) 14986 // cond: 14987 // result: (HMULLU x y) 14988 for { 14989 x := v.Args[0] 14990 y := v.Args[1] 14991 v.reset(OpAMD64HMULLU) 14992 v.AddArg(x) 14993 v.AddArg(y) 14994 return true 14995 } 14996 } 14997 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 14998 b := v.Block 14999 _ = b 15000 // match: (Hmul64 x y) 15001 // cond: 15002 // result: (HMULQ x y) 15003 for { 15004 x := v.Args[0] 15005 y := v.Args[1] 15006 v.reset(OpAMD64HMULQ) 15007 v.AddArg(x) 15008 v.AddArg(y) 15009 return true 15010 } 15011 } 15012 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 15013 b := v.Block 15014 _ = b 15015 // match: (Hmul64u x y) 15016 // cond: 15017 // result: (HMULQU x y) 15018 for { 15019 x := v.Args[0] 15020 y := v.Args[1] 15021 v.reset(OpAMD64HMULQU) 15022 v.AddArg(x) 15023 v.AddArg(y) 15024 return true 15025 } 15026 } 15027 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 15028 b := v.Block 15029 _ = b 15030 // match: (Hmul8 x y) 15031 // cond: 15032 // result: (HMULB x y) 15033 for { 15034 x := v.Args[0] 15035 y := v.Args[1] 15036 v.reset(OpAMD64HMULB) 15037 v.AddArg(x) 15038 v.AddArg(y) 15039 return true 15040 } 15041 } 15042 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 15043 b := v.Block 15044 _ = b 15045 // match: (Hmul8u x y) 15046 // cond: 15047 // result: (HMULBU x y) 15048 for { 15049 x := v.Args[0] 15050 y := v.Args[1] 15051 v.reset(OpAMD64HMULBU) 15052 v.AddArg(x) 15053 v.AddArg(y) 15054 return true 15055 } 15056 } 15057 func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool { 15058 b := v.Block 15059 _ = b 15060 // match: (Int64Hi x) 15061 // cond: 15062 // result: (SHRQconst [32] x) 15063 for { 15064 x := v.Args[0] 15065 v.reset(OpAMD64SHRQconst) 15066 v.AuxInt = 32 15067 v.AddArg(x) 15068 return true 15069 } 15070 } 15071 func rewriteValueAMD64_OpInterCall(v 
*Value, config *Config) bool { 15072 b := v.Block 15073 _ = b 15074 // match: (InterCall [argwid] entry mem) 15075 // cond: 15076 // result: (CALLinter [argwid] entry mem) 15077 for { 15078 argwid := v.AuxInt 15079 entry := v.Args[0] 15080 mem := v.Args[1] 15081 v.reset(OpAMD64CALLinter) 15082 v.AuxInt = argwid 15083 v.AddArg(entry) 15084 v.AddArg(mem) 15085 return true 15086 } 15087 } 15088 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 15089 b := v.Block 15090 _ = b 15091 // match: (IsInBounds idx len) 15092 // cond: 15093 // result: (SETB (CMPQ idx len)) 15094 for { 15095 idx := v.Args[0] 15096 len := v.Args[1] 15097 v.reset(OpAMD64SETB) 15098 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15099 v0.AddArg(idx) 15100 v0.AddArg(len) 15101 v.AddArg(v0) 15102 return true 15103 } 15104 } 15105 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 15106 b := v.Block 15107 _ = b 15108 // match: (IsNonNil p) 15109 // cond: config.PtrSize == 8 15110 // result: (SETNE (TESTQ p p)) 15111 for { 15112 p := v.Args[0] 15113 if !(config.PtrSize == 8) { 15114 break 15115 } 15116 v.reset(OpAMD64SETNE) 15117 v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags) 15118 v0.AddArg(p) 15119 v0.AddArg(p) 15120 v.AddArg(v0) 15121 return true 15122 } 15123 // match: (IsNonNil p) 15124 // cond: config.PtrSize == 4 15125 // result: (SETNE (TESTL p p)) 15126 for { 15127 p := v.Args[0] 15128 if !(config.PtrSize == 4) { 15129 break 15130 } 15131 v.reset(OpAMD64SETNE) 15132 v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags) 15133 v0.AddArg(p) 15134 v0.AddArg(p) 15135 v.AddArg(v0) 15136 return true 15137 } 15138 return false 15139 } 15140 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 15141 b := v.Block 15142 _ = b 15143 // match: (IsSliceInBounds idx len) 15144 // cond: 15145 // result: (SETBE (CMPQ idx len)) 15146 for { 15147 idx := v.Args[0] 15148 len := v.Args[1] 15149 v.reset(OpAMD64SETBE) 15150 v0 := b.NewValue0(v.Line, 
OpAMD64CMPQ, TypeFlags) 15151 v0.AddArg(idx) 15152 v0.AddArg(len) 15153 v.AddArg(v0) 15154 return true 15155 } 15156 } 15157 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 15158 b := v.Block 15159 _ = b 15160 // match: (Leq16 x y) 15161 // cond: 15162 // result: (SETLE (CMPW x y)) 15163 for { 15164 x := v.Args[0] 15165 y := v.Args[1] 15166 v.reset(OpAMD64SETLE) 15167 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15168 v0.AddArg(x) 15169 v0.AddArg(y) 15170 v.AddArg(v0) 15171 return true 15172 } 15173 } 15174 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 15175 b := v.Block 15176 _ = b 15177 // match: (Leq16U x y) 15178 // cond: 15179 // result: (SETBE (CMPW x y)) 15180 for { 15181 x := v.Args[0] 15182 y := v.Args[1] 15183 v.reset(OpAMD64SETBE) 15184 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15185 v0.AddArg(x) 15186 v0.AddArg(y) 15187 v.AddArg(v0) 15188 return true 15189 } 15190 } 15191 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 15192 b := v.Block 15193 _ = b 15194 // match: (Leq32 x y) 15195 // cond: 15196 // result: (SETLE (CMPL x y)) 15197 for { 15198 x := v.Args[0] 15199 y := v.Args[1] 15200 v.reset(OpAMD64SETLE) 15201 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15202 v0.AddArg(x) 15203 v0.AddArg(y) 15204 v.AddArg(v0) 15205 return true 15206 } 15207 } 15208 func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool { 15209 b := v.Block 15210 _ = b 15211 // match: (Leq32F x y) 15212 // cond: 15213 // result: (SETGEF (UCOMISS y x)) 15214 for { 15215 x := v.Args[0] 15216 y := v.Args[1] 15217 v.reset(OpAMD64SETGEF) 15218 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15219 v0.AddArg(y) 15220 v0.AddArg(x) 15221 v.AddArg(v0) 15222 return true 15223 } 15224 } 15225 func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool { 15226 b := v.Block 15227 _ = b 15228 // match: (Leq32U x y) 15229 // cond: 15230 // result: (SETBE (CMPL x y)) 15231 for { 15232 x := v.Args[0] 15233 y := 
v.Args[1] 15234 v.reset(OpAMD64SETBE) 15235 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15236 v0.AddArg(x) 15237 v0.AddArg(y) 15238 v.AddArg(v0) 15239 return true 15240 } 15241 } 15242 func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool { 15243 b := v.Block 15244 _ = b 15245 // match: (Leq64 x y) 15246 // cond: 15247 // result: (SETLE (CMPQ x y)) 15248 for { 15249 x := v.Args[0] 15250 y := v.Args[1] 15251 v.reset(OpAMD64SETLE) 15252 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15253 v0.AddArg(x) 15254 v0.AddArg(y) 15255 v.AddArg(v0) 15256 return true 15257 } 15258 } 15259 func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool { 15260 b := v.Block 15261 _ = b 15262 // match: (Leq64F x y) 15263 // cond: 15264 // result: (SETGEF (UCOMISD y x)) 15265 for { 15266 x := v.Args[0] 15267 y := v.Args[1] 15268 v.reset(OpAMD64SETGEF) 15269 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15270 v0.AddArg(y) 15271 v0.AddArg(x) 15272 v.AddArg(v0) 15273 return true 15274 } 15275 } 15276 func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool { 15277 b := v.Block 15278 _ = b 15279 // match: (Leq64U x y) 15280 // cond: 15281 // result: (SETBE (CMPQ x y)) 15282 for { 15283 x := v.Args[0] 15284 y := v.Args[1] 15285 v.reset(OpAMD64SETBE) 15286 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15287 v0.AddArg(x) 15288 v0.AddArg(y) 15289 v.AddArg(v0) 15290 return true 15291 } 15292 } 15293 func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool { 15294 b := v.Block 15295 _ = b 15296 // match: (Leq8 x y) 15297 // cond: 15298 // result: (SETLE (CMPB x y)) 15299 for { 15300 x := v.Args[0] 15301 y := v.Args[1] 15302 v.reset(OpAMD64SETLE) 15303 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15304 v0.AddArg(x) 15305 v0.AddArg(y) 15306 v.AddArg(v0) 15307 return true 15308 } 15309 } 15310 func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool { 15311 b := v.Block 15312 _ = b 15313 // match: (Leq8U x y) 15314 // cond: 15315 // result: (SETBE 
(CMPB x y)) 15316 for { 15317 x := v.Args[0] 15318 y := v.Args[1] 15319 v.reset(OpAMD64SETBE) 15320 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15321 v0.AddArg(x) 15322 v0.AddArg(y) 15323 v.AddArg(v0) 15324 return true 15325 } 15326 } 15327 func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool { 15328 b := v.Block 15329 _ = b 15330 // match: (Less16 x y) 15331 // cond: 15332 // result: (SETL (CMPW x y)) 15333 for { 15334 x := v.Args[0] 15335 y := v.Args[1] 15336 v.reset(OpAMD64SETL) 15337 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15338 v0.AddArg(x) 15339 v0.AddArg(y) 15340 v.AddArg(v0) 15341 return true 15342 } 15343 } 15344 func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool { 15345 b := v.Block 15346 _ = b 15347 // match: (Less16U x y) 15348 // cond: 15349 // result: (SETB (CMPW x y)) 15350 for { 15351 x := v.Args[0] 15352 y := v.Args[1] 15353 v.reset(OpAMD64SETB) 15354 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15355 v0.AddArg(x) 15356 v0.AddArg(y) 15357 v.AddArg(v0) 15358 return true 15359 } 15360 } 15361 func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool { 15362 b := v.Block 15363 _ = b 15364 // match: (Less32 x y) 15365 // cond: 15366 // result: (SETL (CMPL x y)) 15367 for { 15368 x := v.Args[0] 15369 y := v.Args[1] 15370 v.reset(OpAMD64SETL) 15371 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15372 v0.AddArg(x) 15373 v0.AddArg(y) 15374 v.AddArg(v0) 15375 return true 15376 } 15377 } 15378 func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool { 15379 b := v.Block 15380 _ = b 15381 // match: (Less32F x y) 15382 // cond: 15383 // result: (SETGF (UCOMISS y x)) 15384 for { 15385 x := v.Args[0] 15386 y := v.Args[1] 15387 v.reset(OpAMD64SETGF) 15388 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15389 v0.AddArg(y) 15390 v0.AddArg(x) 15391 v.AddArg(v0) 15392 return true 15393 } 15394 } 15395 func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool { 15396 b := v.Block 15397 _ = b 15398 
// match: (Less32U x y) 15399 // cond: 15400 // result: (SETB (CMPL x y)) 15401 for { 15402 x := v.Args[0] 15403 y := v.Args[1] 15404 v.reset(OpAMD64SETB) 15405 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15406 v0.AddArg(x) 15407 v0.AddArg(y) 15408 v.AddArg(v0) 15409 return true 15410 } 15411 } 15412 func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool { 15413 b := v.Block 15414 _ = b 15415 // match: (Less64 x y) 15416 // cond: 15417 // result: (SETL (CMPQ x y)) 15418 for { 15419 x := v.Args[0] 15420 y := v.Args[1] 15421 v.reset(OpAMD64SETL) 15422 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15423 v0.AddArg(x) 15424 v0.AddArg(y) 15425 v.AddArg(v0) 15426 return true 15427 } 15428 } 15429 func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool { 15430 b := v.Block 15431 _ = b 15432 // match: (Less64F x y) 15433 // cond: 15434 // result: (SETGF (UCOMISD y x)) 15435 for { 15436 x := v.Args[0] 15437 y := v.Args[1] 15438 v.reset(OpAMD64SETGF) 15439 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15440 v0.AddArg(y) 15441 v0.AddArg(x) 15442 v.AddArg(v0) 15443 return true 15444 } 15445 } 15446 func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool { 15447 b := v.Block 15448 _ = b 15449 // match: (Less64U x y) 15450 // cond: 15451 // result: (SETB (CMPQ x y)) 15452 for { 15453 x := v.Args[0] 15454 y := v.Args[1] 15455 v.reset(OpAMD64SETB) 15456 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15457 v0.AddArg(x) 15458 v0.AddArg(y) 15459 v.AddArg(v0) 15460 return true 15461 } 15462 } 15463 func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool { 15464 b := v.Block 15465 _ = b 15466 // match: (Less8 x y) 15467 // cond: 15468 // result: (SETL (CMPB x y)) 15469 for { 15470 x := v.Args[0] 15471 y := v.Args[1] 15472 v.reset(OpAMD64SETL) 15473 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15474 v0.AddArg(x) 15475 v0.AddArg(y) 15476 v.AddArg(v0) 15477 return true 15478 } 15479 } 15480 func rewriteValueAMD64_OpLess8U(v *Value, 
config *Config) bool { 15481 b := v.Block 15482 _ = b 15483 // match: (Less8U x y) 15484 // cond: 15485 // result: (SETB (CMPB x y)) 15486 for { 15487 x := v.Args[0] 15488 y := v.Args[1] 15489 v.reset(OpAMD64SETB) 15490 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15491 v0.AddArg(x) 15492 v0.AddArg(y) 15493 v.AddArg(v0) 15494 return true 15495 } 15496 } 15497 func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool { 15498 b := v.Block 15499 _ = b 15500 // match: (Load <t> ptr mem) 15501 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 15502 // result: (MOVQload ptr mem) 15503 for { 15504 t := v.Type 15505 ptr := v.Args[0] 15506 mem := v.Args[1] 15507 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 15508 break 15509 } 15510 v.reset(OpAMD64MOVQload) 15511 v.AddArg(ptr) 15512 v.AddArg(mem) 15513 return true 15514 } 15515 // match: (Load <t> ptr mem) 15516 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 15517 // result: (MOVLload ptr mem) 15518 for { 15519 t := v.Type 15520 ptr := v.Args[0] 15521 mem := v.Args[1] 15522 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 15523 break 15524 } 15525 v.reset(OpAMD64MOVLload) 15526 v.AddArg(ptr) 15527 v.AddArg(mem) 15528 return true 15529 } 15530 // match: (Load <t> ptr mem) 15531 // cond: is16BitInt(t) 15532 // result: (MOVWload ptr mem) 15533 for { 15534 t := v.Type 15535 ptr := v.Args[0] 15536 mem := v.Args[1] 15537 if !(is16BitInt(t)) { 15538 break 15539 } 15540 v.reset(OpAMD64MOVWload) 15541 v.AddArg(ptr) 15542 v.AddArg(mem) 15543 return true 15544 } 15545 // match: (Load <t> ptr mem) 15546 // cond: (t.IsBoolean() || is8BitInt(t)) 15547 // result: (MOVBload ptr mem) 15548 for { 15549 t := v.Type 15550 ptr := v.Args[0] 15551 mem := v.Args[1] 15552 if !(t.IsBoolean() || is8BitInt(t)) { 15553 break 15554 } 15555 v.reset(OpAMD64MOVBload) 15556 v.AddArg(ptr) 15557 v.AddArg(mem) 15558 return true 15559 } 15560 // match: (Load <t> ptr mem) 15561 // cond: is32BitFloat(t) 15562 // 
result: (MOVSSload ptr mem) 15563 for { 15564 t := v.Type 15565 ptr := v.Args[0] 15566 mem := v.Args[1] 15567 if !(is32BitFloat(t)) { 15568 break 15569 } 15570 v.reset(OpAMD64MOVSSload) 15571 v.AddArg(ptr) 15572 v.AddArg(mem) 15573 return true 15574 } 15575 // match: (Load <t> ptr mem) 15576 // cond: is64BitFloat(t) 15577 // result: (MOVSDload ptr mem) 15578 for { 15579 t := v.Type 15580 ptr := v.Args[0] 15581 mem := v.Args[1] 15582 if !(is64BitFloat(t)) { 15583 break 15584 } 15585 v.reset(OpAMD64MOVSDload) 15586 v.AddArg(ptr) 15587 v.AddArg(mem) 15588 return true 15589 } 15590 return false 15591 } 15592 func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool { 15593 b := v.Block 15594 _ = b 15595 // match: (Lrot16 <t> x [c]) 15596 // cond: 15597 // result: (ROLWconst <t> [c&15] x) 15598 for { 15599 t := v.Type 15600 c := v.AuxInt 15601 x := v.Args[0] 15602 v.reset(OpAMD64ROLWconst) 15603 v.Type = t 15604 v.AuxInt = c & 15 15605 v.AddArg(x) 15606 return true 15607 } 15608 } 15609 func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool { 15610 b := v.Block 15611 _ = b 15612 // match: (Lrot32 <t> x [c]) 15613 // cond: 15614 // result: (ROLLconst <t> [c&31] x) 15615 for { 15616 t := v.Type 15617 c := v.AuxInt 15618 x := v.Args[0] 15619 v.reset(OpAMD64ROLLconst) 15620 v.Type = t 15621 v.AuxInt = c & 31 15622 v.AddArg(x) 15623 return true 15624 } 15625 } 15626 func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool { 15627 b := v.Block 15628 _ = b 15629 // match: (Lrot64 <t> x [c]) 15630 // cond: 15631 // result: (ROLQconst <t> [c&63] x) 15632 for { 15633 t := v.Type 15634 c := v.AuxInt 15635 x := v.Args[0] 15636 v.reset(OpAMD64ROLQconst) 15637 v.Type = t 15638 v.AuxInt = c & 63 15639 v.AddArg(x) 15640 return true 15641 } 15642 } 15643 func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool { 15644 b := v.Block 15645 _ = b 15646 // match: (Lrot8 <t> x [c]) 15647 // cond: 15648 // result: (ROLBconst <t> [c&7] x) 15649 for { 15650 t := v.Type 
15651 c := v.AuxInt 15652 x := v.Args[0] 15653 v.reset(OpAMD64ROLBconst) 15654 v.Type = t 15655 v.AuxInt = c & 7 15656 v.AddArg(x) 15657 return true 15658 } 15659 } 15660 func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { 15661 b := v.Block 15662 _ = b 15663 // match: (Lsh16x16 <t> x y) 15664 // cond: 15665 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 15666 for { 15667 t := v.Type 15668 x := v.Args[0] 15669 y := v.Args[1] 15670 v.reset(OpAMD64ANDL) 15671 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15672 v0.AddArg(x) 15673 v0.AddArg(y) 15674 v.AddArg(v0) 15675 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15676 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 15677 v2.AuxInt = 32 15678 v2.AddArg(y) 15679 v1.AddArg(v2) 15680 v.AddArg(v1) 15681 return true 15682 } 15683 } 15684 func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { 15685 b := v.Block 15686 _ = b 15687 // match: (Lsh16x32 <t> x y) 15688 // cond: 15689 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 15690 for { 15691 t := v.Type 15692 x := v.Args[0] 15693 y := v.Args[1] 15694 v.reset(OpAMD64ANDL) 15695 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15696 v0.AddArg(x) 15697 v0.AddArg(y) 15698 v.AddArg(v0) 15699 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15700 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 15701 v2.AuxInt = 32 15702 v2.AddArg(y) 15703 v1.AddArg(v2) 15704 v.AddArg(v1) 15705 return true 15706 } 15707 } 15708 func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { 15709 b := v.Block 15710 _ = b 15711 // match: (Lsh16x64 <t> x y) 15712 // cond: 15713 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 15714 for { 15715 t := v.Type 15716 x := v.Args[0] 15717 y := v.Args[1] 15718 v.reset(OpAMD64ANDL) 15719 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15720 v0.AddArg(x) 15721 v0.AddArg(y) 15722 v.AddArg(v0) 15723 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15724 
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 15725 v2.AuxInt = 32 15726 v2.AddArg(y) 15727 v1.AddArg(v2) 15728 v.AddArg(v1) 15729 return true 15730 } 15731 } 15732 func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { 15733 b := v.Block 15734 _ = b 15735 // match: (Lsh16x8 <t> x y) 15736 // cond: 15737 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 15738 for { 15739 t := v.Type 15740 x := v.Args[0] 15741 y := v.Args[1] 15742 v.reset(OpAMD64ANDL) 15743 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15744 v0.AddArg(x) 15745 v0.AddArg(y) 15746 v.AddArg(v0) 15747 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15748 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 15749 v2.AuxInt = 32 15750 v2.AddArg(y) 15751 v1.AddArg(v2) 15752 v.AddArg(v1) 15753 return true 15754 } 15755 } 15756 func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool { 15757 b := v.Block 15758 _ = b 15759 // match: (Lsh32x16 <t> x y) 15760 // cond: 15761 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 15762 for { 15763 t := v.Type 15764 x := v.Args[0] 15765 y := v.Args[1] 15766 v.reset(OpAMD64ANDL) 15767 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15768 v0.AddArg(x) 15769 v0.AddArg(y) 15770 v.AddArg(v0) 15771 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15772 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 15773 v2.AuxInt = 32 15774 v2.AddArg(y) 15775 v1.AddArg(v2) 15776 v.AddArg(v1) 15777 return true 15778 } 15779 } 15780 func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool { 15781 b := v.Block 15782 _ = b 15783 // match: (Lsh32x32 <t> x y) 15784 // cond: 15785 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 15786 for { 15787 t := v.Type 15788 x := v.Args[0] 15789 y := v.Args[1] 15790 v.reset(OpAMD64ANDL) 15791 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15792 v0.AddArg(x) 15793 v0.AddArg(y) 15794 v.AddArg(v0) 15795 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15796 
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 15797 v2.AuxInt = 32 15798 v2.AddArg(y) 15799 v1.AddArg(v2) 15800 v.AddArg(v1) 15801 return true 15802 } 15803 } 15804 func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool { 15805 b := v.Block 15806 _ = b 15807 // match: (Lsh32x64 <t> x y) 15808 // cond: 15809 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 15810 for { 15811 t := v.Type 15812 x := v.Args[0] 15813 y := v.Args[1] 15814 v.reset(OpAMD64ANDL) 15815 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15816 v0.AddArg(x) 15817 v0.AddArg(y) 15818 v.AddArg(v0) 15819 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15820 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 15821 v2.AuxInt = 32 15822 v2.AddArg(y) 15823 v1.AddArg(v2) 15824 v.AddArg(v1) 15825 return true 15826 } 15827 } 15828 func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool { 15829 b := v.Block 15830 _ = b 15831 // match: (Lsh32x8 <t> x y) 15832 // cond: 15833 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 15834 for { 15835 t := v.Type 15836 x := v.Args[0] 15837 y := v.Args[1] 15838 v.reset(OpAMD64ANDL) 15839 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15840 v0.AddArg(x) 15841 v0.AddArg(y) 15842 v.AddArg(v0) 15843 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15844 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 15845 v2.AuxInt = 32 15846 v2.AddArg(y) 15847 v1.AddArg(v2) 15848 v.AddArg(v1) 15849 return true 15850 } 15851 } 15852 func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool { 15853 b := v.Block 15854 _ = b 15855 // match: (Lsh64x16 <t> x y) 15856 // cond: 15857 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 15858 for { 15859 t := v.Type 15860 x := v.Args[0] 15861 y := v.Args[1] 15862 v.reset(OpAMD64ANDQ) 15863 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 15864 v0.AddArg(x) 15865 v0.AddArg(y) 15866 v.AddArg(v0) 15867 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 15868 
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 15869 v2.AuxInt = 64 15870 v2.AddArg(y) 15871 v1.AddArg(v2) 15872 v.AddArg(v1) 15873 return true 15874 } 15875 } 15876 func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool { 15877 b := v.Block 15878 _ = b 15879 // match: (Lsh64x32 <t> x y) 15880 // cond: 15881 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 15882 for { 15883 t := v.Type 15884 x := v.Args[0] 15885 y := v.Args[1] 15886 v.reset(OpAMD64ANDQ) 15887 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 15888 v0.AddArg(x) 15889 v0.AddArg(y) 15890 v.AddArg(v0) 15891 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 15892 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 15893 v2.AuxInt = 64 15894 v2.AddArg(y) 15895 v1.AddArg(v2) 15896 v.AddArg(v1) 15897 return true 15898 } 15899 } 15900 func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool { 15901 b := v.Block 15902 _ = b 15903 // match: (Lsh64x64 <t> x y) 15904 // cond: 15905 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 15906 for { 15907 t := v.Type 15908 x := v.Args[0] 15909 y := v.Args[1] 15910 v.reset(OpAMD64ANDQ) 15911 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 15912 v0.AddArg(x) 15913 v0.AddArg(y) 15914 v.AddArg(v0) 15915 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 15916 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 15917 v2.AuxInt = 64 15918 v2.AddArg(y) 15919 v1.AddArg(v2) 15920 v.AddArg(v1) 15921 return true 15922 } 15923 } 15924 func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool { 15925 b := v.Block 15926 _ = b 15927 // match: (Lsh64x8 <t> x y) 15928 // cond: 15929 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 15930 for { 15931 t := v.Type 15932 x := v.Args[0] 15933 y := v.Args[1] 15934 v.reset(OpAMD64ANDQ) 15935 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 15936 v0.AddArg(x) 15937 v0.AddArg(y) 15938 v.AddArg(v0) 15939 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 15940 
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 15941 v2.AuxInt = 64 15942 v2.AddArg(y) 15943 v1.AddArg(v2) 15944 v.AddArg(v1) 15945 return true 15946 } 15947 } 15948 func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { 15949 b := v.Block 15950 _ = b 15951 // match: (Lsh8x16 <t> x y) 15952 // cond: 15953 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 15954 for { 15955 t := v.Type 15956 x := v.Args[0] 15957 y := v.Args[1] 15958 v.reset(OpAMD64ANDL) 15959 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15960 v0.AddArg(x) 15961 v0.AddArg(y) 15962 v.AddArg(v0) 15963 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15964 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 15965 v2.AuxInt = 32 15966 v2.AddArg(y) 15967 v1.AddArg(v2) 15968 v.AddArg(v1) 15969 return true 15970 } 15971 } 15972 func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { 15973 b := v.Block 15974 _ = b 15975 // match: (Lsh8x32 <t> x y) 15976 // cond: 15977 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 15978 for { 15979 t := v.Type 15980 x := v.Args[0] 15981 y := v.Args[1] 15982 v.reset(OpAMD64ANDL) 15983 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 15984 v0.AddArg(x) 15985 v0.AddArg(y) 15986 v.AddArg(v0) 15987 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 15988 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 15989 v2.AuxInt = 32 15990 v2.AddArg(y) 15991 v1.AddArg(v2) 15992 v.AddArg(v1) 15993 return true 15994 } 15995 } 15996 func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { 15997 b := v.Block 15998 _ = b 15999 // match: (Lsh8x64 <t> x y) 16000 // cond: 16001 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 16002 for { 16003 t := v.Type 16004 x := v.Args[0] 16005 y := v.Args[1] 16006 v.reset(OpAMD64ANDL) 16007 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16008 v0.AddArg(x) 16009 v0.AddArg(y) 16010 v.AddArg(v0) 16011 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16012 v2 
:= b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 16013 v2.AuxInt = 32 16014 v2.AddArg(y) 16015 v1.AddArg(v2) 16016 v.AddArg(v1) 16017 return true 16018 } 16019 } 16020 func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { 16021 b := v.Block 16022 _ = b 16023 // match: (Lsh8x8 <t> x y) 16024 // cond: 16025 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 16026 for { 16027 t := v.Type 16028 x := v.Args[0] 16029 y := v.Args[1] 16030 v.reset(OpAMD64ANDL) 16031 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16032 v0.AddArg(x) 16033 v0.AddArg(y) 16034 v.AddArg(v0) 16035 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16036 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 16037 v2.AuxInt = 32 16038 v2.AddArg(y) 16039 v1.AddArg(v2) 16040 v.AddArg(v1) 16041 return true 16042 } 16043 } 16044 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { 16045 b := v.Block 16046 _ = b 16047 // match: (Mod16 x y) 16048 // cond: 16049 // result: (Select1 (DIVW x y)) 16050 for { 16051 x := v.Args[0] 16052 y := v.Args[1] 16053 v.reset(OpSelect1) 16054 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16055 v0.AddArg(x) 16056 v0.AddArg(y) 16057 v.AddArg(v0) 16058 return true 16059 } 16060 } 16061 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { 16062 b := v.Block 16063 _ = b 16064 // match: (Mod16u x y) 16065 // cond: 16066 // result: (Select1 (DIVWU x y)) 16067 for { 16068 x := v.Args[0] 16069 y := v.Args[1] 16070 v.reset(OpSelect1) 16071 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16072 v0.AddArg(x) 16073 v0.AddArg(y) 16074 v.AddArg(v0) 16075 return true 16076 } 16077 } 16078 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { 16079 b := v.Block 16080 _ = b 16081 // match: (Mod32 x y) 16082 // cond: 16083 // result: (Select1 (DIVL x y)) 16084 for { 16085 x := v.Args[0] 16086 y := v.Args[1] 16087 
v.reset(OpSelect1) 16088 v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 16089 v0.AddArg(x) 16090 v0.AddArg(y) 16091 v.AddArg(v0) 16092 return true 16093 } 16094 } 16095 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool { 16096 b := v.Block 16097 _ = b 16098 // match: (Mod32u x y) 16099 // cond: 16100 // result: (Select1 (DIVLU x y)) 16101 for { 16102 x := v.Args[0] 16103 y := v.Args[1] 16104 v.reset(OpSelect1) 16105 v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 16106 v0.AddArg(x) 16107 v0.AddArg(y) 16108 v.AddArg(v0) 16109 return true 16110 } 16111 } 16112 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { 16113 b := v.Block 16114 _ = b 16115 // match: (Mod64 x y) 16116 // cond: 16117 // result: (Select1 (DIVQ x y)) 16118 for { 16119 x := v.Args[0] 16120 y := v.Args[1] 16121 v.reset(OpSelect1) 16122 v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 16123 v0.AddArg(x) 16124 v0.AddArg(y) 16125 v.AddArg(v0) 16126 return true 16127 } 16128 } 16129 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { 16130 b := v.Block 16131 _ = b 16132 // match: (Mod64u x y) 16133 // cond: 16134 // result: (Select1 (DIVQU x y)) 16135 for { 16136 x := v.Args[0] 16137 y := v.Args[1] 16138 v.reset(OpSelect1) 16139 v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 16140 v0.AddArg(x) 16141 v0.AddArg(y) 16142 v.AddArg(v0) 16143 return true 16144 } 16145 } 16146 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { 16147 b := v.Block 16148 _ = b 16149 // match: (Mod8 x y) 16150 // cond: 16151 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 16152 for { 16153 x := v.Args[0] 16154 y := v.Args[1] 16155 v.reset(OpSelect1) 16156 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16157 v1 := 
b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 16158 v1.AddArg(x) 16159 v0.AddArg(v1) 16160 v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 16161 v2.AddArg(y) 16162 v0.AddArg(v2) 16163 v.AddArg(v0) 16164 return true 16165 } 16166 } 16167 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { 16168 b := v.Block 16169 _ = b 16170 // match: (Mod8u x y) 16171 // cond: 16172 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 16173 for { 16174 x := v.Args[0] 16175 y := v.Args[1] 16176 v.reset(OpSelect1) 16177 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16178 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 16179 v1.AddArg(x) 16180 v0.AddArg(v1) 16181 v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 16182 v2.AddArg(y) 16183 v0.AddArg(v2) 16184 v.AddArg(v0) 16185 return true 16186 } 16187 } 16188 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { 16189 b := v.Block 16190 _ = b 16191 // match: (Move [s] _ _ mem) 16192 // cond: SizeAndAlign(s).Size() == 0 16193 // result: mem 16194 for { 16195 s := v.AuxInt 16196 mem := v.Args[2] 16197 if !(SizeAndAlign(s).Size() == 0) { 16198 break 16199 } 16200 v.reset(OpCopy) 16201 v.Type = mem.Type 16202 v.AddArg(mem) 16203 return true 16204 } 16205 // match: (Move [s] dst src mem) 16206 // cond: SizeAndAlign(s).Size() == 1 16207 // result: (MOVBstore dst (MOVBload src mem) mem) 16208 for { 16209 s := v.AuxInt 16210 dst := v.Args[0] 16211 src := v.Args[1] 16212 mem := v.Args[2] 16213 if !(SizeAndAlign(s).Size() == 1) { 16214 break 16215 } 16216 v.reset(OpAMD64MOVBstore) 16217 v.AddArg(dst) 16218 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 16219 v0.AddArg(src) 16220 v0.AddArg(mem) 16221 v.AddArg(v0) 16222 v.AddArg(mem) 16223 return true 16224 } 16225 // match: (Move [s] dst src mem) 16226 // cond: SizeAndAlign(s).Size() == 2 16227 // result: (MOVWstore dst (MOVWload 
src mem) mem) 16228 for { 16229 s := v.AuxInt 16230 dst := v.Args[0] 16231 src := v.Args[1] 16232 mem := v.Args[2] 16233 if !(SizeAndAlign(s).Size() == 2) { 16234 break 16235 } 16236 v.reset(OpAMD64MOVWstore) 16237 v.AddArg(dst) 16238 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 16239 v0.AddArg(src) 16240 v0.AddArg(mem) 16241 v.AddArg(v0) 16242 v.AddArg(mem) 16243 return true 16244 } 16245 // match: (Move [s] dst src mem) 16246 // cond: SizeAndAlign(s).Size() == 4 16247 // result: (MOVLstore dst (MOVLload src mem) mem) 16248 for { 16249 s := v.AuxInt 16250 dst := v.Args[0] 16251 src := v.Args[1] 16252 mem := v.Args[2] 16253 if !(SizeAndAlign(s).Size() == 4) { 16254 break 16255 } 16256 v.reset(OpAMD64MOVLstore) 16257 v.AddArg(dst) 16258 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16259 v0.AddArg(src) 16260 v0.AddArg(mem) 16261 v.AddArg(v0) 16262 v.AddArg(mem) 16263 return true 16264 } 16265 // match: (Move [s] dst src mem) 16266 // cond: SizeAndAlign(s).Size() == 8 16267 // result: (MOVQstore dst (MOVQload src mem) mem) 16268 for { 16269 s := v.AuxInt 16270 dst := v.Args[0] 16271 src := v.Args[1] 16272 mem := v.Args[2] 16273 if !(SizeAndAlign(s).Size() == 8) { 16274 break 16275 } 16276 v.reset(OpAMD64MOVQstore) 16277 v.AddArg(dst) 16278 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16279 v0.AddArg(src) 16280 v0.AddArg(mem) 16281 v.AddArg(v0) 16282 v.AddArg(mem) 16283 return true 16284 } 16285 // match: (Move [s] dst src mem) 16286 // cond: SizeAndAlign(s).Size() == 16 16287 // result: (MOVOstore dst (MOVOload src mem) mem) 16288 for { 16289 s := v.AuxInt 16290 dst := v.Args[0] 16291 src := v.Args[1] 16292 mem := v.Args[2] 16293 if !(SizeAndAlign(s).Size() == 16) { 16294 break 16295 } 16296 v.reset(OpAMD64MOVOstore) 16297 v.AddArg(dst) 16298 v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 16299 v0.AddArg(src) 16300 v0.AddArg(mem) 16301 v.AddArg(v0) 16302 v.AddArg(mem) 16303 return true 16304 } 
16305 // match: (Move [s] dst src mem) 16306 // cond: SizeAndAlign(s).Size() == 3 16307 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 16308 for { 16309 s := v.AuxInt 16310 dst := v.Args[0] 16311 src := v.Args[1] 16312 mem := v.Args[2] 16313 if !(SizeAndAlign(s).Size() == 3) { 16314 break 16315 } 16316 v.reset(OpAMD64MOVBstore) 16317 v.AuxInt = 2 16318 v.AddArg(dst) 16319 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 16320 v0.AuxInt = 2 16321 v0.AddArg(src) 16322 v0.AddArg(mem) 16323 v.AddArg(v0) 16324 v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem) 16325 v1.AddArg(dst) 16326 v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 16327 v2.AddArg(src) 16328 v2.AddArg(mem) 16329 v1.AddArg(v2) 16330 v1.AddArg(mem) 16331 v.AddArg(v1) 16332 return true 16333 } 16334 // match: (Move [s] dst src mem) 16335 // cond: SizeAndAlign(s).Size() == 5 16336 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 16337 for { 16338 s := v.AuxInt 16339 dst := v.Args[0] 16340 src := v.Args[1] 16341 mem := v.Args[2] 16342 if !(SizeAndAlign(s).Size() == 5) { 16343 break 16344 } 16345 v.reset(OpAMD64MOVBstore) 16346 v.AuxInt = 4 16347 v.AddArg(dst) 16348 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 16349 v0.AuxInt = 4 16350 v0.AddArg(src) 16351 v0.AddArg(mem) 16352 v.AddArg(v0) 16353 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 16354 v1.AddArg(dst) 16355 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16356 v2.AddArg(src) 16357 v2.AddArg(mem) 16358 v1.AddArg(v2) 16359 v1.AddArg(mem) 16360 v.AddArg(v1) 16361 return true 16362 } 16363 // match: (Move [s] dst src mem) 16364 // cond: SizeAndAlign(s).Size() == 6 16365 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 16366 for { 16367 s := v.AuxInt 16368 dst := v.Args[0] 16369 src := v.Args[1] 16370 mem := v.Args[2] 16371 if 
!(SizeAndAlign(s).Size() == 6) { 16372 break 16373 } 16374 v.reset(OpAMD64MOVWstore) 16375 v.AuxInt = 4 16376 v.AddArg(dst) 16377 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 16378 v0.AuxInt = 4 16379 v0.AddArg(src) 16380 v0.AddArg(mem) 16381 v.AddArg(v0) 16382 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 16383 v1.AddArg(dst) 16384 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16385 v2.AddArg(src) 16386 v2.AddArg(mem) 16387 v1.AddArg(v2) 16388 v1.AddArg(mem) 16389 v.AddArg(v1) 16390 return true 16391 } 16392 // match: (Move [s] dst src mem) 16393 // cond: SizeAndAlign(s).Size() == 7 16394 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 16395 for { 16396 s := v.AuxInt 16397 dst := v.Args[0] 16398 src := v.Args[1] 16399 mem := v.Args[2] 16400 if !(SizeAndAlign(s).Size() == 7) { 16401 break 16402 } 16403 v.reset(OpAMD64MOVLstore) 16404 v.AuxInt = 3 16405 v.AddArg(dst) 16406 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16407 v0.AuxInt = 3 16408 v0.AddArg(src) 16409 v0.AddArg(mem) 16410 v.AddArg(v0) 16411 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 16412 v1.AddArg(dst) 16413 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16414 v2.AddArg(src) 16415 v2.AddArg(mem) 16416 v1.AddArg(v2) 16417 v1.AddArg(mem) 16418 v.AddArg(v1) 16419 return true 16420 } 16421 // match: (Move [s] dst src mem) 16422 // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 16423 // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 16424 for { 16425 s := v.AuxInt 16426 dst := v.Args[0] 16427 src := v.Args[1] 16428 mem := v.Args[2] 16429 if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) { 16430 break 16431 } 16432 v.reset(OpAMD64MOVQstore) 16433 v.AuxInt = SizeAndAlign(s).Size() - 8 16434 v.AddArg(dst) 16435 v0 := b.NewValue0(v.Line, 
OpAMD64MOVQload, config.fe.TypeUInt64()) 16436 v0.AuxInt = SizeAndAlign(s).Size() - 8 16437 v0.AddArg(src) 16438 v0.AddArg(mem) 16439 v.AddArg(v0) 16440 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 16441 v1.AddArg(dst) 16442 v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16443 v2.AddArg(src) 16444 v2.AddArg(mem) 16445 v1.AddArg(v2) 16446 v1.AddArg(mem) 16447 v.AddArg(v1) 16448 return true 16449 } 16450 // match: (Move [s] dst src mem) 16451 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 16452 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem)) 16453 for { 16454 s := v.AuxInt 16455 dst := v.Args[0] 16456 src := v.Args[1] 16457 mem := v.Args[2] 16458 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) { 16459 break 16460 } 16461 v.reset(OpMove) 16462 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 16463 v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type) 16464 v0.AuxInt = SizeAndAlign(s).Size() % 16 16465 v0.AddArg(dst) 16466 v.AddArg(v0) 16467 v1 := b.NewValue0(v.Line, OpOffPtr, src.Type) 16468 v1.AuxInt = SizeAndAlign(s).Size() % 16 16469 v1.AddArg(src) 16470 v.AddArg(v1) 16471 v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 16472 v2.AddArg(dst) 16473 v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16474 v3.AddArg(src) 16475 v3.AddArg(mem) 16476 v2.AddArg(v3) 16477 v2.AddArg(mem) 16478 v.AddArg(v2) 16479 return true 16480 } 16481 // match: (Move [s] dst src mem) 16482 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 16483 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src 
[SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem)) 16484 for { 16485 s := v.AuxInt 16486 dst := v.Args[0] 16487 src := v.Args[1] 16488 mem := v.Args[2] 16489 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) { 16490 break 16491 } 16492 v.reset(OpMove) 16493 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 16494 v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type) 16495 v0.AuxInt = SizeAndAlign(s).Size() % 16 16496 v0.AddArg(dst) 16497 v.AddArg(v0) 16498 v1 := b.NewValue0(v.Line, OpOffPtr, src.Type) 16499 v1.AuxInt = SizeAndAlign(s).Size() % 16 16500 v1.AddArg(src) 16501 v.AddArg(v1) 16502 v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem) 16503 v2.AddArg(dst) 16504 v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 16505 v3.AddArg(src) 16506 v3.AddArg(mem) 16507 v2.AddArg(v3) 16508 v2.AddArg(mem) 16509 v.AddArg(v2) 16510 return true 16511 } 16512 // match: (Move [s] dst src mem) 16513 // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 16514 // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem) 16515 for { 16516 s := v.AuxInt 16517 dst := v.Args[0] 16518 src := v.Args[1] 16519 mem := v.Args[2] 16520 if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 16521 break 16522 } 16523 v.reset(OpAMD64DUFFCOPY) 16524 v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16) 16525 v.AddArg(dst) 16526 v.AddArg(src) 16527 v.AddArg(mem) 16528 return true 16529 } 16530 // match: (Move [s] dst src mem) 16531 // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 16532 // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem) 16533 for { 16534 s := v.AuxInt 16535 dst := v.Args[0] 16536 src := v.Args[1] 16537 mem := v.Args[2] 16538 if !((SizeAndAlign(s).Size() > 16*64 || 
config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) { 16539 break 16540 } 16541 v.reset(OpAMD64REPMOVSQ) 16542 v.AddArg(dst) 16543 v.AddArg(src) 16544 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 16545 v0.AuxInt = SizeAndAlign(s).Size() / 8 16546 v.AddArg(v0) 16547 v.AddArg(mem) 16548 return true 16549 } 16550 return false 16551 } 16552 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { 16553 b := v.Block 16554 _ = b 16555 // match: (Mul16 x y) 16556 // cond: 16557 // result: (MULL x y) 16558 for { 16559 x := v.Args[0] 16560 y := v.Args[1] 16561 v.reset(OpAMD64MULL) 16562 v.AddArg(x) 16563 v.AddArg(y) 16564 return true 16565 } 16566 } 16567 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { 16568 b := v.Block 16569 _ = b 16570 // match: (Mul32 x y) 16571 // cond: 16572 // result: (MULL x y) 16573 for { 16574 x := v.Args[0] 16575 y := v.Args[1] 16576 v.reset(OpAMD64MULL) 16577 v.AddArg(x) 16578 v.AddArg(y) 16579 return true 16580 } 16581 } 16582 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { 16583 b := v.Block 16584 _ = b 16585 // match: (Mul32F x y) 16586 // cond: 16587 // result: (MULSS x y) 16588 for { 16589 x := v.Args[0] 16590 y := v.Args[1] 16591 v.reset(OpAMD64MULSS) 16592 v.AddArg(x) 16593 v.AddArg(y) 16594 return true 16595 } 16596 } 16597 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { 16598 b := v.Block 16599 _ = b 16600 // match: (Mul64 x y) 16601 // cond: 16602 // result: (MULQ x y) 16603 for { 16604 x := v.Args[0] 16605 y := v.Args[1] 16606 v.reset(OpAMD64MULQ) 16607 v.AddArg(x) 16608 v.AddArg(y) 16609 return true 16610 } 16611 } 16612 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { 16613 b := v.Block 16614 _ = b 16615 // match: (Mul64F x y) 16616 // cond: 16617 // result: (MULSD x y) 16618 for { 16619 x := v.Args[0] 16620 y := v.Args[1] 16621 v.reset(OpAMD64MULSD) 16622 v.AddArg(x) 16623 v.AddArg(y) 16624 return true 16625 } 16626 } 16627 func 
rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { 16628 b := v.Block 16629 _ = b 16630 // match: (Mul8 x y) 16631 // cond: 16632 // result: (MULL x y) 16633 for { 16634 x := v.Args[0] 16635 y := v.Args[1] 16636 v.reset(OpAMD64MULL) 16637 v.AddArg(x) 16638 v.AddArg(y) 16639 return true 16640 } 16641 } 16642 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { 16643 b := v.Block 16644 _ = b 16645 // match: (Neg16 x) 16646 // cond: 16647 // result: (NEGL x) 16648 for { 16649 x := v.Args[0] 16650 v.reset(OpAMD64NEGL) 16651 v.AddArg(x) 16652 return true 16653 } 16654 } 16655 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { 16656 b := v.Block 16657 _ = b 16658 // match: (Neg32 x) 16659 // cond: 16660 // result: (NEGL x) 16661 for { 16662 x := v.Args[0] 16663 v.reset(OpAMD64NEGL) 16664 v.AddArg(x) 16665 return true 16666 } 16667 } 16668 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { 16669 b := v.Block 16670 _ = b 16671 // match: (Neg32F x) 16672 // cond: 16673 // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) 16674 for { 16675 x := v.Args[0] 16676 v.reset(OpAMD64PXOR) 16677 v.AddArg(x) 16678 v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) 16679 v0.AuxInt = f2i(math.Copysign(0, -1)) 16680 v.AddArg(v0) 16681 return true 16682 } 16683 } 16684 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { 16685 b := v.Block 16686 _ = b 16687 // match: (Neg64 x) 16688 // cond: 16689 // result: (NEGQ x) 16690 for { 16691 x := v.Args[0] 16692 v.reset(OpAMD64NEGQ) 16693 v.AddArg(x) 16694 return true 16695 } 16696 } 16697 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { 16698 b := v.Block 16699 _ = b 16700 // match: (Neg64F x) 16701 // cond: 16702 // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) 16703 for { 16704 x := v.Args[0] 16705 v.reset(OpAMD64PXOR) 16706 v.AddArg(x) 16707 v0 := 
b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) 16708 v0.AuxInt = f2i(math.Copysign(0, -1)) 16709 v.AddArg(v0) 16710 return true 16711 } 16712 } 16713 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { 16714 b := v.Block 16715 _ = b 16716 // match: (Neg8 x) 16717 // cond: 16718 // result: (NEGL x) 16719 for { 16720 x := v.Args[0] 16721 v.reset(OpAMD64NEGL) 16722 v.AddArg(x) 16723 return true 16724 } 16725 } 16726 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { 16727 b := v.Block 16728 _ = b 16729 // match: (Neq16 x y) 16730 // cond: 16731 // result: (SETNE (CMPW x y)) 16732 for { 16733 x := v.Args[0] 16734 y := v.Args[1] 16735 v.reset(OpAMD64SETNE) 16736 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16737 v0.AddArg(x) 16738 v0.AddArg(y) 16739 v.AddArg(v0) 16740 return true 16741 } 16742 } 16743 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { 16744 b := v.Block 16745 _ = b 16746 // match: (Neq32 x y) 16747 // cond: 16748 // result: (SETNE (CMPL x y)) 16749 for { 16750 x := v.Args[0] 16751 y := v.Args[1] 16752 v.reset(OpAMD64SETNE) 16753 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16754 v0.AddArg(x) 16755 v0.AddArg(y) 16756 v.AddArg(v0) 16757 return true 16758 } 16759 } 16760 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { 16761 b := v.Block 16762 _ = b 16763 // match: (Neq32F x y) 16764 // cond: 16765 // result: (SETNEF (UCOMISS x y)) 16766 for { 16767 x := v.Args[0] 16768 y := v.Args[1] 16769 v.reset(OpAMD64SETNEF) 16770 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 16771 v0.AddArg(x) 16772 v0.AddArg(y) 16773 v.AddArg(v0) 16774 return true 16775 } 16776 } 16777 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { 16778 b := v.Block 16779 _ = b 16780 // match: (Neq64 x y) 16781 // cond: 16782 // result: (SETNE (CMPQ x y)) 16783 for { 16784 x := v.Args[0] 16785 y := v.Args[1] 16786 v.reset(OpAMD64SETNE) 16787 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, 
TypeFlags) 16788 v0.AddArg(x) 16789 v0.AddArg(y) 16790 v.AddArg(v0) 16791 return true 16792 } 16793 } 16794 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { 16795 b := v.Block 16796 _ = b 16797 // match: (Neq64F x y) 16798 // cond: 16799 // result: (SETNEF (UCOMISD x y)) 16800 for { 16801 x := v.Args[0] 16802 y := v.Args[1] 16803 v.reset(OpAMD64SETNEF) 16804 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 16805 v0.AddArg(x) 16806 v0.AddArg(y) 16807 v.AddArg(v0) 16808 return true 16809 } 16810 } 16811 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { 16812 b := v.Block 16813 _ = b 16814 // match: (Neq8 x y) 16815 // cond: 16816 // result: (SETNE (CMPB x y)) 16817 for { 16818 x := v.Args[0] 16819 y := v.Args[1] 16820 v.reset(OpAMD64SETNE) 16821 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16822 v0.AddArg(x) 16823 v0.AddArg(y) 16824 v.AddArg(v0) 16825 return true 16826 } 16827 } 16828 func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool { 16829 b := v.Block 16830 _ = b 16831 // match: (NeqB x y) 16832 // cond: 16833 // result: (SETNE (CMPB x y)) 16834 for { 16835 x := v.Args[0] 16836 y := v.Args[1] 16837 v.reset(OpAMD64SETNE) 16838 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16839 v0.AddArg(x) 16840 v0.AddArg(y) 16841 v.AddArg(v0) 16842 return true 16843 } 16844 } 16845 func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { 16846 b := v.Block 16847 _ = b 16848 // match: (NeqPtr x y) 16849 // cond: config.PtrSize == 8 16850 // result: (SETNE (CMPQ x y)) 16851 for { 16852 x := v.Args[0] 16853 y := v.Args[1] 16854 if !(config.PtrSize == 8) { 16855 break 16856 } 16857 v.reset(OpAMD64SETNE) 16858 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16859 v0.AddArg(x) 16860 v0.AddArg(y) 16861 v.AddArg(v0) 16862 return true 16863 } 16864 // match: (NeqPtr x y) 16865 // cond: config.PtrSize == 4 16866 // result: (SETNE (CMPL x y)) 16867 for { 16868 x := v.Args[0] 16869 y := v.Args[1] 16870 if !(config.PtrSize == 4) { 
16871 break 16872 } 16873 v.reset(OpAMD64SETNE) 16874 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16875 v0.AddArg(x) 16876 v0.AddArg(y) 16877 v.AddArg(v0) 16878 return true 16879 } 16880 return false 16881 } 16882 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { 16883 b := v.Block 16884 _ = b 16885 // match: (NilCheck ptr mem) 16886 // cond: 16887 // result: (LoweredNilCheck ptr mem) 16888 for { 16889 ptr := v.Args[0] 16890 mem := v.Args[1] 16891 v.reset(OpAMD64LoweredNilCheck) 16892 v.AddArg(ptr) 16893 v.AddArg(mem) 16894 return true 16895 } 16896 } 16897 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { 16898 b := v.Block 16899 _ = b 16900 // match: (Not x) 16901 // cond: 16902 // result: (XORLconst [1] x) 16903 for { 16904 x := v.Args[0] 16905 v.reset(OpAMD64XORLconst) 16906 v.AuxInt = 1 16907 v.AddArg(x) 16908 return true 16909 } 16910 } 16911 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { 16912 b := v.Block 16913 _ = b 16914 // match: (OffPtr [off] ptr) 16915 // cond: config.PtrSize == 8 && is32Bit(off) 16916 // result: (ADDQconst [off] ptr) 16917 for { 16918 off := v.AuxInt 16919 ptr := v.Args[0] 16920 if !(config.PtrSize == 8 && is32Bit(off)) { 16921 break 16922 } 16923 v.reset(OpAMD64ADDQconst) 16924 v.AuxInt = off 16925 v.AddArg(ptr) 16926 return true 16927 } 16928 // match: (OffPtr [off] ptr) 16929 // cond: config.PtrSize == 8 16930 // result: (ADDQ (MOVQconst [off]) ptr) 16931 for { 16932 off := v.AuxInt 16933 ptr := v.Args[0] 16934 if !(config.PtrSize == 8) { 16935 break 16936 } 16937 v.reset(OpAMD64ADDQ) 16938 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 16939 v0.AuxInt = off 16940 v.AddArg(v0) 16941 v.AddArg(ptr) 16942 return true 16943 } 16944 // match: (OffPtr [off] ptr) 16945 // cond: config.PtrSize == 4 16946 // result: (ADDLconst [off] ptr) 16947 for { 16948 off := v.AuxInt 16949 ptr := v.Args[0] 16950 if !(config.PtrSize == 4) { 16951 break 16952 } 16953 
v.reset(OpAMD64ADDLconst) 16954 v.AuxInt = off 16955 v.AddArg(ptr) 16956 return true 16957 } 16958 return false 16959 } 16960 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { 16961 b := v.Block 16962 _ = b 16963 // match: (Or16 x y) 16964 // cond: 16965 // result: (ORL x y) 16966 for { 16967 x := v.Args[0] 16968 y := v.Args[1] 16969 v.reset(OpAMD64ORL) 16970 v.AddArg(x) 16971 v.AddArg(y) 16972 return true 16973 } 16974 } 16975 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { 16976 b := v.Block 16977 _ = b 16978 // match: (Or32 x y) 16979 // cond: 16980 // result: (ORL x y) 16981 for { 16982 x := v.Args[0] 16983 y := v.Args[1] 16984 v.reset(OpAMD64ORL) 16985 v.AddArg(x) 16986 v.AddArg(y) 16987 return true 16988 } 16989 } 16990 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { 16991 b := v.Block 16992 _ = b 16993 // match: (Or64 x y) 16994 // cond: 16995 // result: (ORQ x y) 16996 for { 16997 x := v.Args[0] 16998 y := v.Args[1] 16999 v.reset(OpAMD64ORQ) 17000 v.AddArg(x) 17001 v.AddArg(y) 17002 return true 17003 } 17004 } 17005 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { 17006 b := v.Block 17007 _ = b 17008 // match: (Or8 x y) 17009 // cond: 17010 // result: (ORL x y) 17011 for { 17012 x := v.Args[0] 17013 y := v.Args[1] 17014 v.reset(OpAMD64ORL) 17015 v.AddArg(x) 17016 v.AddArg(y) 17017 return true 17018 } 17019 } 17020 func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool { 17021 b := v.Block 17022 _ = b 17023 // match: (OrB x y) 17024 // cond: 17025 // result: (ORL x y) 17026 for { 17027 x := v.Args[0] 17028 y := v.Args[1] 17029 v.reset(OpAMD64ORL) 17030 v.AddArg(x) 17031 v.AddArg(y) 17032 return true 17033 } 17034 } 17035 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { 17036 b := v.Block 17037 _ = b 17038 // match: (Rsh16Ux16 <t> x y) 17039 // cond: 17040 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 17041 for { 17042 t := v.Type 17043 x := v.Args[0] 
17044 y := v.Args[1] 17045 v.reset(OpAMD64ANDL) 17046 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17047 v0.AddArg(x) 17048 v0.AddArg(y) 17049 v.AddArg(v0) 17050 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17051 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17052 v2.AuxInt = 16 17053 v2.AddArg(y) 17054 v1.AddArg(v2) 17055 v.AddArg(v1) 17056 return true 17057 } 17058 } 17059 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { 17060 b := v.Block 17061 _ = b 17062 // match: (Rsh16Ux32 <t> x y) 17063 // cond: 17064 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 17065 for { 17066 t := v.Type 17067 x := v.Args[0] 17068 y := v.Args[1] 17069 v.reset(OpAMD64ANDL) 17070 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17071 v0.AddArg(x) 17072 v0.AddArg(y) 17073 v.AddArg(v0) 17074 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17075 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17076 v2.AuxInt = 16 17077 v2.AddArg(y) 17078 v1.AddArg(v2) 17079 v.AddArg(v1) 17080 return true 17081 } 17082 } 17083 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { 17084 b := v.Block 17085 _ = b 17086 // match: (Rsh16Ux64 <t> x y) 17087 // cond: 17088 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 17089 for { 17090 t := v.Type 17091 x := v.Args[0] 17092 y := v.Args[1] 17093 v.reset(OpAMD64ANDL) 17094 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17095 v0.AddArg(x) 17096 v0.AddArg(y) 17097 v.AddArg(v0) 17098 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17099 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17100 v2.AuxInt = 16 17101 v2.AddArg(y) 17102 v1.AddArg(v2) 17103 v.AddArg(v1) 17104 return true 17105 } 17106 } 17107 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { 17108 b := v.Block 17109 _ = b 17110 // match: (Rsh16Ux8 <t> x y) 17111 // cond: 17112 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 17113 for { 17114 t := v.Type 17115 x := 
v.Args[0] 17116 y := v.Args[1] 17117 v.reset(OpAMD64ANDL) 17118 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17119 v0.AddArg(x) 17120 v0.AddArg(y) 17121 v.AddArg(v0) 17122 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17123 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17124 v2.AuxInt = 16 17125 v2.AddArg(y) 17126 v1.AddArg(v2) 17127 v.AddArg(v1) 17128 return true 17129 } 17130 } 17131 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { 17132 b := v.Block 17133 _ = b 17134 // match: (Rsh16x16 <t> x y) 17135 // cond: 17136 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 17137 for { 17138 t := v.Type 17139 x := v.Args[0] 17140 y := v.Args[1] 17141 v.reset(OpAMD64SARW) 17142 v.Type = t 17143 v.AddArg(x) 17144 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17145 v0.AddArg(y) 17146 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17147 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17148 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17149 v3.AuxInt = 16 17150 v3.AddArg(y) 17151 v2.AddArg(v3) 17152 v1.AddArg(v2) 17153 v0.AddArg(v1) 17154 v.AddArg(v0) 17155 return true 17156 } 17157 } 17158 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { 17159 b := v.Block 17160 _ = b 17161 // match: (Rsh16x32 <t> x y) 17162 // cond: 17163 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 17164 for { 17165 t := v.Type 17166 x := v.Args[0] 17167 y := v.Args[1] 17168 v.reset(OpAMD64SARW) 17169 v.Type = t 17170 v.AddArg(x) 17171 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17172 v0.AddArg(y) 17173 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17174 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17175 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17176 v3.AuxInt = 16 17177 v3.AddArg(y) 17178 v2.AddArg(v3) 17179 v1.AddArg(v2) 17180 v0.AddArg(v1) 17181 v.AddArg(v0) 17182 return true 17183 } 17184 } 17185 func 
rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { 17186 b := v.Block 17187 _ = b 17188 // match: (Rsh16x64 <t> x y) 17189 // cond: 17190 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 17191 for { 17192 t := v.Type 17193 x := v.Args[0] 17194 y := v.Args[1] 17195 v.reset(OpAMD64SARW) 17196 v.Type = t 17197 v.AddArg(x) 17198 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 17199 v0.AddArg(y) 17200 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 17201 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 17202 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17203 v3.AuxInt = 16 17204 v3.AddArg(y) 17205 v2.AddArg(v3) 17206 v1.AddArg(v2) 17207 v0.AddArg(v1) 17208 v.AddArg(v0) 17209 return true 17210 } 17211 } 17212 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { 17213 b := v.Block 17214 _ = b 17215 // match: (Rsh16x8 <t> x y) 17216 // cond: 17217 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 17218 for { 17219 t := v.Type 17220 x := v.Args[0] 17221 y := v.Args[1] 17222 v.reset(OpAMD64SARW) 17223 v.Type = t 17224 v.AddArg(x) 17225 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17226 v0.AddArg(y) 17227 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17228 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17229 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17230 v3.AuxInt = 16 17231 v3.AddArg(y) 17232 v2.AddArg(v3) 17233 v1.AddArg(v2) 17234 v0.AddArg(v1) 17235 v.AddArg(v0) 17236 return true 17237 } 17238 } 17239 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { 17240 b := v.Block 17241 _ = b 17242 // match: (Rsh32Ux16 <t> x y) 17243 // cond: 17244 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17245 for { 17246 t := v.Type 17247 x := v.Args[0] 17248 y := v.Args[1] 17249 v.reset(OpAMD64ANDL) 17250 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17251 v0.AddArg(x) 17252 v0.AddArg(y) 
17253 v.AddArg(v0) 17254 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17255 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17256 v2.AuxInt = 32 17257 v2.AddArg(y) 17258 v1.AddArg(v2) 17259 v.AddArg(v1) 17260 return true 17261 } 17262 } 17263 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { 17264 b := v.Block 17265 _ = b 17266 // match: (Rsh32Ux32 <t> x y) 17267 // cond: 17268 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17269 for { 17270 t := v.Type 17271 x := v.Args[0] 17272 y := v.Args[1] 17273 v.reset(OpAMD64ANDL) 17274 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17275 v0.AddArg(x) 17276 v0.AddArg(y) 17277 v.AddArg(v0) 17278 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17279 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17280 v2.AuxInt = 32 17281 v2.AddArg(y) 17282 v1.AddArg(v2) 17283 v.AddArg(v1) 17284 return true 17285 } 17286 } 17287 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { 17288 b := v.Block 17289 _ = b 17290 // match: (Rsh32Ux64 <t> x y) 17291 // cond: 17292 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17293 for { 17294 t := v.Type 17295 x := v.Args[0] 17296 y := v.Args[1] 17297 v.reset(OpAMD64ANDL) 17298 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17299 v0.AddArg(x) 17300 v0.AddArg(y) 17301 v.AddArg(v0) 17302 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17303 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17304 v2.AuxInt = 32 17305 v2.AddArg(y) 17306 v1.AddArg(v2) 17307 v.AddArg(v1) 17308 return true 17309 } 17310 } 17311 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { 17312 b := v.Block 17313 _ = b 17314 // match: (Rsh32Ux8 <t> x y) 17315 // cond: 17316 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17317 for { 17318 t := v.Type 17319 x := v.Args[0] 17320 y := v.Args[1] 17321 v.reset(OpAMD64ANDL) 17322 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17323 v0.AddArg(x) 17324 
v0.AddArg(y) 17325 v.AddArg(v0) 17326 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17327 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17328 v2.AuxInt = 32 17329 v2.AddArg(y) 17330 v1.AddArg(v2) 17331 v.AddArg(v1) 17332 return true 17333 } 17334 } 17335 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { 17336 b := v.Block 17337 _ = b 17338 // match: (Rsh32x16 <t> x y) 17339 // cond: 17340 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 17341 for { 17342 t := v.Type 17343 x := v.Args[0] 17344 y := v.Args[1] 17345 v.reset(OpAMD64SARL) 17346 v.Type = t 17347 v.AddArg(x) 17348 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17349 v0.AddArg(y) 17350 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17351 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17352 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17353 v3.AuxInt = 32 17354 v3.AddArg(y) 17355 v2.AddArg(v3) 17356 v1.AddArg(v2) 17357 v0.AddArg(v1) 17358 v.AddArg(v0) 17359 return true 17360 } 17361 } 17362 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { 17363 b := v.Block 17364 _ = b 17365 // match: (Rsh32x32 <t> x y) 17366 // cond: 17367 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 17368 for { 17369 t := v.Type 17370 x := v.Args[0] 17371 y := v.Args[1] 17372 v.reset(OpAMD64SARL) 17373 v.Type = t 17374 v.AddArg(x) 17375 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17376 v0.AddArg(y) 17377 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17378 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17379 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17380 v3.AuxInt = 32 17381 v3.AddArg(y) 17382 v2.AddArg(v3) 17383 v1.AddArg(v2) 17384 v0.AddArg(v1) 17385 v.AddArg(v0) 17386 return true 17387 } 17388 } 17389 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { 17390 b := v.Block 17391 _ = b 17392 // match: (Rsh32x64 <t> x y) 17393 
// cond: 17394 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 17395 for { 17396 t := v.Type 17397 x := v.Args[0] 17398 y := v.Args[1] 17399 v.reset(OpAMD64SARL) 17400 v.Type = t 17401 v.AddArg(x) 17402 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 17403 v0.AddArg(y) 17404 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 17405 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 17406 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17407 v3.AuxInt = 32 17408 v3.AddArg(y) 17409 v2.AddArg(v3) 17410 v1.AddArg(v2) 17411 v0.AddArg(v1) 17412 v.AddArg(v0) 17413 return true 17414 } 17415 } 17416 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { 17417 b := v.Block 17418 _ = b 17419 // match: (Rsh32x8 <t> x y) 17420 // cond: 17421 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 17422 for { 17423 t := v.Type 17424 x := v.Args[0] 17425 y := v.Args[1] 17426 v.reset(OpAMD64SARL) 17427 v.Type = t 17428 v.AddArg(x) 17429 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17430 v0.AddArg(y) 17431 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17432 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17433 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17434 v3.AuxInt = 32 17435 v3.AddArg(y) 17436 v2.AddArg(v3) 17437 v1.AddArg(v2) 17438 v0.AddArg(v1) 17439 v.AddArg(v0) 17440 return true 17441 } 17442 } 17443 func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool { 17444 b := v.Block 17445 _ = b 17446 // match: (Rsh64Ux16 <t> x y) 17447 // cond: 17448 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 17449 for { 17450 t := v.Type 17451 x := v.Args[0] 17452 y := v.Args[1] 17453 v.reset(OpAMD64ANDQ) 17454 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17455 v0.AddArg(x) 17456 v0.AddArg(y) 17457 v.AddArg(v0) 17458 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17459 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 
17460 v2.AuxInt = 64 17461 v2.AddArg(y) 17462 v1.AddArg(v2) 17463 v.AddArg(v1) 17464 return true 17465 } 17466 } 17467 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { 17468 b := v.Block 17469 _ = b 17470 // match: (Rsh64Ux32 <t> x y) 17471 // cond: 17472 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 17473 for { 17474 t := v.Type 17475 x := v.Args[0] 17476 y := v.Args[1] 17477 v.reset(OpAMD64ANDQ) 17478 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17479 v0.AddArg(x) 17480 v0.AddArg(y) 17481 v.AddArg(v0) 17482 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17483 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17484 v2.AuxInt = 64 17485 v2.AddArg(y) 17486 v1.AddArg(v2) 17487 v.AddArg(v1) 17488 return true 17489 } 17490 } 17491 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { 17492 b := v.Block 17493 _ = b 17494 // match: (Rsh64Ux64 <t> x y) 17495 // cond: 17496 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 17497 for { 17498 t := v.Type 17499 x := v.Args[0] 17500 y := v.Args[1] 17501 v.reset(OpAMD64ANDQ) 17502 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17503 v0.AddArg(x) 17504 v0.AddArg(y) 17505 v.AddArg(v0) 17506 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17507 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17508 v2.AuxInt = 64 17509 v2.AddArg(y) 17510 v1.AddArg(v2) 17511 v.AddArg(v1) 17512 return true 17513 } 17514 } 17515 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { 17516 b := v.Block 17517 _ = b 17518 // match: (Rsh64Ux8 <t> x y) 17519 // cond: 17520 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 17521 for { 17522 t := v.Type 17523 x := v.Args[0] 17524 y := v.Args[1] 17525 v.reset(OpAMD64ANDQ) 17526 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17527 v0.AddArg(x) 17528 v0.AddArg(y) 17529 v.AddArg(v0) 17530 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17531 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, 
TypeFlags) 17532 v2.AuxInt = 64 17533 v2.AddArg(y) 17534 v1.AddArg(v2) 17535 v.AddArg(v1) 17536 return true 17537 } 17538 } 17539 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { 17540 b := v.Block 17541 _ = b 17542 // match: (Rsh64x16 <t> x y) 17543 // cond: 17544 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 17545 for { 17546 t := v.Type 17547 x := v.Args[0] 17548 y := v.Args[1] 17549 v.reset(OpAMD64SARQ) 17550 v.Type = t 17551 v.AddArg(x) 17552 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17553 v0.AddArg(y) 17554 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17555 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17556 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17557 v3.AuxInt = 64 17558 v3.AddArg(y) 17559 v2.AddArg(v3) 17560 v1.AddArg(v2) 17561 v0.AddArg(v1) 17562 v.AddArg(v0) 17563 return true 17564 } 17565 } 17566 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { 17567 b := v.Block 17568 _ = b 17569 // match: (Rsh64x32 <t> x y) 17570 // cond: 17571 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 17572 for { 17573 t := v.Type 17574 x := v.Args[0] 17575 y := v.Args[1] 17576 v.reset(OpAMD64SARQ) 17577 v.Type = t 17578 v.AddArg(x) 17579 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17580 v0.AddArg(y) 17581 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17582 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17583 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17584 v3.AuxInt = 64 17585 v3.AddArg(y) 17586 v2.AddArg(v3) 17587 v1.AddArg(v2) 17588 v0.AddArg(v1) 17589 v.AddArg(v0) 17590 return true 17591 } 17592 } 17593 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { 17594 b := v.Block 17595 _ = b 17596 // match: (Rsh64x64 <t> x y) 17597 // cond: 17598 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 17599 for { 17600 t := 
v.Type 17601 x := v.Args[0] 17602 y := v.Args[1] 17603 v.reset(OpAMD64SARQ) 17604 v.Type = t 17605 v.AddArg(x) 17606 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 17607 v0.AddArg(y) 17608 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 17609 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 17610 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17611 v3.AuxInt = 64 17612 v3.AddArg(y) 17613 v2.AddArg(v3) 17614 v1.AddArg(v2) 17615 v0.AddArg(v1) 17616 v.AddArg(v0) 17617 return true 17618 } 17619 } 17620 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { 17621 b := v.Block 17622 _ = b 17623 // match: (Rsh64x8 <t> x y) 17624 // cond: 17625 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 17626 for { 17627 t := v.Type 17628 x := v.Args[0] 17629 y := v.Args[1] 17630 v.reset(OpAMD64SARQ) 17631 v.Type = t 17632 v.AddArg(x) 17633 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17634 v0.AddArg(y) 17635 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17636 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17637 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17638 v3.AuxInt = 64 17639 v3.AddArg(y) 17640 v2.AddArg(v3) 17641 v1.AddArg(v2) 17642 v0.AddArg(v1) 17643 v.AddArg(v0) 17644 return true 17645 } 17646 } 17647 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { 17648 b := v.Block 17649 _ = b 17650 // match: (Rsh8Ux16 <t> x y) 17651 // cond: 17652 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 17653 for { 17654 t := v.Type 17655 x := v.Args[0] 17656 y := v.Args[1] 17657 v.reset(OpAMD64ANDL) 17658 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 17659 v0.AddArg(x) 17660 v0.AddArg(y) 17661 v.AddArg(v0) 17662 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17663 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17664 v2.AuxInt = 8 17665 v2.AddArg(y) 17666 v1.AddArg(v2) 17667 v.AddArg(v1) 17668 return true 17669 } 17670 } 17671 func 
rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
	// Lowers a generic unsigned 8-bit right shift with a 32-bit count.
	// Go semantics require a result of 0 when the shift count is >= 8, so
	// the SHRB result is ANDed with a mask derived from comparing the
	// count against 8 (SBBLcarrymask presumably yields all-ones when
	// y < 8 and zero otherwise — see gen/AMD64.rules to confirm).
	// Always fires (no condition), so it always returns true.
	b := v.Block
	_ = b // generated prologue; b is used below via b.NewValue0
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		// Rewrite v in place into the ANDL of the raw shift and the mask.
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 8 // shift counts >= 8 must produce 0
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh8Ux64 lowers an unsigned 8-bit right shift with a
// 64-bit count. Identical structure to the 32-bit-count case above, except
// the count is compared with CMPQconst (64-bit compare).
func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
	b := v.Block
	_ = b // generated prologue; b is used below via b.NewValue0
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 8 // shift counts >= 8 must produce 0
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}

// rewriteValueAMD64_OpRsh8Ux8 lowers an unsigned 8-bit right shift with an
// 8-bit count. Identical structure to the cases above, except the count is
// compared with CMPBconst (8-bit compare).
//
// NOTE(review): this file is generated from gen/AMD64.rules ("do not
// edit!"); any behavioral change belongs in the rules file, followed by
// regeneration (cd gen; go run *.go).
func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b // generated prologue; b is used below via b.NewValue0
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 8 // shift counts >= 8 must produce 0
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func
rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { 17744 b := v.Block 17745 _ = b 17746 // match: (Rsh8x16 <t> x y) 17747 // cond: 17748 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 17749 for { 17750 t := v.Type 17751 x := v.Args[0] 17752 y := v.Args[1] 17753 v.reset(OpAMD64SARB) 17754 v.Type = t 17755 v.AddArg(x) 17756 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17757 v0.AddArg(y) 17758 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17759 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17760 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17761 v3.AuxInt = 8 17762 v3.AddArg(y) 17763 v2.AddArg(v3) 17764 v1.AddArg(v2) 17765 v0.AddArg(v1) 17766 v.AddArg(v0) 17767 return true 17768 } 17769 } 17770 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { 17771 b := v.Block 17772 _ = b 17773 // match: (Rsh8x32 <t> x y) 17774 // cond: 17775 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 17776 for { 17777 t := v.Type 17778 x := v.Args[0] 17779 y := v.Args[1] 17780 v.reset(OpAMD64SARB) 17781 v.Type = t 17782 v.AddArg(x) 17783 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17784 v0.AddArg(y) 17785 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17786 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17787 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17788 v3.AuxInt = 8 17789 v3.AddArg(y) 17790 v2.AddArg(v3) 17791 v1.AddArg(v2) 17792 v0.AddArg(v1) 17793 v.AddArg(v0) 17794 return true 17795 } 17796 } 17797 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { 17798 b := v.Block 17799 _ = b 17800 // match: (Rsh8x64 <t> x y) 17801 // cond: 17802 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 17803 for { 17804 t := v.Type 17805 x := v.Args[0] 17806 y := v.Args[1] 17807 v.reset(OpAMD64SARB) 17808 v.Type = t 17809 v.AddArg(x) 17810 v0 := b.NewValue0(v.Line, 
OpAMD64ORQ, y.Type) 17811 v0.AddArg(y) 17812 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 17813 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 17814 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17815 v3.AuxInt = 8 17816 v3.AddArg(y) 17817 v2.AddArg(v3) 17818 v1.AddArg(v2) 17819 v0.AddArg(v1) 17820 v.AddArg(v0) 17821 return true 17822 } 17823 } 17824 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { 17825 b := v.Block 17826 _ = b 17827 // match: (Rsh8x8 <t> x y) 17828 // cond: 17829 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 17830 for { 17831 t := v.Type 17832 x := v.Args[0] 17833 y := v.Args[1] 17834 v.reset(OpAMD64SARB) 17835 v.Type = t 17836 v.AddArg(x) 17837 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17838 v0.AddArg(y) 17839 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17840 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17841 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17842 v3.AuxInt = 8 17843 v3.AddArg(y) 17844 v2.AddArg(v3) 17845 v1.AddArg(v2) 17846 v0.AddArg(v1) 17847 v.AddArg(v0) 17848 return true 17849 } 17850 } 17851 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool { 17852 b := v.Block 17853 _ = b 17854 // match: (Select0 <t> (AddTupleFirst32 tuple val)) 17855 // cond: 17856 // result: (ADDL val (Select0 <t> tuple)) 17857 for { 17858 t := v.Type 17859 v_0 := v.Args[0] 17860 if v_0.Op != OpAMD64AddTupleFirst32 { 17861 break 17862 } 17863 tuple := v_0.Args[0] 17864 val := v_0.Args[1] 17865 v.reset(OpAMD64ADDL) 17866 v.AddArg(val) 17867 v0 := b.NewValue0(v.Line, OpSelect0, t) 17868 v0.AddArg(tuple) 17869 v.AddArg(v0) 17870 return true 17871 } 17872 // match: (Select0 <t> (AddTupleFirst64 tuple val)) 17873 // cond: 17874 // result: (ADDQ val (Select0 <t> tuple)) 17875 for { 17876 t := v.Type 17877 v_0 := v.Args[0] 17878 if v_0.Op != OpAMD64AddTupleFirst64 { 17879 break 17880 } 17881 tuple := v_0.Args[0] 17882 val := v_0.Args[1] 
17883 v.reset(OpAMD64ADDQ) 17884 v.AddArg(val) 17885 v0 := b.NewValue0(v.Line, OpSelect0, t) 17886 v0.AddArg(tuple) 17887 v.AddArg(v0) 17888 return true 17889 } 17890 return false 17891 } 17892 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool { 17893 b := v.Block 17894 _ = b 17895 // match: (Select1 (AddTupleFirst32 tuple _ )) 17896 // cond: 17897 // result: (Select1 tuple) 17898 for { 17899 v_0 := v.Args[0] 17900 if v_0.Op != OpAMD64AddTupleFirst32 { 17901 break 17902 } 17903 tuple := v_0.Args[0] 17904 v.reset(OpSelect1) 17905 v.AddArg(tuple) 17906 return true 17907 } 17908 // match: (Select1 (AddTupleFirst64 tuple _ )) 17909 // cond: 17910 // result: (Select1 tuple) 17911 for { 17912 v_0 := v.Args[0] 17913 if v_0.Op != OpAMD64AddTupleFirst64 { 17914 break 17915 } 17916 tuple := v_0.Args[0] 17917 v.reset(OpSelect1) 17918 v.AddArg(tuple) 17919 return true 17920 } 17921 return false 17922 } 17923 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { 17924 b := v.Block 17925 _ = b 17926 // match: (SignExt16to32 x) 17927 // cond: 17928 // result: (MOVWQSX x) 17929 for { 17930 x := v.Args[0] 17931 v.reset(OpAMD64MOVWQSX) 17932 v.AddArg(x) 17933 return true 17934 } 17935 } 17936 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { 17937 b := v.Block 17938 _ = b 17939 // match: (SignExt16to64 x) 17940 // cond: 17941 // result: (MOVWQSX x) 17942 for { 17943 x := v.Args[0] 17944 v.reset(OpAMD64MOVWQSX) 17945 v.AddArg(x) 17946 return true 17947 } 17948 } 17949 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { 17950 b := v.Block 17951 _ = b 17952 // match: (SignExt32to64 x) 17953 // cond: 17954 // result: (MOVLQSX x) 17955 for { 17956 x := v.Args[0] 17957 v.reset(OpAMD64MOVLQSX) 17958 v.AddArg(x) 17959 return true 17960 } 17961 } 17962 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { 17963 b := v.Block 17964 _ = b 17965 // match: (SignExt8to16 x) 17966 // cond: 17967 // result: 
(MOVBQSX x) 17968 for { 17969 x := v.Args[0] 17970 v.reset(OpAMD64MOVBQSX) 17971 v.AddArg(x) 17972 return true 17973 } 17974 } 17975 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { 17976 b := v.Block 17977 _ = b 17978 // match: (SignExt8to32 x) 17979 // cond: 17980 // result: (MOVBQSX x) 17981 for { 17982 x := v.Args[0] 17983 v.reset(OpAMD64MOVBQSX) 17984 v.AddArg(x) 17985 return true 17986 } 17987 } 17988 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { 17989 b := v.Block 17990 _ = b 17991 // match: (SignExt8to64 x) 17992 // cond: 17993 // result: (MOVBQSX x) 17994 for { 17995 x := v.Args[0] 17996 v.reset(OpAMD64MOVBQSX) 17997 v.AddArg(x) 17998 return true 17999 } 18000 } 18001 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { 18002 b := v.Block 18003 _ = b 18004 // match: (Sqrt x) 18005 // cond: 18006 // result: (SQRTSD x) 18007 for { 18008 x := v.Args[0] 18009 v.reset(OpAMD64SQRTSD) 18010 v.AddArg(x) 18011 return true 18012 } 18013 } 18014 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { 18015 b := v.Block 18016 _ = b 18017 // match: (StaticCall [argwid] {target} mem) 18018 // cond: 18019 // result: (CALLstatic [argwid] {target} mem) 18020 for { 18021 argwid := v.AuxInt 18022 target := v.Aux 18023 mem := v.Args[0] 18024 v.reset(OpAMD64CALLstatic) 18025 v.AuxInt = argwid 18026 v.Aux = target 18027 v.AddArg(mem) 18028 return true 18029 } 18030 } 18031 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { 18032 b := v.Block 18033 _ = b 18034 // match: (Store [8] ptr val mem) 18035 // cond: is64BitFloat(val.Type) 18036 // result: (MOVSDstore ptr val mem) 18037 for { 18038 if v.AuxInt != 8 { 18039 break 18040 } 18041 ptr := v.Args[0] 18042 val := v.Args[1] 18043 mem := v.Args[2] 18044 if !(is64BitFloat(val.Type)) { 18045 break 18046 } 18047 v.reset(OpAMD64MOVSDstore) 18048 v.AddArg(ptr) 18049 v.AddArg(val) 18050 v.AddArg(mem) 18051 return true 18052 } 18053 // match: (Store [4] 
ptr val mem) 18054 // cond: is32BitFloat(val.Type) 18055 // result: (MOVSSstore ptr val mem) 18056 for { 18057 if v.AuxInt != 4 { 18058 break 18059 } 18060 ptr := v.Args[0] 18061 val := v.Args[1] 18062 mem := v.Args[2] 18063 if !(is32BitFloat(val.Type)) { 18064 break 18065 } 18066 v.reset(OpAMD64MOVSSstore) 18067 v.AddArg(ptr) 18068 v.AddArg(val) 18069 v.AddArg(mem) 18070 return true 18071 } 18072 // match: (Store [8] ptr val mem) 18073 // cond: 18074 // result: (MOVQstore ptr val mem) 18075 for { 18076 if v.AuxInt != 8 { 18077 break 18078 } 18079 ptr := v.Args[0] 18080 val := v.Args[1] 18081 mem := v.Args[2] 18082 v.reset(OpAMD64MOVQstore) 18083 v.AddArg(ptr) 18084 v.AddArg(val) 18085 v.AddArg(mem) 18086 return true 18087 } 18088 // match: (Store [4] ptr val mem) 18089 // cond: 18090 // result: (MOVLstore ptr val mem) 18091 for { 18092 if v.AuxInt != 4 { 18093 break 18094 } 18095 ptr := v.Args[0] 18096 val := v.Args[1] 18097 mem := v.Args[2] 18098 v.reset(OpAMD64MOVLstore) 18099 v.AddArg(ptr) 18100 v.AddArg(val) 18101 v.AddArg(mem) 18102 return true 18103 } 18104 // match: (Store [2] ptr val mem) 18105 // cond: 18106 // result: (MOVWstore ptr val mem) 18107 for { 18108 if v.AuxInt != 2 { 18109 break 18110 } 18111 ptr := v.Args[0] 18112 val := v.Args[1] 18113 mem := v.Args[2] 18114 v.reset(OpAMD64MOVWstore) 18115 v.AddArg(ptr) 18116 v.AddArg(val) 18117 v.AddArg(mem) 18118 return true 18119 } 18120 // match: (Store [1] ptr val mem) 18121 // cond: 18122 // result: (MOVBstore ptr val mem) 18123 for { 18124 if v.AuxInt != 1 { 18125 break 18126 } 18127 ptr := v.Args[0] 18128 val := v.Args[1] 18129 mem := v.Args[2] 18130 v.reset(OpAMD64MOVBstore) 18131 v.AddArg(ptr) 18132 v.AddArg(val) 18133 v.AddArg(mem) 18134 return true 18135 } 18136 return false 18137 } 18138 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { 18139 b := v.Block 18140 _ = b 18141 // match: (Sub16 x y) 18142 // cond: 18143 // result: (SUBL x y) 18144 for { 18145 x := v.Args[0] 18146 y := 
v.Args[1] 18147 v.reset(OpAMD64SUBL) 18148 v.AddArg(x) 18149 v.AddArg(y) 18150 return true 18151 } 18152 } 18153 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { 18154 b := v.Block 18155 _ = b 18156 // match: (Sub32 x y) 18157 // cond: 18158 // result: (SUBL x y) 18159 for { 18160 x := v.Args[0] 18161 y := v.Args[1] 18162 v.reset(OpAMD64SUBL) 18163 v.AddArg(x) 18164 v.AddArg(y) 18165 return true 18166 } 18167 } 18168 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { 18169 b := v.Block 18170 _ = b 18171 // match: (Sub32F x y) 18172 // cond: 18173 // result: (SUBSS x y) 18174 for { 18175 x := v.Args[0] 18176 y := v.Args[1] 18177 v.reset(OpAMD64SUBSS) 18178 v.AddArg(x) 18179 v.AddArg(y) 18180 return true 18181 } 18182 } 18183 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { 18184 b := v.Block 18185 _ = b 18186 // match: (Sub64 x y) 18187 // cond: 18188 // result: (SUBQ x y) 18189 for { 18190 x := v.Args[0] 18191 y := v.Args[1] 18192 v.reset(OpAMD64SUBQ) 18193 v.AddArg(x) 18194 v.AddArg(y) 18195 return true 18196 } 18197 } 18198 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { 18199 b := v.Block 18200 _ = b 18201 // match: (Sub64F x y) 18202 // cond: 18203 // result: (SUBSD x y) 18204 for { 18205 x := v.Args[0] 18206 y := v.Args[1] 18207 v.reset(OpAMD64SUBSD) 18208 v.AddArg(x) 18209 v.AddArg(y) 18210 return true 18211 } 18212 } 18213 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { 18214 b := v.Block 18215 _ = b 18216 // match: (Sub8 x y) 18217 // cond: 18218 // result: (SUBL x y) 18219 for { 18220 x := v.Args[0] 18221 y := v.Args[1] 18222 v.reset(OpAMD64SUBL) 18223 v.AddArg(x) 18224 v.AddArg(y) 18225 return true 18226 } 18227 } 18228 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { 18229 b := v.Block 18230 _ = b 18231 // match: (SubPtr x y) 18232 // cond: config.PtrSize == 8 18233 // result: (SUBQ x y) 18234 for { 18235 x := v.Args[0] 18236 y := v.Args[1] 18237 if 
!(config.PtrSize == 8) { 18238 break 18239 } 18240 v.reset(OpAMD64SUBQ) 18241 v.AddArg(x) 18242 v.AddArg(y) 18243 return true 18244 } 18245 // match: (SubPtr x y) 18246 // cond: config.PtrSize == 4 18247 // result: (SUBL x y) 18248 for { 18249 x := v.Args[0] 18250 y := v.Args[1] 18251 if !(config.PtrSize == 4) { 18252 break 18253 } 18254 v.reset(OpAMD64SUBL) 18255 v.AddArg(x) 18256 v.AddArg(y) 18257 return true 18258 } 18259 return false 18260 } 18261 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { 18262 b := v.Block 18263 _ = b 18264 // match: (Trunc16to8 x) 18265 // cond: 18266 // result: x 18267 for { 18268 x := v.Args[0] 18269 v.reset(OpCopy) 18270 v.Type = x.Type 18271 v.AddArg(x) 18272 return true 18273 } 18274 } 18275 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { 18276 b := v.Block 18277 _ = b 18278 // match: (Trunc32to16 x) 18279 // cond: 18280 // result: x 18281 for { 18282 x := v.Args[0] 18283 v.reset(OpCopy) 18284 v.Type = x.Type 18285 v.AddArg(x) 18286 return true 18287 } 18288 } 18289 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { 18290 b := v.Block 18291 _ = b 18292 // match: (Trunc32to8 x) 18293 // cond: 18294 // result: x 18295 for { 18296 x := v.Args[0] 18297 v.reset(OpCopy) 18298 v.Type = x.Type 18299 v.AddArg(x) 18300 return true 18301 } 18302 } 18303 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { 18304 b := v.Block 18305 _ = b 18306 // match: (Trunc64to16 x) 18307 // cond: 18308 // result: x 18309 for { 18310 x := v.Args[0] 18311 v.reset(OpCopy) 18312 v.Type = x.Type 18313 v.AddArg(x) 18314 return true 18315 } 18316 } 18317 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { 18318 b := v.Block 18319 _ = b 18320 // match: (Trunc64to32 x) 18321 // cond: 18322 // result: x 18323 for { 18324 x := v.Args[0] 18325 v.reset(OpCopy) 18326 v.Type = x.Type 18327 v.AddArg(x) 18328 return true 18329 } 18330 } 18331 func rewriteValueAMD64_OpTrunc64to8(v 
*Value, config *Config) bool { 18332 b := v.Block 18333 _ = b 18334 // match: (Trunc64to8 x) 18335 // cond: 18336 // result: x 18337 for { 18338 x := v.Args[0] 18339 v.reset(OpCopy) 18340 v.Type = x.Type 18341 v.AddArg(x) 18342 return true 18343 } 18344 } 18345 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { 18346 b := v.Block 18347 _ = b 18348 // match: (Xor16 x y) 18349 // cond: 18350 // result: (XORL x y) 18351 for { 18352 x := v.Args[0] 18353 y := v.Args[1] 18354 v.reset(OpAMD64XORL) 18355 v.AddArg(x) 18356 v.AddArg(y) 18357 return true 18358 } 18359 } 18360 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { 18361 b := v.Block 18362 _ = b 18363 // match: (Xor32 x y) 18364 // cond: 18365 // result: (XORL x y) 18366 for { 18367 x := v.Args[0] 18368 y := v.Args[1] 18369 v.reset(OpAMD64XORL) 18370 v.AddArg(x) 18371 v.AddArg(y) 18372 return true 18373 } 18374 } 18375 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { 18376 b := v.Block 18377 _ = b 18378 // match: (Xor64 x y) 18379 // cond: 18380 // result: (XORQ x y) 18381 for { 18382 x := v.Args[0] 18383 y := v.Args[1] 18384 v.reset(OpAMD64XORQ) 18385 v.AddArg(x) 18386 v.AddArg(y) 18387 return true 18388 } 18389 } 18390 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { 18391 b := v.Block 18392 _ = b 18393 // match: (Xor8 x y) 18394 // cond: 18395 // result: (XORL x y) 18396 for { 18397 x := v.Args[0] 18398 y := v.Args[1] 18399 v.reset(OpAMD64XORL) 18400 v.AddArg(x) 18401 v.AddArg(y) 18402 return true 18403 } 18404 } 18405 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool { 18406 b := v.Block 18407 _ = b 18408 // match: (Zero [s] _ mem) 18409 // cond: SizeAndAlign(s).Size() == 0 18410 // result: mem 18411 for { 18412 s := v.AuxInt 18413 mem := v.Args[1] 18414 if !(SizeAndAlign(s).Size() == 0) { 18415 break 18416 } 18417 v.reset(OpCopy) 18418 v.Type = mem.Type 18419 v.AddArg(mem) 18420 return true 18421 } 18422 // match: (Zero [s] destptr mem) 18423 // 
cond: SizeAndAlign(s).Size() == 1 18424 // result: (MOVBstoreconst [0] destptr mem) 18425 for { 18426 s := v.AuxInt 18427 destptr := v.Args[0] 18428 mem := v.Args[1] 18429 if !(SizeAndAlign(s).Size() == 1) { 18430 break 18431 } 18432 v.reset(OpAMD64MOVBstoreconst) 18433 v.AuxInt = 0 18434 v.AddArg(destptr) 18435 v.AddArg(mem) 18436 return true 18437 } 18438 // match: (Zero [s] destptr mem) 18439 // cond: SizeAndAlign(s).Size() == 2 18440 // result: (MOVWstoreconst [0] destptr mem) 18441 for { 18442 s := v.AuxInt 18443 destptr := v.Args[0] 18444 mem := v.Args[1] 18445 if !(SizeAndAlign(s).Size() == 2) { 18446 break 18447 } 18448 v.reset(OpAMD64MOVWstoreconst) 18449 v.AuxInt = 0 18450 v.AddArg(destptr) 18451 v.AddArg(mem) 18452 return true 18453 } 18454 // match: (Zero [s] destptr mem) 18455 // cond: SizeAndAlign(s).Size() == 4 18456 // result: (MOVLstoreconst [0] destptr mem) 18457 for { 18458 s := v.AuxInt 18459 destptr := v.Args[0] 18460 mem := v.Args[1] 18461 if !(SizeAndAlign(s).Size() == 4) { 18462 break 18463 } 18464 v.reset(OpAMD64MOVLstoreconst) 18465 v.AuxInt = 0 18466 v.AddArg(destptr) 18467 v.AddArg(mem) 18468 return true 18469 } 18470 // match: (Zero [s] destptr mem) 18471 // cond: SizeAndAlign(s).Size() == 8 18472 // result: (MOVQstoreconst [0] destptr mem) 18473 for { 18474 s := v.AuxInt 18475 destptr := v.Args[0] 18476 mem := v.Args[1] 18477 if !(SizeAndAlign(s).Size() == 8) { 18478 break 18479 } 18480 v.reset(OpAMD64MOVQstoreconst) 18481 v.AuxInt = 0 18482 v.AddArg(destptr) 18483 v.AddArg(mem) 18484 return true 18485 } 18486 // match: (Zero [s] destptr mem) 18487 // cond: SizeAndAlign(s).Size() == 3 18488 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 18489 for { 18490 s := v.AuxInt 18491 destptr := v.Args[0] 18492 mem := v.Args[1] 18493 if !(SizeAndAlign(s).Size() == 3) { 18494 break 18495 } 18496 v.reset(OpAMD64MOVBstoreconst) 18497 v.AuxInt = makeValAndOff(0, 2) 18498 v.AddArg(destptr) 18499 v0 := 
b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem) 18500 v0.AuxInt = 0 18501 v0.AddArg(destptr) 18502 v0.AddArg(mem) 18503 v.AddArg(v0) 18504 return true 18505 } 18506 // match: (Zero [s] destptr mem) 18507 // cond: SizeAndAlign(s).Size() == 5 18508 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 18509 for { 18510 s := v.AuxInt 18511 destptr := v.Args[0] 18512 mem := v.Args[1] 18513 if !(SizeAndAlign(s).Size() == 5) { 18514 break 18515 } 18516 v.reset(OpAMD64MOVBstoreconst) 18517 v.AuxInt = makeValAndOff(0, 4) 18518 v.AddArg(destptr) 18519 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 18520 v0.AuxInt = 0 18521 v0.AddArg(destptr) 18522 v0.AddArg(mem) 18523 v.AddArg(v0) 18524 return true 18525 } 18526 // match: (Zero [s] destptr mem) 18527 // cond: SizeAndAlign(s).Size() == 6 18528 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 18529 for { 18530 s := v.AuxInt 18531 destptr := v.Args[0] 18532 mem := v.Args[1] 18533 if !(SizeAndAlign(s).Size() == 6) { 18534 break 18535 } 18536 v.reset(OpAMD64MOVWstoreconst) 18537 v.AuxInt = makeValAndOff(0, 4) 18538 v.AddArg(destptr) 18539 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 18540 v0.AuxInt = 0 18541 v0.AddArg(destptr) 18542 v0.AddArg(mem) 18543 v.AddArg(v0) 18544 return true 18545 } 18546 // match: (Zero [s] destptr mem) 18547 // cond: SizeAndAlign(s).Size() == 7 18548 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 18549 for { 18550 s := v.AuxInt 18551 destptr := v.Args[0] 18552 mem := v.Args[1] 18553 if !(SizeAndAlign(s).Size() == 7) { 18554 break 18555 } 18556 v.reset(OpAMD64MOVLstoreconst) 18557 v.AuxInt = makeValAndOff(0, 3) 18558 v.AddArg(destptr) 18559 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 18560 v0.AuxInt = 0 18561 v0.AddArg(destptr) 18562 v0.AddArg(mem) 18563 v.AddArg(v0) 18564 return true 18565 } 18566 // match: (Zero [s] destptr mem) 18567 // 
cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 18568 // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem)) 18569 for { 18570 s := v.AuxInt 18571 destptr := v.Args[0] 18572 mem := v.Args[1] 18573 if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) { 18574 break 18575 } 18576 v.reset(OpZero) 18577 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8 18578 v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type) 18579 v0.AuxInt = SizeAndAlign(s).Size() % 8 18580 v0.AddArg(destptr) 18581 v.AddArg(v0) 18582 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18583 v1.AuxInt = 0 18584 v1.AddArg(destptr) 18585 v1.AddArg(mem) 18586 v.AddArg(v1) 18587 return true 18588 } 18589 // match: (Zero [s] destptr mem) 18590 // cond: SizeAndAlign(s).Size() == 16 18591 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 18592 for { 18593 s := v.AuxInt 18594 destptr := v.Args[0] 18595 mem := v.Args[1] 18596 if !(SizeAndAlign(s).Size() == 16) { 18597 break 18598 } 18599 v.reset(OpAMD64MOVQstoreconst) 18600 v.AuxInt = makeValAndOff(0, 8) 18601 v.AddArg(destptr) 18602 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18603 v0.AuxInt = 0 18604 v0.AddArg(destptr) 18605 v0.AddArg(mem) 18606 v.AddArg(v0) 18607 return true 18608 } 18609 // match: (Zero [s] destptr mem) 18610 // cond: SizeAndAlign(s).Size() == 24 18611 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 18612 for { 18613 s := v.AuxInt 18614 destptr := v.Args[0] 18615 mem := v.Args[1] 18616 if !(SizeAndAlign(s).Size() == 24) { 18617 break 18618 } 18619 v.reset(OpAMD64MOVQstoreconst) 18620 v.AuxInt = makeValAndOff(0, 16) 18621 v.AddArg(destptr) 18622 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18623 v0.AuxInt = makeValAndOff(0, 8) 18624 
v0.AddArg(destptr) 18625 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18626 v1.AuxInt = 0 18627 v1.AddArg(destptr) 18628 v1.AddArg(mem) 18629 v0.AddArg(v1) 18630 v.AddArg(v0) 18631 return true 18632 } 18633 // match: (Zero [s] destptr mem) 18634 // cond: SizeAndAlign(s).Size() == 32 18635 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 18636 for { 18637 s := v.AuxInt 18638 destptr := v.Args[0] 18639 mem := v.Args[1] 18640 if !(SizeAndAlign(s).Size() == 32) { 18641 break 18642 } 18643 v.reset(OpAMD64MOVQstoreconst) 18644 v.AuxInt = makeValAndOff(0, 24) 18645 v.AddArg(destptr) 18646 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18647 v0.AuxInt = makeValAndOff(0, 16) 18648 v0.AddArg(destptr) 18649 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18650 v1.AuxInt = makeValAndOff(0, 8) 18651 v1.AddArg(destptr) 18652 v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 18653 v2.AuxInt = 0 18654 v2.AddArg(destptr) 18655 v2.AddArg(mem) 18656 v1.AddArg(v2) 18657 v0.AddArg(v1) 18658 v.AddArg(v0) 18659 return true 18660 } 18661 // match: (Zero [s] destptr mem) 18662 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice 18663 // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 18664 for { 18665 s := v.AuxInt 18666 destptr := v.Args[0] 18667 mem := v.Args[1] 18668 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) { 18669 break 18670 } 18671 v.reset(OpZero) 18672 v.AuxInt = SizeAndAlign(s).Size() - 8 18673 v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type) 18674 v0.AuxInt = 8 18675 v0.AddArg(destptr) 18676 v.AddArg(v0) 18677 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 18678 
v1.AddArg(destptr) 18679 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 18680 v2.AuxInt = 0 18681 v1.AddArg(v2) 18682 v1.AddArg(mem) 18683 v.AddArg(v1) 18684 return true 18685 } 18686 // match: (Zero [s] destptr mem) 18687 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 18688 // result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem) 18689 for { 18690 s := v.AuxInt 18691 destptr := v.Args[0] 18692 mem := v.Args[1] 18693 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 18694 break 18695 } 18696 v.reset(OpAMD64DUFFZERO) 18697 v.AuxInt = SizeAndAlign(s).Size() 18698 v.AddArg(destptr) 18699 v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128) 18700 v0.AuxInt = 0 18701 v.AddArg(v0) 18702 v.AddArg(mem) 18703 return true 18704 } 18705 // match: (Zero [s] destptr mem) 18706 // cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0 18707 // result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem) 18708 for { 18709 s := v.AuxInt 18710 destptr := v.Args[0] 18711 mem := v.Args[1] 18712 if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) { 18713 break 18714 } 18715 v.reset(OpAMD64REPSTOSQ) 18716 v.AddArg(destptr) 18717 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 18718 v0.AuxInt = SizeAndAlign(s).Size() / 8 18719 v.AddArg(v0) 18720 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 18721 v1.AuxInt = 0 18722 v.AddArg(v1) 18723 v.AddArg(mem) 18724 return true 18725 } 18726 return false 18727 } 18728 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool { 18729 b := v.Block 18730 _ = b 18731 // match: (ZeroExt16to32 x) 18732 // cond: 18733 // result: (MOVWQZX x) 18734 for { 18735 x := v.Args[0] 18736 
v.reset(OpAMD64MOVWQZX) 18737 v.AddArg(x) 18738 return true 18739 } 18740 } 18741 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool { 18742 b := v.Block 18743 _ = b 18744 // match: (ZeroExt16to64 x) 18745 // cond: 18746 // result: (MOVWQZX x) 18747 for { 18748 x := v.Args[0] 18749 v.reset(OpAMD64MOVWQZX) 18750 v.AddArg(x) 18751 return true 18752 } 18753 } 18754 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool { 18755 b := v.Block 18756 _ = b 18757 // match: (ZeroExt32to64 x) 18758 // cond: 18759 // result: (MOVLQZX x) 18760 for { 18761 x := v.Args[0] 18762 v.reset(OpAMD64MOVLQZX) 18763 v.AddArg(x) 18764 return true 18765 } 18766 } 18767 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool { 18768 b := v.Block 18769 _ = b 18770 // match: (ZeroExt8to16 x) 18771 // cond: 18772 // result: (MOVBQZX x) 18773 for { 18774 x := v.Args[0] 18775 v.reset(OpAMD64MOVBQZX) 18776 v.AddArg(x) 18777 return true 18778 } 18779 } 18780 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool { 18781 b := v.Block 18782 _ = b 18783 // match: (ZeroExt8to32 x) 18784 // cond: 18785 // result: (MOVBQZX x) 18786 for { 18787 x := v.Args[0] 18788 v.reset(OpAMD64MOVBQZX) 18789 v.AddArg(x) 18790 return true 18791 } 18792 } 18793 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool { 18794 b := v.Block 18795 _ = b 18796 // match: (ZeroExt8to64 x) 18797 // cond: 18798 // result: (MOVBQZX x) 18799 for { 18800 x := v.Args[0] 18801 v.reset(OpAMD64MOVBQZX) 18802 v.AddArg(x) 18803 return true 18804 } 18805 } 18806 func rewriteBlockAMD64(b *Block) bool { 18807 switch b.Kind { 18808 case BlockAMD64EQ: 18809 // match: (EQ (InvertFlags cmp) yes no) 18810 // cond: 18811 // result: (EQ cmp yes no) 18812 for { 18813 v := b.Control 18814 if v.Op != OpAMD64InvertFlags { 18815 break 18816 } 18817 cmp := v.Args[0] 18818 yes := b.Succs[0] 18819 no := b.Succs[1] 18820 b.Kind = BlockAMD64EQ 18821 b.SetControl(cmp) 18822 _ = yes 18823 
_ = no 18824 return true 18825 } 18826 // match: (EQ (FlagEQ) yes no) 18827 // cond: 18828 // result: (First nil yes no) 18829 for { 18830 v := b.Control 18831 if v.Op != OpAMD64FlagEQ { 18832 break 18833 } 18834 yes := b.Succs[0] 18835 no := b.Succs[1] 18836 b.Kind = BlockFirst 18837 b.SetControl(nil) 18838 _ = yes 18839 _ = no 18840 return true 18841 } 18842 // match: (EQ (FlagLT_ULT) yes no) 18843 // cond: 18844 // result: (First nil no yes) 18845 for { 18846 v := b.Control 18847 if v.Op != OpAMD64FlagLT_ULT { 18848 break 18849 } 18850 yes := b.Succs[0] 18851 no := b.Succs[1] 18852 b.Kind = BlockFirst 18853 b.SetControl(nil) 18854 b.swapSuccessors() 18855 _ = no 18856 _ = yes 18857 return true 18858 } 18859 // match: (EQ (FlagLT_UGT) yes no) 18860 // cond: 18861 // result: (First nil no yes) 18862 for { 18863 v := b.Control 18864 if v.Op != OpAMD64FlagLT_UGT { 18865 break 18866 } 18867 yes := b.Succs[0] 18868 no := b.Succs[1] 18869 b.Kind = BlockFirst 18870 b.SetControl(nil) 18871 b.swapSuccessors() 18872 _ = no 18873 _ = yes 18874 return true 18875 } 18876 // match: (EQ (FlagGT_ULT) yes no) 18877 // cond: 18878 // result: (First nil no yes) 18879 for { 18880 v := b.Control 18881 if v.Op != OpAMD64FlagGT_ULT { 18882 break 18883 } 18884 yes := b.Succs[0] 18885 no := b.Succs[1] 18886 b.Kind = BlockFirst 18887 b.SetControl(nil) 18888 b.swapSuccessors() 18889 _ = no 18890 _ = yes 18891 return true 18892 } 18893 // match: (EQ (FlagGT_UGT) yes no) 18894 // cond: 18895 // result: (First nil no yes) 18896 for { 18897 v := b.Control 18898 if v.Op != OpAMD64FlagGT_UGT { 18899 break 18900 } 18901 yes := b.Succs[0] 18902 no := b.Succs[1] 18903 b.Kind = BlockFirst 18904 b.SetControl(nil) 18905 b.swapSuccessors() 18906 _ = no 18907 _ = yes 18908 return true 18909 } 18910 case BlockAMD64GE: 18911 // match: (GE (InvertFlags cmp) yes no) 18912 // cond: 18913 // result: (LE cmp yes no) 18914 for { 18915 v := b.Control 18916 if v.Op != OpAMD64InvertFlags { 18917 break 18918 } 
18919 cmp := v.Args[0] 18920 yes := b.Succs[0] 18921 no := b.Succs[1] 18922 b.Kind = BlockAMD64LE 18923 b.SetControl(cmp) 18924 _ = yes 18925 _ = no 18926 return true 18927 } 18928 // match: (GE (FlagEQ) yes no) 18929 // cond: 18930 // result: (First nil yes no) 18931 for { 18932 v := b.Control 18933 if v.Op != OpAMD64FlagEQ { 18934 break 18935 } 18936 yes := b.Succs[0] 18937 no := b.Succs[1] 18938 b.Kind = BlockFirst 18939 b.SetControl(nil) 18940 _ = yes 18941 _ = no 18942 return true 18943 } 18944 // match: (GE (FlagLT_ULT) yes no) 18945 // cond: 18946 // result: (First nil no yes) 18947 for { 18948 v := b.Control 18949 if v.Op != OpAMD64FlagLT_ULT { 18950 break 18951 } 18952 yes := b.Succs[0] 18953 no := b.Succs[1] 18954 b.Kind = BlockFirst 18955 b.SetControl(nil) 18956 b.swapSuccessors() 18957 _ = no 18958 _ = yes 18959 return true 18960 } 18961 // match: (GE (FlagLT_UGT) yes no) 18962 // cond: 18963 // result: (First nil no yes) 18964 for { 18965 v := b.Control 18966 if v.Op != OpAMD64FlagLT_UGT { 18967 break 18968 } 18969 yes := b.Succs[0] 18970 no := b.Succs[1] 18971 b.Kind = BlockFirst 18972 b.SetControl(nil) 18973 b.swapSuccessors() 18974 _ = no 18975 _ = yes 18976 return true 18977 } 18978 // match: (GE (FlagGT_ULT) yes no) 18979 // cond: 18980 // result: (First nil yes no) 18981 for { 18982 v := b.Control 18983 if v.Op != OpAMD64FlagGT_ULT { 18984 break 18985 } 18986 yes := b.Succs[0] 18987 no := b.Succs[1] 18988 b.Kind = BlockFirst 18989 b.SetControl(nil) 18990 _ = yes 18991 _ = no 18992 return true 18993 } 18994 // match: (GE (FlagGT_UGT) yes no) 18995 // cond: 18996 // result: (First nil yes no) 18997 for { 18998 v := b.Control 18999 if v.Op != OpAMD64FlagGT_UGT { 19000 break 19001 } 19002 yes := b.Succs[0] 19003 no := b.Succs[1] 19004 b.Kind = BlockFirst 19005 b.SetControl(nil) 19006 _ = yes 19007 _ = no 19008 return true 19009 } 19010 case BlockAMD64GT: 19011 // match: (GT (InvertFlags cmp) yes no) 19012 // cond: 19013 // result: (LT cmp yes no) 
19014 for { 19015 v := b.Control 19016 if v.Op != OpAMD64InvertFlags { 19017 break 19018 } 19019 cmp := v.Args[0] 19020 yes := b.Succs[0] 19021 no := b.Succs[1] 19022 b.Kind = BlockAMD64LT 19023 b.SetControl(cmp) 19024 _ = yes 19025 _ = no 19026 return true 19027 } 19028 // match: (GT (FlagEQ) yes no) 19029 // cond: 19030 // result: (First nil no yes) 19031 for { 19032 v := b.Control 19033 if v.Op != OpAMD64FlagEQ { 19034 break 19035 } 19036 yes := b.Succs[0] 19037 no := b.Succs[1] 19038 b.Kind = BlockFirst 19039 b.SetControl(nil) 19040 b.swapSuccessors() 19041 _ = no 19042 _ = yes 19043 return true 19044 } 19045 // match: (GT (FlagLT_ULT) yes no) 19046 // cond: 19047 // result: (First nil no yes) 19048 for { 19049 v := b.Control 19050 if v.Op != OpAMD64FlagLT_ULT { 19051 break 19052 } 19053 yes := b.Succs[0] 19054 no := b.Succs[1] 19055 b.Kind = BlockFirst 19056 b.SetControl(nil) 19057 b.swapSuccessors() 19058 _ = no 19059 _ = yes 19060 return true 19061 } 19062 // match: (GT (FlagLT_UGT) yes no) 19063 // cond: 19064 // result: (First nil no yes) 19065 for { 19066 v := b.Control 19067 if v.Op != OpAMD64FlagLT_UGT { 19068 break 19069 } 19070 yes := b.Succs[0] 19071 no := b.Succs[1] 19072 b.Kind = BlockFirst 19073 b.SetControl(nil) 19074 b.swapSuccessors() 19075 _ = no 19076 _ = yes 19077 return true 19078 } 19079 // match: (GT (FlagGT_ULT) yes no) 19080 // cond: 19081 // result: (First nil yes no) 19082 for { 19083 v := b.Control 19084 if v.Op != OpAMD64FlagGT_ULT { 19085 break 19086 } 19087 yes := b.Succs[0] 19088 no := b.Succs[1] 19089 b.Kind = BlockFirst 19090 b.SetControl(nil) 19091 _ = yes 19092 _ = no 19093 return true 19094 } 19095 // match: (GT (FlagGT_UGT) yes no) 19096 // cond: 19097 // result: (First nil yes no) 19098 for { 19099 v := b.Control 19100 if v.Op != OpAMD64FlagGT_UGT { 19101 break 19102 } 19103 yes := b.Succs[0] 19104 no := b.Succs[1] 19105 b.Kind = BlockFirst 19106 b.SetControl(nil) 19107 _ = yes 19108 _ = no 19109 return true 19110 } 19111 
case BlockIf: 19112 // match: (If (SETL cmp) yes no) 19113 // cond: 19114 // result: (LT cmp yes no) 19115 for { 19116 v := b.Control 19117 if v.Op != OpAMD64SETL { 19118 break 19119 } 19120 cmp := v.Args[0] 19121 yes := b.Succs[0] 19122 no := b.Succs[1] 19123 b.Kind = BlockAMD64LT 19124 b.SetControl(cmp) 19125 _ = yes 19126 _ = no 19127 return true 19128 } 19129 // match: (If (SETLE cmp) yes no) 19130 // cond: 19131 // result: (LE cmp yes no) 19132 for { 19133 v := b.Control 19134 if v.Op != OpAMD64SETLE { 19135 break 19136 } 19137 cmp := v.Args[0] 19138 yes := b.Succs[0] 19139 no := b.Succs[1] 19140 b.Kind = BlockAMD64LE 19141 b.SetControl(cmp) 19142 _ = yes 19143 _ = no 19144 return true 19145 } 19146 // match: (If (SETG cmp) yes no) 19147 // cond: 19148 // result: (GT cmp yes no) 19149 for { 19150 v := b.Control 19151 if v.Op != OpAMD64SETG { 19152 break 19153 } 19154 cmp := v.Args[0] 19155 yes := b.Succs[0] 19156 no := b.Succs[1] 19157 b.Kind = BlockAMD64GT 19158 b.SetControl(cmp) 19159 _ = yes 19160 _ = no 19161 return true 19162 } 19163 // match: (If (SETGE cmp) yes no) 19164 // cond: 19165 // result: (GE cmp yes no) 19166 for { 19167 v := b.Control 19168 if v.Op != OpAMD64SETGE { 19169 break 19170 } 19171 cmp := v.Args[0] 19172 yes := b.Succs[0] 19173 no := b.Succs[1] 19174 b.Kind = BlockAMD64GE 19175 b.SetControl(cmp) 19176 _ = yes 19177 _ = no 19178 return true 19179 } 19180 // match: (If (SETEQ cmp) yes no) 19181 // cond: 19182 // result: (EQ cmp yes no) 19183 for { 19184 v := b.Control 19185 if v.Op != OpAMD64SETEQ { 19186 break 19187 } 19188 cmp := v.Args[0] 19189 yes := b.Succs[0] 19190 no := b.Succs[1] 19191 b.Kind = BlockAMD64EQ 19192 b.SetControl(cmp) 19193 _ = yes 19194 _ = no 19195 return true 19196 } 19197 // match: (If (SETNE cmp) yes no) 19198 // cond: 19199 // result: (NE cmp yes no) 19200 for { 19201 v := b.Control 19202 if v.Op != OpAMD64SETNE { 19203 break 19204 } 19205 cmp := v.Args[0] 19206 yes := b.Succs[0] 19207 no := b.Succs[1] 19208 
b.Kind = BlockAMD64NE 19209 b.SetControl(cmp) 19210 _ = yes 19211 _ = no 19212 return true 19213 } 19214 // match: (If (SETB cmp) yes no) 19215 // cond: 19216 // result: (ULT cmp yes no) 19217 for { 19218 v := b.Control 19219 if v.Op != OpAMD64SETB { 19220 break 19221 } 19222 cmp := v.Args[0] 19223 yes := b.Succs[0] 19224 no := b.Succs[1] 19225 b.Kind = BlockAMD64ULT 19226 b.SetControl(cmp) 19227 _ = yes 19228 _ = no 19229 return true 19230 } 19231 // match: (If (SETBE cmp) yes no) 19232 // cond: 19233 // result: (ULE cmp yes no) 19234 for { 19235 v := b.Control 19236 if v.Op != OpAMD64SETBE { 19237 break 19238 } 19239 cmp := v.Args[0] 19240 yes := b.Succs[0] 19241 no := b.Succs[1] 19242 b.Kind = BlockAMD64ULE 19243 b.SetControl(cmp) 19244 _ = yes 19245 _ = no 19246 return true 19247 } 19248 // match: (If (SETA cmp) yes no) 19249 // cond: 19250 // result: (UGT cmp yes no) 19251 for { 19252 v := b.Control 19253 if v.Op != OpAMD64SETA { 19254 break 19255 } 19256 cmp := v.Args[0] 19257 yes := b.Succs[0] 19258 no := b.Succs[1] 19259 b.Kind = BlockAMD64UGT 19260 b.SetControl(cmp) 19261 _ = yes 19262 _ = no 19263 return true 19264 } 19265 // match: (If (SETAE cmp) yes no) 19266 // cond: 19267 // result: (UGE cmp yes no) 19268 for { 19269 v := b.Control 19270 if v.Op != OpAMD64SETAE { 19271 break 19272 } 19273 cmp := v.Args[0] 19274 yes := b.Succs[0] 19275 no := b.Succs[1] 19276 b.Kind = BlockAMD64UGE 19277 b.SetControl(cmp) 19278 _ = yes 19279 _ = no 19280 return true 19281 } 19282 // match: (If (SETGF cmp) yes no) 19283 // cond: 19284 // result: (UGT cmp yes no) 19285 for { 19286 v := b.Control 19287 if v.Op != OpAMD64SETGF { 19288 break 19289 } 19290 cmp := v.Args[0] 19291 yes := b.Succs[0] 19292 no := b.Succs[1] 19293 b.Kind = BlockAMD64UGT 19294 b.SetControl(cmp) 19295 _ = yes 19296 _ = no 19297 return true 19298 } 19299 // match: (If (SETGEF cmp) yes no) 19300 // cond: 19301 // result: (UGE cmp yes no) 19302 for { 19303 v := b.Control 19304 if v.Op != OpAMD64SETGEF 
{ 19305 break 19306 } 19307 cmp := v.Args[0] 19308 yes := b.Succs[0] 19309 no := b.Succs[1] 19310 b.Kind = BlockAMD64UGE 19311 b.SetControl(cmp) 19312 _ = yes 19313 _ = no 19314 return true 19315 } 19316 // match: (If (SETEQF cmp) yes no) 19317 // cond: 19318 // result: (EQF cmp yes no) 19319 for { 19320 v := b.Control 19321 if v.Op != OpAMD64SETEQF { 19322 break 19323 } 19324 cmp := v.Args[0] 19325 yes := b.Succs[0] 19326 no := b.Succs[1] 19327 b.Kind = BlockAMD64EQF 19328 b.SetControl(cmp) 19329 _ = yes 19330 _ = no 19331 return true 19332 } 19333 // match: (If (SETNEF cmp) yes no) 19334 // cond: 19335 // result: (NEF cmp yes no) 19336 for { 19337 v := b.Control 19338 if v.Op != OpAMD64SETNEF { 19339 break 19340 } 19341 cmp := v.Args[0] 19342 yes := b.Succs[0] 19343 no := b.Succs[1] 19344 b.Kind = BlockAMD64NEF 19345 b.SetControl(cmp) 19346 _ = yes 19347 _ = no 19348 return true 19349 } 19350 // match: (If cond yes no) 19351 // cond: 19352 // result: (NE (TESTB cond cond) yes no) 19353 for { 19354 v := b.Control 19355 _ = v 19356 cond := b.Control 19357 yes := b.Succs[0] 19358 no := b.Succs[1] 19359 b.Kind = BlockAMD64NE 19360 v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags) 19361 v0.AddArg(cond) 19362 v0.AddArg(cond) 19363 b.SetControl(v0) 19364 _ = yes 19365 _ = no 19366 return true 19367 } 19368 case BlockAMD64LE: 19369 // match: (LE (InvertFlags cmp) yes no) 19370 // cond: 19371 // result: (GE cmp yes no) 19372 for { 19373 v := b.Control 19374 if v.Op != OpAMD64InvertFlags { 19375 break 19376 } 19377 cmp := v.Args[0] 19378 yes := b.Succs[0] 19379 no := b.Succs[1] 19380 b.Kind = BlockAMD64GE 19381 b.SetControl(cmp) 19382 _ = yes 19383 _ = no 19384 return true 19385 } 19386 // match: (LE (FlagEQ) yes no) 19387 // cond: 19388 // result: (First nil yes no) 19389 for { 19390 v := b.Control 19391 if v.Op != OpAMD64FlagEQ { 19392 break 19393 } 19394 yes := b.Succs[0] 19395 no := b.Succs[1] 19396 b.Kind = BlockFirst 19397 b.SetControl(nil) 19398 _ = yes 19399 _ = 
no 19400 return true 19401 } 19402 // match: (LE (FlagLT_ULT) yes no) 19403 // cond: 19404 // result: (First nil yes no) 19405 for { 19406 v := b.Control 19407 if v.Op != OpAMD64FlagLT_ULT { 19408 break 19409 } 19410 yes := b.Succs[0] 19411 no := b.Succs[1] 19412 b.Kind = BlockFirst 19413 b.SetControl(nil) 19414 _ = yes 19415 _ = no 19416 return true 19417 } 19418 // match: (LE (FlagLT_UGT) yes no) 19419 // cond: 19420 // result: (First nil yes no) 19421 for { 19422 v := b.Control 19423 if v.Op != OpAMD64FlagLT_UGT { 19424 break 19425 } 19426 yes := b.Succs[0] 19427 no := b.Succs[1] 19428 b.Kind = BlockFirst 19429 b.SetControl(nil) 19430 _ = yes 19431 _ = no 19432 return true 19433 } 19434 // match: (LE (FlagGT_ULT) yes no) 19435 // cond: 19436 // result: (First nil no yes) 19437 for { 19438 v := b.Control 19439 if v.Op != OpAMD64FlagGT_ULT { 19440 break 19441 } 19442 yes := b.Succs[0] 19443 no := b.Succs[1] 19444 b.Kind = BlockFirst 19445 b.SetControl(nil) 19446 b.swapSuccessors() 19447 _ = no 19448 _ = yes 19449 return true 19450 } 19451 // match: (LE (FlagGT_UGT) yes no) 19452 // cond: 19453 // result: (First nil no yes) 19454 for { 19455 v := b.Control 19456 if v.Op != OpAMD64FlagGT_UGT { 19457 break 19458 } 19459 yes := b.Succs[0] 19460 no := b.Succs[1] 19461 b.Kind = BlockFirst 19462 b.SetControl(nil) 19463 b.swapSuccessors() 19464 _ = no 19465 _ = yes 19466 return true 19467 } 19468 case BlockAMD64LT: 19469 // match: (LT (InvertFlags cmp) yes no) 19470 // cond: 19471 // result: (GT cmp yes no) 19472 for { 19473 v := b.Control 19474 if v.Op != OpAMD64InvertFlags { 19475 break 19476 } 19477 cmp := v.Args[0] 19478 yes := b.Succs[0] 19479 no := b.Succs[1] 19480 b.Kind = BlockAMD64GT 19481 b.SetControl(cmp) 19482 _ = yes 19483 _ = no 19484 return true 19485 } 19486 // match: (LT (FlagEQ) yes no) 19487 // cond: 19488 // result: (First nil no yes) 19489 for { 19490 v := b.Control 19491 if v.Op != OpAMD64FlagEQ { 19492 break 19493 } 19494 yes := b.Succs[0] 19495 no 
:= b.Succs[1] 19496 b.Kind = BlockFirst 19497 b.SetControl(nil) 19498 b.swapSuccessors() 19499 _ = no 19500 _ = yes 19501 return true 19502 } 19503 // match: (LT (FlagLT_ULT) yes no) 19504 // cond: 19505 // result: (First nil yes no) 19506 for { 19507 v := b.Control 19508 if v.Op != OpAMD64FlagLT_ULT { 19509 break 19510 } 19511 yes := b.Succs[0] 19512 no := b.Succs[1] 19513 b.Kind = BlockFirst 19514 b.SetControl(nil) 19515 _ = yes 19516 _ = no 19517 return true 19518 } 19519 // match: (LT (FlagLT_UGT) yes no) 19520 // cond: 19521 // result: (First nil yes no) 19522 for { 19523 v := b.Control 19524 if v.Op != OpAMD64FlagLT_UGT { 19525 break 19526 } 19527 yes := b.Succs[0] 19528 no := b.Succs[1] 19529 b.Kind = BlockFirst 19530 b.SetControl(nil) 19531 _ = yes 19532 _ = no 19533 return true 19534 } 19535 // match: (LT (FlagGT_ULT) yes no) 19536 // cond: 19537 // result: (First nil no yes) 19538 for { 19539 v := b.Control 19540 if v.Op != OpAMD64FlagGT_ULT { 19541 break 19542 } 19543 yes := b.Succs[0] 19544 no := b.Succs[1] 19545 b.Kind = BlockFirst 19546 b.SetControl(nil) 19547 b.swapSuccessors() 19548 _ = no 19549 _ = yes 19550 return true 19551 } 19552 // match: (LT (FlagGT_UGT) yes no) 19553 // cond: 19554 // result: (First nil no yes) 19555 for { 19556 v := b.Control 19557 if v.Op != OpAMD64FlagGT_UGT { 19558 break 19559 } 19560 yes := b.Succs[0] 19561 no := b.Succs[1] 19562 b.Kind = BlockFirst 19563 b.SetControl(nil) 19564 b.swapSuccessors() 19565 _ = no 19566 _ = yes 19567 return true 19568 } 19569 case BlockAMD64NE: 19570 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 19571 // cond: 19572 // result: (LT cmp yes no) 19573 for { 19574 v := b.Control 19575 if v.Op != OpAMD64TESTB { 19576 break 19577 } 19578 v_0 := v.Args[0] 19579 if v_0.Op != OpAMD64SETL { 19580 break 19581 } 19582 cmp := v_0.Args[0] 19583 v_1 := v.Args[1] 19584 if v_1.Op != OpAMD64SETL { 19585 break 19586 } 19587 if cmp != v_1.Args[0] { 19588 break 19589 } 19590 yes := b.Succs[0] 19591 no := 
b.Succs[1] 19592 b.Kind = BlockAMD64LT 19593 b.SetControl(cmp) 19594 _ = yes 19595 _ = no 19596 return true 19597 } 19598 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 19599 // cond: 19600 // result: (LE cmp yes no) 19601 for { 19602 v := b.Control 19603 if v.Op != OpAMD64TESTB { 19604 break 19605 } 19606 v_0 := v.Args[0] 19607 if v_0.Op != OpAMD64SETLE { 19608 break 19609 } 19610 cmp := v_0.Args[0] 19611 v_1 := v.Args[1] 19612 if v_1.Op != OpAMD64SETLE { 19613 break 19614 } 19615 if cmp != v_1.Args[0] { 19616 break 19617 } 19618 yes := b.Succs[0] 19619 no := b.Succs[1] 19620 b.Kind = BlockAMD64LE 19621 b.SetControl(cmp) 19622 _ = yes 19623 _ = no 19624 return true 19625 } 19626 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 19627 // cond: 19628 // result: (GT cmp yes no) 19629 for { 19630 v := b.Control 19631 if v.Op != OpAMD64TESTB { 19632 break 19633 } 19634 v_0 := v.Args[0] 19635 if v_0.Op != OpAMD64SETG { 19636 break 19637 } 19638 cmp := v_0.Args[0] 19639 v_1 := v.Args[1] 19640 if v_1.Op != OpAMD64SETG { 19641 break 19642 } 19643 if cmp != v_1.Args[0] { 19644 break 19645 } 19646 yes := b.Succs[0] 19647 no := b.Succs[1] 19648 b.Kind = BlockAMD64GT 19649 b.SetControl(cmp) 19650 _ = yes 19651 _ = no 19652 return true 19653 } 19654 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 19655 // cond: 19656 // result: (GE cmp yes no) 19657 for { 19658 v := b.Control 19659 if v.Op != OpAMD64TESTB { 19660 break 19661 } 19662 v_0 := v.Args[0] 19663 if v_0.Op != OpAMD64SETGE { 19664 break 19665 } 19666 cmp := v_0.Args[0] 19667 v_1 := v.Args[1] 19668 if v_1.Op != OpAMD64SETGE { 19669 break 19670 } 19671 if cmp != v_1.Args[0] { 19672 break 19673 } 19674 yes := b.Succs[0] 19675 no := b.Succs[1] 19676 b.Kind = BlockAMD64GE 19677 b.SetControl(cmp) 19678 _ = yes 19679 _ = no 19680 return true 19681 } 19682 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 19683 // cond: 19684 // result: (EQ cmp yes no) 19685 for { 19686 v := b.Control 19687 if v.Op != 
OpAMD64TESTB { 19688 break 19689 } 19690 v_0 := v.Args[0] 19691 if v_0.Op != OpAMD64SETEQ { 19692 break 19693 } 19694 cmp := v_0.Args[0] 19695 v_1 := v.Args[1] 19696 if v_1.Op != OpAMD64SETEQ { 19697 break 19698 } 19699 if cmp != v_1.Args[0] { 19700 break 19701 } 19702 yes := b.Succs[0] 19703 no := b.Succs[1] 19704 b.Kind = BlockAMD64EQ 19705 b.SetControl(cmp) 19706 _ = yes 19707 _ = no 19708 return true 19709 } 19710 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 19711 // cond: 19712 // result: (NE cmp yes no) 19713 for { 19714 v := b.Control 19715 if v.Op != OpAMD64TESTB { 19716 break 19717 } 19718 v_0 := v.Args[0] 19719 if v_0.Op != OpAMD64SETNE { 19720 break 19721 } 19722 cmp := v_0.Args[0] 19723 v_1 := v.Args[1] 19724 if v_1.Op != OpAMD64SETNE { 19725 break 19726 } 19727 if cmp != v_1.Args[0] { 19728 break 19729 } 19730 yes := b.Succs[0] 19731 no := b.Succs[1] 19732 b.Kind = BlockAMD64NE 19733 b.SetControl(cmp) 19734 _ = yes 19735 _ = no 19736 return true 19737 } 19738 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 19739 // cond: 19740 // result: (ULT cmp yes no) 19741 for { 19742 v := b.Control 19743 if v.Op != OpAMD64TESTB { 19744 break 19745 } 19746 v_0 := v.Args[0] 19747 if v_0.Op != OpAMD64SETB { 19748 break 19749 } 19750 cmp := v_0.Args[0] 19751 v_1 := v.Args[1] 19752 if v_1.Op != OpAMD64SETB { 19753 break 19754 } 19755 if cmp != v_1.Args[0] { 19756 break 19757 } 19758 yes := b.Succs[0] 19759 no := b.Succs[1] 19760 b.Kind = BlockAMD64ULT 19761 b.SetControl(cmp) 19762 _ = yes 19763 _ = no 19764 return true 19765 } 19766 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 19767 // cond: 19768 // result: (ULE cmp yes no) 19769 for { 19770 v := b.Control 19771 if v.Op != OpAMD64TESTB { 19772 break 19773 } 19774 v_0 := v.Args[0] 19775 if v_0.Op != OpAMD64SETBE { 19776 break 19777 } 19778 cmp := v_0.Args[0] 19779 v_1 := v.Args[1] 19780 if v_1.Op != OpAMD64SETBE { 19781 break 19782 } 19783 if cmp != v_1.Args[0] { 19784 break 19785 } 19786 yes := 
b.Succs[0] 19787 no := b.Succs[1] 19788 b.Kind = BlockAMD64ULE 19789 b.SetControl(cmp) 19790 _ = yes 19791 _ = no 19792 return true 19793 } 19794 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 19795 // cond: 19796 // result: (UGT cmp yes no) 19797 for { 19798 v := b.Control 19799 if v.Op != OpAMD64TESTB { 19800 break 19801 } 19802 v_0 := v.Args[0] 19803 if v_0.Op != OpAMD64SETA { 19804 break 19805 } 19806 cmp := v_0.Args[0] 19807 v_1 := v.Args[1] 19808 if v_1.Op != OpAMD64SETA { 19809 break 19810 } 19811 if cmp != v_1.Args[0] { 19812 break 19813 } 19814 yes := b.Succs[0] 19815 no := b.Succs[1] 19816 b.Kind = BlockAMD64UGT 19817 b.SetControl(cmp) 19818 _ = yes 19819 _ = no 19820 return true 19821 } 19822 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 19823 // cond: 19824 // result: (UGE cmp yes no) 19825 for { 19826 v := b.Control 19827 if v.Op != OpAMD64TESTB { 19828 break 19829 } 19830 v_0 := v.Args[0] 19831 if v_0.Op != OpAMD64SETAE { 19832 break 19833 } 19834 cmp := v_0.Args[0] 19835 v_1 := v.Args[1] 19836 if v_1.Op != OpAMD64SETAE { 19837 break 19838 } 19839 if cmp != v_1.Args[0] { 19840 break 19841 } 19842 yes := b.Succs[0] 19843 no := b.Succs[1] 19844 b.Kind = BlockAMD64UGE 19845 b.SetControl(cmp) 19846 _ = yes 19847 _ = no 19848 return true 19849 } 19850 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 19851 // cond: 19852 // result: (UGT cmp yes no) 19853 for { 19854 v := b.Control 19855 if v.Op != OpAMD64TESTB { 19856 break 19857 } 19858 v_0 := v.Args[0] 19859 if v_0.Op != OpAMD64SETGF { 19860 break 19861 } 19862 cmp := v_0.Args[0] 19863 v_1 := v.Args[1] 19864 if v_1.Op != OpAMD64SETGF { 19865 break 19866 } 19867 if cmp != v_1.Args[0] { 19868 break 19869 } 19870 yes := b.Succs[0] 19871 no := b.Succs[1] 19872 b.Kind = BlockAMD64UGT 19873 b.SetControl(cmp) 19874 _ = yes 19875 _ = no 19876 return true 19877 } 19878 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 19879 // cond: 19880 // result: (UGE cmp yes no) 19881 for { 19882 v := 
b.Control 19883 if v.Op != OpAMD64TESTB { 19884 break 19885 } 19886 v_0 := v.Args[0] 19887 if v_0.Op != OpAMD64SETGEF { 19888 break 19889 } 19890 cmp := v_0.Args[0] 19891 v_1 := v.Args[1] 19892 if v_1.Op != OpAMD64SETGEF { 19893 break 19894 } 19895 if cmp != v_1.Args[0] { 19896 break 19897 } 19898 yes := b.Succs[0] 19899 no := b.Succs[1] 19900 b.Kind = BlockAMD64UGE 19901 b.SetControl(cmp) 19902 _ = yes 19903 _ = no 19904 return true 19905 } 19906 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 19907 // cond: 19908 // result: (EQF cmp yes no) 19909 for { 19910 v := b.Control 19911 if v.Op != OpAMD64TESTB { 19912 break 19913 } 19914 v_0 := v.Args[0] 19915 if v_0.Op != OpAMD64SETEQF { 19916 break 19917 } 19918 cmp := v_0.Args[0] 19919 v_1 := v.Args[1] 19920 if v_1.Op != OpAMD64SETEQF { 19921 break 19922 } 19923 if cmp != v_1.Args[0] { 19924 break 19925 } 19926 yes := b.Succs[0] 19927 no := b.Succs[1] 19928 b.Kind = BlockAMD64EQF 19929 b.SetControl(cmp) 19930 _ = yes 19931 _ = no 19932 return true 19933 } 19934 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 19935 // cond: 19936 // result: (NEF cmp yes no) 19937 for { 19938 v := b.Control 19939 if v.Op != OpAMD64TESTB { 19940 break 19941 } 19942 v_0 := v.Args[0] 19943 if v_0.Op != OpAMD64SETNEF { 19944 break 19945 } 19946 cmp := v_0.Args[0] 19947 v_1 := v.Args[1] 19948 if v_1.Op != OpAMD64SETNEF { 19949 break 19950 } 19951 if cmp != v_1.Args[0] { 19952 break 19953 } 19954 yes := b.Succs[0] 19955 no := b.Succs[1] 19956 b.Kind = BlockAMD64NEF 19957 b.SetControl(cmp) 19958 _ = yes 19959 _ = no 19960 return true 19961 } 19962 // match: (NE (InvertFlags cmp) yes no) 19963 // cond: 19964 // result: (NE cmp yes no) 19965 for { 19966 v := b.Control 19967 if v.Op != OpAMD64InvertFlags { 19968 break 19969 } 19970 cmp := v.Args[0] 19971 yes := b.Succs[0] 19972 no := b.Succs[1] 19973 b.Kind = BlockAMD64NE 19974 b.SetControl(cmp) 19975 _ = yes 19976 _ = no 19977 return true 19978 } 19979 // match: (NE (FlagEQ) yes 
no) 19980 // cond: 19981 // result: (First nil no yes) 19982 for { 19983 v := b.Control 19984 if v.Op != OpAMD64FlagEQ { 19985 break 19986 } 19987 yes := b.Succs[0] 19988 no := b.Succs[1] 19989 b.Kind = BlockFirst 19990 b.SetControl(nil) 19991 b.swapSuccessors() 19992 _ = no 19993 _ = yes 19994 return true 19995 } 19996 // match: (NE (FlagLT_ULT) yes no) 19997 // cond: 19998 // result: (First nil yes no) 19999 for { 20000 v := b.Control 20001 if v.Op != OpAMD64FlagLT_ULT { 20002 break 20003 } 20004 yes := b.Succs[0] 20005 no := b.Succs[1] 20006 b.Kind = BlockFirst 20007 b.SetControl(nil) 20008 _ = yes 20009 _ = no 20010 return true 20011 } 20012 // match: (NE (FlagLT_UGT) yes no) 20013 // cond: 20014 // result: (First nil yes no) 20015 for { 20016 v := b.Control 20017 if v.Op != OpAMD64FlagLT_UGT { 20018 break 20019 } 20020 yes := b.Succs[0] 20021 no := b.Succs[1] 20022 b.Kind = BlockFirst 20023 b.SetControl(nil) 20024 _ = yes 20025 _ = no 20026 return true 20027 } 20028 // match: (NE (FlagGT_ULT) yes no) 20029 // cond: 20030 // result: (First nil yes no) 20031 for { 20032 v := b.Control 20033 if v.Op != OpAMD64FlagGT_ULT { 20034 break 20035 } 20036 yes := b.Succs[0] 20037 no := b.Succs[1] 20038 b.Kind = BlockFirst 20039 b.SetControl(nil) 20040 _ = yes 20041 _ = no 20042 return true 20043 } 20044 // match: (NE (FlagGT_UGT) yes no) 20045 // cond: 20046 // result: (First nil yes no) 20047 for { 20048 v := b.Control 20049 if v.Op != OpAMD64FlagGT_UGT { 20050 break 20051 } 20052 yes := b.Succs[0] 20053 no := b.Succs[1] 20054 b.Kind = BlockFirst 20055 b.SetControl(nil) 20056 _ = yes 20057 _ = no 20058 return true 20059 } 20060 case BlockAMD64UGE: 20061 // match: (UGE (InvertFlags cmp) yes no) 20062 // cond: 20063 // result: (ULE cmp yes no) 20064 for { 20065 v := b.Control 20066 if v.Op != OpAMD64InvertFlags { 20067 break 20068 } 20069 cmp := v.Args[0] 20070 yes := b.Succs[0] 20071 no := b.Succs[1] 20072 b.Kind = BlockAMD64ULE 20073 b.SetControl(cmp) 20074 _ = yes 20075 
_ = no 20076 return true 20077 } 20078 // match: (UGE (FlagEQ) yes no) 20079 // cond: 20080 // result: (First nil yes no) 20081 for { 20082 v := b.Control 20083 if v.Op != OpAMD64FlagEQ { 20084 break 20085 } 20086 yes := b.Succs[0] 20087 no := b.Succs[1] 20088 b.Kind = BlockFirst 20089 b.SetControl(nil) 20090 _ = yes 20091 _ = no 20092 return true 20093 } 20094 // match: (UGE (FlagLT_ULT) yes no) 20095 // cond: 20096 // result: (First nil no yes) 20097 for { 20098 v := b.Control 20099 if v.Op != OpAMD64FlagLT_ULT { 20100 break 20101 } 20102 yes := b.Succs[0] 20103 no := b.Succs[1] 20104 b.Kind = BlockFirst 20105 b.SetControl(nil) 20106 b.swapSuccessors() 20107 _ = no 20108 _ = yes 20109 return true 20110 } 20111 // match: (UGE (FlagLT_UGT) yes no) 20112 // cond: 20113 // result: (First nil yes no) 20114 for { 20115 v := b.Control 20116 if v.Op != OpAMD64FlagLT_UGT { 20117 break 20118 } 20119 yes := b.Succs[0] 20120 no := b.Succs[1] 20121 b.Kind = BlockFirst 20122 b.SetControl(nil) 20123 _ = yes 20124 _ = no 20125 return true 20126 } 20127 // match: (UGE (FlagGT_ULT) yes no) 20128 // cond: 20129 // result: (First nil no yes) 20130 for { 20131 v := b.Control 20132 if v.Op != OpAMD64FlagGT_ULT { 20133 break 20134 } 20135 yes := b.Succs[0] 20136 no := b.Succs[1] 20137 b.Kind = BlockFirst 20138 b.SetControl(nil) 20139 b.swapSuccessors() 20140 _ = no 20141 _ = yes 20142 return true 20143 } 20144 // match: (UGE (FlagGT_UGT) yes no) 20145 // cond: 20146 // result: (First nil yes no) 20147 for { 20148 v := b.Control 20149 if v.Op != OpAMD64FlagGT_UGT { 20150 break 20151 } 20152 yes := b.Succs[0] 20153 no := b.Succs[1] 20154 b.Kind = BlockFirst 20155 b.SetControl(nil) 20156 _ = yes 20157 _ = no 20158 return true 20159 } 20160 case BlockAMD64UGT: 20161 // match: (UGT (InvertFlags cmp) yes no) 20162 // cond: 20163 // result: (ULT cmp yes no) 20164 for { 20165 v := b.Control 20166 if v.Op != OpAMD64InvertFlags { 20167 break 20168 } 20169 cmp := v.Args[0] 20170 yes := b.Succs[0] 
20171 no := b.Succs[1] 20172 b.Kind = BlockAMD64ULT 20173 b.SetControl(cmp) 20174 _ = yes 20175 _ = no 20176 return true 20177 } 20178 // match: (UGT (FlagEQ) yes no) 20179 // cond: 20180 // result: (First nil no yes) 20181 for { 20182 v := b.Control 20183 if v.Op != OpAMD64FlagEQ { 20184 break 20185 } 20186 yes := b.Succs[0] 20187 no := b.Succs[1] 20188 b.Kind = BlockFirst 20189 b.SetControl(nil) 20190 b.swapSuccessors() 20191 _ = no 20192 _ = yes 20193 return true 20194 } 20195 // match: (UGT (FlagLT_ULT) yes no) 20196 // cond: 20197 // result: (First nil no yes) 20198 for { 20199 v := b.Control 20200 if v.Op != OpAMD64FlagLT_ULT { 20201 break 20202 } 20203 yes := b.Succs[0] 20204 no := b.Succs[1] 20205 b.Kind = BlockFirst 20206 b.SetControl(nil) 20207 b.swapSuccessors() 20208 _ = no 20209 _ = yes 20210 return true 20211 } 20212 // match: (UGT (FlagLT_UGT) yes no) 20213 // cond: 20214 // result: (First nil yes no) 20215 for { 20216 v := b.Control 20217 if v.Op != OpAMD64FlagLT_UGT { 20218 break 20219 } 20220 yes := b.Succs[0] 20221 no := b.Succs[1] 20222 b.Kind = BlockFirst 20223 b.SetControl(nil) 20224 _ = yes 20225 _ = no 20226 return true 20227 } 20228 // match: (UGT (FlagGT_ULT) yes no) 20229 // cond: 20230 // result: (First nil no yes) 20231 for { 20232 v := b.Control 20233 if v.Op != OpAMD64FlagGT_ULT { 20234 break 20235 } 20236 yes := b.Succs[0] 20237 no := b.Succs[1] 20238 b.Kind = BlockFirst 20239 b.SetControl(nil) 20240 b.swapSuccessors() 20241 _ = no 20242 _ = yes 20243 return true 20244 } 20245 // match: (UGT (FlagGT_UGT) yes no) 20246 // cond: 20247 // result: (First nil yes no) 20248 for { 20249 v := b.Control 20250 if v.Op != OpAMD64FlagGT_UGT { 20251 break 20252 } 20253 yes := b.Succs[0] 20254 no := b.Succs[1] 20255 b.Kind = BlockFirst 20256 b.SetControl(nil) 20257 _ = yes 20258 _ = no 20259 return true 20260 } 20261 case BlockAMD64ULE: 20262 // match: (ULE (InvertFlags cmp) yes no) 20263 // cond: 20264 // result: (UGE cmp yes no) 20265 for { 
20266 v := b.Control 20267 if v.Op != OpAMD64InvertFlags { 20268 break 20269 } 20270 cmp := v.Args[0] 20271 yes := b.Succs[0] 20272 no := b.Succs[1] 20273 b.Kind = BlockAMD64UGE 20274 b.SetControl(cmp) 20275 _ = yes 20276 _ = no 20277 return true 20278 } 20279 // match: (ULE (FlagEQ) yes no) 20280 // cond: 20281 // result: (First nil yes no) 20282 for { 20283 v := b.Control 20284 if v.Op != OpAMD64FlagEQ { 20285 break 20286 } 20287 yes := b.Succs[0] 20288 no := b.Succs[1] 20289 b.Kind = BlockFirst 20290 b.SetControl(nil) 20291 _ = yes 20292 _ = no 20293 return true 20294 } 20295 // match: (ULE (FlagLT_ULT) yes no) 20296 // cond: 20297 // result: (First nil yes no) 20298 for { 20299 v := b.Control 20300 if v.Op != OpAMD64FlagLT_ULT { 20301 break 20302 } 20303 yes := b.Succs[0] 20304 no := b.Succs[1] 20305 b.Kind = BlockFirst 20306 b.SetControl(nil) 20307 _ = yes 20308 _ = no 20309 return true 20310 } 20311 // match: (ULE (FlagLT_UGT) yes no) 20312 // cond: 20313 // result: (First nil no yes) 20314 for { 20315 v := b.Control 20316 if v.Op != OpAMD64FlagLT_UGT { 20317 break 20318 } 20319 yes := b.Succs[0] 20320 no := b.Succs[1] 20321 b.Kind = BlockFirst 20322 b.SetControl(nil) 20323 b.swapSuccessors() 20324 _ = no 20325 _ = yes 20326 return true 20327 } 20328 // match: (ULE (FlagGT_ULT) yes no) 20329 // cond: 20330 // result: (First nil yes no) 20331 for { 20332 v := b.Control 20333 if v.Op != OpAMD64FlagGT_ULT { 20334 break 20335 } 20336 yes := b.Succs[0] 20337 no := b.Succs[1] 20338 b.Kind = BlockFirst 20339 b.SetControl(nil) 20340 _ = yes 20341 _ = no 20342 return true 20343 } 20344 // match: (ULE (FlagGT_UGT) yes no) 20345 // cond: 20346 // result: (First nil no yes) 20347 for { 20348 v := b.Control 20349 if v.Op != OpAMD64FlagGT_UGT { 20350 break 20351 } 20352 yes := b.Succs[0] 20353 no := b.Succs[1] 20354 b.Kind = BlockFirst 20355 b.SetControl(nil) 20356 b.swapSuccessors() 20357 _ = no 20358 _ = yes 20359 return true 20360 } 20361 case BlockAMD64ULT: 20362 // 
match: (ULT (InvertFlags cmp) yes no) 20363 // cond: 20364 // result: (UGT cmp yes no) 20365 for { 20366 v := b.Control 20367 if v.Op != OpAMD64InvertFlags { 20368 break 20369 } 20370 cmp := v.Args[0] 20371 yes := b.Succs[0] 20372 no := b.Succs[1] 20373 b.Kind = BlockAMD64UGT 20374 b.SetControl(cmp) 20375 _ = yes 20376 _ = no 20377 return true 20378 } 20379 // match: (ULT (FlagEQ) yes no) 20380 // cond: 20381 // result: (First nil no yes) 20382 for { 20383 v := b.Control 20384 if v.Op != OpAMD64FlagEQ { 20385 break 20386 } 20387 yes := b.Succs[0] 20388 no := b.Succs[1] 20389 b.Kind = BlockFirst 20390 b.SetControl(nil) 20391 b.swapSuccessors() 20392 _ = no 20393 _ = yes 20394 return true 20395 } 20396 // match: (ULT (FlagLT_ULT) yes no) 20397 // cond: 20398 // result: (First nil yes no) 20399 for { 20400 v := b.Control 20401 if v.Op != OpAMD64FlagLT_ULT { 20402 break 20403 } 20404 yes := b.Succs[0] 20405 no := b.Succs[1] 20406 b.Kind = BlockFirst 20407 b.SetControl(nil) 20408 _ = yes 20409 _ = no 20410 return true 20411 } 20412 // match: (ULT (FlagLT_UGT) yes no) 20413 // cond: 20414 // result: (First nil no yes) 20415 for { 20416 v := b.Control 20417 if v.Op != OpAMD64FlagLT_UGT { 20418 break 20419 } 20420 yes := b.Succs[0] 20421 no := b.Succs[1] 20422 b.Kind = BlockFirst 20423 b.SetControl(nil) 20424 b.swapSuccessors() 20425 _ = no 20426 _ = yes 20427 return true 20428 } 20429 // match: (ULT (FlagGT_ULT) yes no) 20430 // cond: 20431 // result: (First nil yes no) 20432 for { 20433 v := b.Control 20434 if v.Op != OpAMD64FlagGT_ULT { 20435 break 20436 } 20437 yes := b.Succs[0] 20438 no := b.Succs[1] 20439 b.Kind = BlockFirst 20440 b.SetControl(nil) 20441 _ = yes 20442 _ = no 20443 return true 20444 } 20445 // match: (ULT (FlagGT_UGT) yes no) 20446 // cond: 20447 // result: (First nil no yes) 20448 for { 20449 v := b.Control 20450 if v.Op != OpAMD64FlagGT_UGT { 20451 break 20452 } 20453 yes := b.Succs[0] 20454 no := b.Succs[1] 20455 b.Kind = BlockFirst 20456 
b.SetControl(nil) 20457 b.swapSuccessors() 20458 _ = no 20459 _ = yes 20460 return true 20461 } 20462 } 20463 return false 20464 }