// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go

package ssa

import "math"

var _ = math.MinInt8 // in case not otherwise used
func rewriteValueAMD64(v *Value, config *Config) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v, config)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v, config)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v, config)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v, config)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v, config)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v, config)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
	case OpAMD64MOVLloadidx4:
		return
rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config) 91 case OpAMD64MOVLstore: 92 return rewriteValueAMD64_OpAMD64MOVLstore(v, config) 93 case OpAMD64MOVLstoreconst: 94 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config) 95 case OpAMD64MOVLstoreconstidx1: 96 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config) 97 case OpAMD64MOVLstoreconstidx4: 98 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config) 99 case OpAMD64MOVLstoreidx1: 100 return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config) 101 case OpAMD64MOVLstoreidx4: 102 return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config) 103 case OpAMD64MOVOload: 104 return rewriteValueAMD64_OpAMD64MOVOload(v, config) 105 case OpAMD64MOVOstore: 106 return rewriteValueAMD64_OpAMD64MOVOstore(v, config) 107 case OpAMD64MOVQatomicload: 108 return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config) 109 case OpAMD64MOVQload: 110 return rewriteValueAMD64_OpAMD64MOVQload(v, config) 111 case OpAMD64MOVQloadidx1: 112 return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config) 113 case OpAMD64MOVQloadidx8: 114 return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config) 115 case OpAMD64MOVQstore: 116 return rewriteValueAMD64_OpAMD64MOVQstore(v, config) 117 case OpAMD64MOVQstoreconst: 118 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config) 119 case OpAMD64MOVQstoreconstidx1: 120 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config) 121 case OpAMD64MOVQstoreconstidx8: 122 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config) 123 case OpAMD64MOVQstoreidx1: 124 return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config) 125 case OpAMD64MOVQstoreidx8: 126 return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config) 127 case OpAMD64MOVSDload: 128 return rewriteValueAMD64_OpAMD64MOVSDload(v, config) 129 case OpAMD64MOVSDloadidx1: 130 return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config) 131 case OpAMD64MOVSDloadidx8: 132 return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config) 133 case OpAMD64MOVSDstore: 134 return rewriteValueAMD64_OpAMD64MOVSDstore(v, config) 135 case OpAMD64MOVSDstoreidx1: 136 return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config) 137 case OpAMD64MOVSDstoreidx8: 138 return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config) 139 case OpAMD64MOVSSload: 140 return rewriteValueAMD64_OpAMD64MOVSSload(v, config) 141 case OpAMD64MOVSSloadidx1: 142 return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config) 143 case OpAMD64MOVSSloadidx4: 144 return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config) 145 case OpAMD64MOVSSstore: 146 return rewriteValueAMD64_OpAMD64MOVSSstore(v, config) 147 case OpAMD64MOVSSstoreidx1: 148 return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config) 149 case OpAMD64MOVSSstoreidx4: 150 return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config) 151 case OpAMD64MOVWQSX: 152 return rewriteValueAMD64_OpAMD64MOVWQSX(v, config) 153 case OpAMD64MOVWQSXload: 154 return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config) 155 case OpAMD64MOVWQZX: 156 return rewriteValueAMD64_OpAMD64MOVWQZX(v, config) 157 case OpAMD64MOVWload: 158 return rewriteValueAMD64_OpAMD64MOVWload(v, config) 159 case OpAMD64MOVWloadidx1: 160 return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config) 161 case OpAMD64MOVWloadidx2: 162 return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config) 163 case OpAMD64MOVWstore: 164 return rewriteValueAMD64_OpAMD64MOVWstore(v, config) 165 case OpAMD64MOVWstoreconst: 166 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config) 167 case OpAMD64MOVWstoreconstidx1: 168 return 
rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config) 169 case OpAMD64MOVWstoreconstidx2: 170 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config) 171 case OpAMD64MOVWstoreidx1: 172 return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) 173 case OpAMD64MOVWstoreidx2: 174 return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) 175 case OpAMD64MULL: 176 return rewriteValueAMD64_OpAMD64MULL(v, config) 177 case OpAMD64MULLconst: 178 return rewriteValueAMD64_OpAMD64MULLconst(v, config) 179 case OpAMD64MULQ: 180 return rewriteValueAMD64_OpAMD64MULQ(v, config) 181 case OpAMD64MULQconst: 182 return rewriteValueAMD64_OpAMD64MULQconst(v, config) 183 case OpAMD64NEGL: 184 return rewriteValueAMD64_OpAMD64NEGL(v, config) 185 case OpAMD64NEGQ: 186 return rewriteValueAMD64_OpAMD64NEGQ(v, config) 187 case OpAMD64NOTL: 188 return rewriteValueAMD64_OpAMD64NOTL(v, config) 189 case OpAMD64NOTQ: 190 return rewriteValueAMD64_OpAMD64NOTQ(v, config) 191 case OpAMD64ORL: 192 return rewriteValueAMD64_OpAMD64ORL(v, config) 193 case OpAMD64ORLconst: 194 return rewriteValueAMD64_OpAMD64ORLconst(v, config) 195 case OpAMD64ORQ: 196 return rewriteValueAMD64_OpAMD64ORQ(v, config) 197 case OpAMD64ORQconst: 198 return rewriteValueAMD64_OpAMD64ORQconst(v, config) 199 case OpAMD64ROLBconst: 200 return rewriteValueAMD64_OpAMD64ROLBconst(v, config) 201 case OpAMD64ROLLconst: 202 return rewriteValueAMD64_OpAMD64ROLLconst(v, config) 203 case OpAMD64ROLQconst: 204 return rewriteValueAMD64_OpAMD64ROLQconst(v, config) 205 case OpAMD64ROLWconst: 206 return rewriteValueAMD64_OpAMD64ROLWconst(v, config) 207 case OpAMD64SARB: 208 return rewriteValueAMD64_OpAMD64SARB(v, config) 209 case OpAMD64SARBconst: 210 return rewriteValueAMD64_OpAMD64SARBconst(v, config) 211 case OpAMD64SARL: 212 return rewriteValueAMD64_OpAMD64SARL(v, config) 213 case OpAMD64SARLconst: 214 return rewriteValueAMD64_OpAMD64SARLconst(v, config) 215 case OpAMD64SARQ: 216 return rewriteValueAMD64_OpAMD64SARQ(v, config) 217 case OpAMD64SARQconst: 218 return rewriteValueAMD64_OpAMD64SARQconst(v, config) 219 case OpAMD64SARW: 220 return rewriteValueAMD64_OpAMD64SARW(v, config) 221 case OpAMD64SARWconst: 222 return rewriteValueAMD64_OpAMD64SARWconst(v, config) 223 case OpAMD64SBBLcarrymask: 224 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config) 225 case OpAMD64SBBQcarrymask: 226 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config) 227 case OpAMD64SETA: 228 return rewriteValueAMD64_OpAMD64SETA(v, config) 229 case OpAMD64SETAE: 230 return rewriteValueAMD64_OpAMD64SETAE(v, config) 231 case OpAMD64SETB: 232 return rewriteValueAMD64_OpAMD64SETB(v, config) 233 case OpAMD64SETBE: 234 return rewriteValueAMD64_OpAMD64SETBE(v, config) 235 case OpAMD64SETEQ: 236 return rewriteValueAMD64_OpAMD64SETEQ(v, config) 237 case OpAMD64SETG: 238 return rewriteValueAMD64_OpAMD64SETG(v, config) 239 case OpAMD64SETGE: 240 return rewriteValueAMD64_OpAMD64SETGE(v, config) 241 case OpAMD64SETL: 242 return rewriteValueAMD64_OpAMD64SETL(v, config) 243 case OpAMD64SETLE: 244 return rewriteValueAMD64_OpAMD64SETLE(v, config) 245 case OpAMD64SETNE: 246 return rewriteValueAMD64_OpAMD64SETNE(v, config) 247 case OpAMD64SHLL: 248 return rewriteValueAMD64_OpAMD64SHLL(v, config) 249 case OpAMD64SHLQ: 250 return rewriteValueAMD64_OpAMD64SHLQ(v, config) 251 case OpAMD64SHRB: 252 return rewriteValueAMD64_OpAMD64SHRB(v, config) 253 case OpAMD64SHRL: 254 return rewriteValueAMD64_OpAMD64SHRL(v, config) 255 case OpAMD64SHRQ: 256 return rewriteValueAMD64_OpAMD64SHRQ(v, config) 257 case 
OpAMD64SHRW: 258 return rewriteValueAMD64_OpAMD64SHRW(v, config) 259 case OpAMD64SUBL: 260 return rewriteValueAMD64_OpAMD64SUBL(v, config) 261 case OpAMD64SUBLconst: 262 return rewriteValueAMD64_OpAMD64SUBLconst(v, config) 263 case OpAMD64SUBQ: 264 return rewriteValueAMD64_OpAMD64SUBQ(v, config) 265 case OpAMD64SUBQconst: 266 return rewriteValueAMD64_OpAMD64SUBQconst(v, config) 267 case OpAMD64XADDLlock: 268 return rewriteValueAMD64_OpAMD64XADDLlock(v, config) 269 case OpAMD64XADDQlock: 270 return rewriteValueAMD64_OpAMD64XADDQlock(v, config) 271 case OpAMD64XCHGL: 272 return rewriteValueAMD64_OpAMD64XCHGL(v, config) 273 case OpAMD64XCHGQ: 274 return rewriteValueAMD64_OpAMD64XCHGQ(v, config) 275 case OpAMD64XORL: 276 return rewriteValueAMD64_OpAMD64XORL(v, config) 277 case OpAMD64XORLconst: 278 return rewriteValueAMD64_OpAMD64XORLconst(v, config) 279 case OpAMD64XORQ: 280 return rewriteValueAMD64_OpAMD64XORQ(v, config) 281 case OpAMD64XORQconst: 282 return rewriteValueAMD64_OpAMD64XORQconst(v, config) 283 case OpAdd16: 284 return rewriteValueAMD64_OpAdd16(v, config) 285 case OpAdd32: 286 return rewriteValueAMD64_OpAdd32(v, config) 287 case OpAdd32F: 288 return rewriteValueAMD64_OpAdd32F(v, config) 289 case OpAdd64: 290 return rewriteValueAMD64_OpAdd64(v, config) 291 case OpAdd64F: 292 return rewriteValueAMD64_OpAdd64F(v, config) 293 case OpAdd8: 294 return rewriteValueAMD64_OpAdd8(v, config) 295 case OpAddPtr: 296 return rewriteValueAMD64_OpAddPtr(v, config) 297 case OpAddr: 298 return rewriteValueAMD64_OpAddr(v, config) 299 case OpAnd16: 300 return rewriteValueAMD64_OpAnd16(v, config) 301 case OpAnd32: 302 return rewriteValueAMD64_OpAnd32(v, config) 303 case OpAnd64: 304 return rewriteValueAMD64_OpAnd64(v, config) 305 case OpAnd8: 306 return rewriteValueAMD64_OpAnd8(v, config) 307 case OpAndB: 308 return rewriteValueAMD64_OpAndB(v, config) 309 case OpAtomicAdd32: 310 return rewriteValueAMD64_OpAtomicAdd32(v, config) 311 case OpAtomicAdd64: 312 return rewriteValueAMD64_OpAtomicAdd64(v, config) 313 case OpAtomicAnd8: 314 return rewriteValueAMD64_OpAtomicAnd8(v, config) 315 case OpAtomicCompareAndSwap32: 316 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config) 317 case OpAtomicCompareAndSwap64: 318 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config) 319 case OpAtomicExchange32: 320 return rewriteValueAMD64_OpAtomicExchange32(v, config) 321 case OpAtomicExchange64: 322 return rewriteValueAMD64_OpAtomicExchange64(v, config) 323 case OpAtomicLoad32: 324 return rewriteValueAMD64_OpAtomicLoad32(v, config) 325 case OpAtomicLoad64: 326 return rewriteValueAMD64_OpAtomicLoad64(v, config) 327 case OpAtomicLoadPtr: 328 return rewriteValueAMD64_OpAtomicLoadPtr(v, config) 329 case OpAtomicOr8: 330 return rewriteValueAMD64_OpAtomicOr8(v, config) 331 case OpAtomicStore32: 332 return rewriteValueAMD64_OpAtomicStore32(v, config) 333 case OpAtomicStore64: 334 return rewriteValueAMD64_OpAtomicStore64(v, config) 335 case OpAtomicStorePtrNoWB: 336 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config) 337 case OpAvg64u: 338 return rewriteValueAMD64_OpAvg64u(v, config) 339 case OpBswap32: 340 return rewriteValueAMD64_OpBswap32(v, config) 341 case OpBswap64: 342 return rewriteValueAMD64_OpBswap64(v, config) 343 case OpClosureCall: 344 return rewriteValueAMD64_OpClosureCall(v, config) 345 case OpCom16: 346 return rewriteValueAMD64_OpCom16(v, config) 347 case OpCom32: 348 return rewriteValueAMD64_OpCom32(v, config) 349 case OpCom64: 350 return rewriteValueAMD64_OpCom64(v, config) 351 case OpCom8: 
352 return rewriteValueAMD64_OpCom8(v, config) 353 case OpConst16: 354 return rewriteValueAMD64_OpConst16(v, config) 355 case OpConst32: 356 return rewriteValueAMD64_OpConst32(v, config) 357 case OpConst32F: 358 return rewriteValueAMD64_OpConst32F(v, config) 359 case OpConst64: 360 return rewriteValueAMD64_OpConst64(v, config) 361 case OpConst64F: 362 return rewriteValueAMD64_OpConst64F(v, config) 363 case OpConst8: 364 return rewriteValueAMD64_OpConst8(v, config) 365 case OpConstBool: 366 return rewriteValueAMD64_OpConstBool(v, config) 367 case OpConstNil: 368 return rewriteValueAMD64_OpConstNil(v, config) 369 case OpConvert: 370 return rewriteValueAMD64_OpConvert(v, config) 371 case OpCtz32: 372 return rewriteValueAMD64_OpCtz32(v, config) 373 case OpCtz64: 374 return rewriteValueAMD64_OpCtz64(v, config) 375 case OpCvt32Fto32: 376 return rewriteValueAMD64_OpCvt32Fto32(v, config) 377 case OpCvt32Fto64: 378 return rewriteValueAMD64_OpCvt32Fto64(v, config) 379 case OpCvt32Fto64F: 380 return rewriteValueAMD64_OpCvt32Fto64F(v, config) 381 case OpCvt32to32F: 382 return rewriteValueAMD64_OpCvt32to32F(v, config) 383 case OpCvt32to64F: 384 return rewriteValueAMD64_OpCvt32to64F(v, config) 385 case OpCvt64Fto32: 386 return rewriteValueAMD64_OpCvt64Fto32(v, config) 387 case OpCvt64Fto32F: 388 return rewriteValueAMD64_OpCvt64Fto32F(v, config) 389 case OpCvt64Fto64: 390 return rewriteValueAMD64_OpCvt64Fto64(v, config) 391 case OpCvt64to32F: 392 return rewriteValueAMD64_OpCvt64to32F(v, config) 393 case OpCvt64to64F: 394 return rewriteValueAMD64_OpCvt64to64F(v, config) 395 case OpDeferCall: 396 return rewriteValueAMD64_OpDeferCall(v, config) 397 case OpDiv128u: 398 return rewriteValueAMD64_OpDiv128u(v, config) 399 case OpDiv16: 400 return rewriteValueAMD64_OpDiv16(v, config) 401 case OpDiv16u: 402 return rewriteValueAMD64_OpDiv16u(v, config) 403 case OpDiv32: 404 return rewriteValueAMD64_OpDiv32(v, config) 405 case OpDiv32F: 406 return rewriteValueAMD64_OpDiv32F(v, config) 407 case OpDiv32u: 408 return rewriteValueAMD64_OpDiv32u(v, config) 409 case OpDiv64: 410 return rewriteValueAMD64_OpDiv64(v, config) 411 case OpDiv64F: 412 return rewriteValueAMD64_OpDiv64F(v, config) 413 case OpDiv64u: 414 return rewriteValueAMD64_OpDiv64u(v, config) 415 case OpDiv8: 416 return rewriteValueAMD64_OpDiv8(v, config) 417 case OpDiv8u: 418 return rewriteValueAMD64_OpDiv8u(v, config) 419 case OpEq16: 420 return rewriteValueAMD64_OpEq16(v, config) 421 case OpEq32: 422 return rewriteValueAMD64_OpEq32(v, config) 423 case OpEq32F: 424 return rewriteValueAMD64_OpEq32F(v, config) 425 case OpEq64: 426 return rewriteValueAMD64_OpEq64(v, config) 427 case OpEq64F: 428 return rewriteValueAMD64_OpEq64F(v, config) 429 case OpEq8: 430 return rewriteValueAMD64_OpEq8(v, config) 431 case OpEqB: 432 return rewriteValueAMD64_OpEqB(v, config) 433 case OpEqPtr: 434 return rewriteValueAMD64_OpEqPtr(v, config) 435 case OpGeq16: 436 return rewriteValueAMD64_OpGeq16(v, config) 437 case OpGeq16U: 438 return rewriteValueAMD64_OpGeq16U(v, config) 439 case OpGeq32: 440 return rewriteValueAMD64_OpGeq32(v, config) 441 case OpGeq32F: 442 return rewriteValueAMD64_OpGeq32F(v, config) 443 case OpGeq32U: 444 return rewriteValueAMD64_OpGeq32U(v, config) 445 case OpGeq64: 446 return rewriteValueAMD64_OpGeq64(v, config) 447 case OpGeq64F: 448 return rewriteValueAMD64_OpGeq64F(v, config) 449 case OpGeq64U: 450 return rewriteValueAMD64_OpGeq64U(v, config) 451 case OpGeq8: 452 return rewriteValueAMD64_OpGeq8(v, config) 453 case OpGeq8U: 454 return 
rewriteValueAMD64_OpGeq8U(v, config) 455 case OpGetClosurePtr: 456 return rewriteValueAMD64_OpGetClosurePtr(v, config) 457 case OpGetG: 458 return rewriteValueAMD64_OpGetG(v, config) 459 case OpGoCall: 460 return rewriteValueAMD64_OpGoCall(v, config) 461 case OpGreater16: 462 return rewriteValueAMD64_OpGreater16(v, config) 463 case OpGreater16U: 464 return rewriteValueAMD64_OpGreater16U(v, config) 465 case OpGreater32: 466 return rewriteValueAMD64_OpGreater32(v, config) 467 case OpGreater32F: 468 return rewriteValueAMD64_OpGreater32F(v, config) 469 case OpGreater32U: 470 return rewriteValueAMD64_OpGreater32U(v, config) 471 case OpGreater64: 472 return rewriteValueAMD64_OpGreater64(v, config) 473 case OpGreater64F: 474 return rewriteValueAMD64_OpGreater64F(v, config) 475 case OpGreater64U: 476 return rewriteValueAMD64_OpGreater64U(v, config) 477 case OpGreater8: 478 return rewriteValueAMD64_OpGreater8(v, config) 479 case OpGreater8U: 480 return rewriteValueAMD64_OpGreater8U(v, config) 481 case OpHmul16: 482 return rewriteValueAMD64_OpHmul16(v, config) 483 case OpHmul16u: 484 return rewriteValueAMD64_OpHmul16u(v, config) 485 case OpHmul32: 486 return rewriteValueAMD64_OpHmul32(v, config) 487 case OpHmul32u: 488 return rewriteValueAMD64_OpHmul32u(v, config) 489 case OpHmul64: 490 return rewriteValueAMD64_OpHmul64(v, config) 491 case OpHmul64u: 492 return rewriteValueAMD64_OpHmul64u(v, config) 493 case OpHmul8: 494 return rewriteValueAMD64_OpHmul8(v, config) 495 case OpHmul8u: 496 return rewriteValueAMD64_OpHmul8u(v, config) 497 case OpInt64Hi: 498 return rewriteValueAMD64_OpInt64Hi(v, config) 499 case OpInterCall: 500 return rewriteValueAMD64_OpInterCall(v, config) 501 case OpIsInBounds: 502 return rewriteValueAMD64_OpIsInBounds(v, config) 503 case OpIsNonNil: 504 return rewriteValueAMD64_OpIsNonNil(v, config) 505 case OpIsSliceInBounds: 506 return rewriteValueAMD64_OpIsSliceInBounds(v, config) 507 case OpLeq16: 508 return rewriteValueAMD64_OpLeq16(v, config) 509 case OpLeq16U: 510 return rewriteValueAMD64_OpLeq16U(v, config) 511 case OpLeq32: 512 return rewriteValueAMD64_OpLeq32(v, config) 513 case OpLeq32F: 514 return rewriteValueAMD64_OpLeq32F(v, config) 515 case OpLeq32U: 516 return rewriteValueAMD64_OpLeq32U(v, config) 517 case OpLeq64: 518 return rewriteValueAMD64_OpLeq64(v, config) 519 case OpLeq64F: 520 return rewriteValueAMD64_OpLeq64F(v, config) 521 case OpLeq64U: 522 return rewriteValueAMD64_OpLeq64U(v, config) 523 case OpLeq8: 524 return rewriteValueAMD64_OpLeq8(v, config) 525 case OpLeq8U: 526 return rewriteValueAMD64_OpLeq8U(v, config) 527 case OpLess16: 528 return rewriteValueAMD64_OpLess16(v, config) 529 case OpLess16U: 530 return rewriteValueAMD64_OpLess16U(v, config) 531 case OpLess32: 532 return rewriteValueAMD64_OpLess32(v, config) 533 case OpLess32F: 534 return rewriteValueAMD64_OpLess32F(v, config) 535 case OpLess32U: 536 return rewriteValueAMD64_OpLess32U(v, config) 537 case OpLess64: 538 return rewriteValueAMD64_OpLess64(v, config) 539 case OpLess64F: 540 return rewriteValueAMD64_OpLess64F(v, config) 541 case OpLess64U: 542 return rewriteValueAMD64_OpLess64U(v, config) 543 case OpLess8: 544 return rewriteValueAMD64_OpLess8(v, config) 545 case OpLess8U: 546 return rewriteValueAMD64_OpLess8U(v, config) 547 case OpLoad: 548 return rewriteValueAMD64_OpLoad(v, config) 549 case OpLrot16: 550 return rewriteValueAMD64_OpLrot16(v, config) 551 case OpLrot32: 552 return rewriteValueAMD64_OpLrot32(v, config) 553 case OpLrot64: 554 return rewriteValueAMD64_OpLrot64(v, config) 555 
case OpLrot8: 556 return rewriteValueAMD64_OpLrot8(v, config) 557 case OpLsh16x16: 558 return rewriteValueAMD64_OpLsh16x16(v, config) 559 case OpLsh16x32: 560 return rewriteValueAMD64_OpLsh16x32(v, config) 561 case OpLsh16x64: 562 return rewriteValueAMD64_OpLsh16x64(v, config) 563 case OpLsh16x8: 564 return rewriteValueAMD64_OpLsh16x8(v, config) 565 case OpLsh32x16: 566 return rewriteValueAMD64_OpLsh32x16(v, config) 567 case OpLsh32x32: 568 return rewriteValueAMD64_OpLsh32x32(v, config) 569 case OpLsh32x64: 570 return rewriteValueAMD64_OpLsh32x64(v, config) 571 case OpLsh32x8: 572 return rewriteValueAMD64_OpLsh32x8(v, config) 573 case OpLsh64x16: 574 return rewriteValueAMD64_OpLsh64x16(v, config) 575 case OpLsh64x32: 576 return rewriteValueAMD64_OpLsh64x32(v, config) 577 case OpLsh64x64: 578 return rewriteValueAMD64_OpLsh64x64(v, config) 579 case OpLsh64x8: 580 return rewriteValueAMD64_OpLsh64x8(v, config) 581 case OpLsh8x16: 582 return rewriteValueAMD64_OpLsh8x16(v, config) 583 case OpLsh8x32: 584 return rewriteValueAMD64_OpLsh8x32(v, config) 585 case OpLsh8x64: 586 return rewriteValueAMD64_OpLsh8x64(v, config) 587 case OpLsh8x8: 588 return rewriteValueAMD64_OpLsh8x8(v, config) 589 case OpMod16: 590 return rewriteValueAMD64_OpMod16(v, config) 591 case OpMod16u: 592 return rewriteValueAMD64_OpMod16u(v, config) 593 case OpMod32: 594 return rewriteValueAMD64_OpMod32(v, config) 595 case OpMod32u: 596 return rewriteValueAMD64_OpMod32u(v, config) 597 case OpMod64: 598 return rewriteValueAMD64_OpMod64(v, config) 599 case OpMod64u: 600 return rewriteValueAMD64_OpMod64u(v, config) 601 case OpMod8: 602 return rewriteValueAMD64_OpMod8(v, config) 603 case OpMod8u: 604 return rewriteValueAMD64_OpMod8u(v, config) 605 case OpMove: 606 return rewriteValueAMD64_OpMove(v, config) 607 case OpMul16: 608 return rewriteValueAMD64_OpMul16(v, config) 609 case OpMul32: 610 return rewriteValueAMD64_OpMul32(v, config) 611 case OpMul32F: 612 return rewriteValueAMD64_OpMul32F(v, config) 613 case OpMul64: 614 return rewriteValueAMD64_OpMul64(v, config) 615 case OpMul64F: 616 return rewriteValueAMD64_OpMul64F(v, config) 617 case OpMul64uhilo: 618 return rewriteValueAMD64_OpMul64uhilo(v, config) 619 case OpMul8: 620 return rewriteValueAMD64_OpMul8(v, config) 621 case OpNeg16: 622 return rewriteValueAMD64_OpNeg16(v, config) 623 case OpNeg32: 624 return rewriteValueAMD64_OpNeg32(v, config) 625 case OpNeg32F: 626 return rewriteValueAMD64_OpNeg32F(v, config) 627 case OpNeg64: 628 return rewriteValueAMD64_OpNeg64(v, config) 629 case OpNeg64F: 630 return rewriteValueAMD64_OpNeg64F(v, config) 631 case OpNeg8: 632 return rewriteValueAMD64_OpNeg8(v, config) 633 case OpNeq16: 634 return rewriteValueAMD64_OpNeq16(v, config) 635 case OpNeq32: 636 return rewriteValueAMD64_OpNeq32(v, config) 637 case OpNeq32F: 638 return rewriteValueAMD64_OpNeq32F(v, config) 639 case OpNeq64: 640 return rewriteValueAMD64_OpNeq64(v, config) 641 case OpNeq64F: 642 return rewriteValueAMD64_OpNeq64F(v, config) 643 case OpNeq8: 644 return rewriteValueAMD64_OpNeq8(v, config) 645 case OpNeqB: 646 return rewriteValueAMD64_OpNeqB(v, config) 647 case OpNeqPtr: 648 return rewriteValueAMD64_OpNeqPtr(v, config) 649 case OpNilCheck: 650 return rewriteValueAMD64_OpNilCheck(v, config) 651 case OpNot: 652 return rewriteValueAMD64_OpNot(v, config) 653 case OpOffPtr: 654 return rewriteValueAMD64_OpOffPtr(v, config) 655 case OpOr16: 656 return rewriteValueAMD64_OpOr16(v, config) 657 case OpOr32: 658 return rewriteValueAMD64_OpOr32(v, config) 659 case OpOr64: 660 return 
rewriteValueAMD64_OpOr64(v, config) 661 case OpOr8: 662 return rewriteValueAMD64_OpOr8(v, config) 663 case OpOrB: 664 return rewriteValueAMD64_OpOrB(v, config) 665 case OpRsh16Ux16: 666 return rewriteValueAMD64_OpRsh16Ux16(v, config) 667 case OpRsh16Ux32: 668 return rewriteValueAMD64_OpRsh16Ux32(v, config) 669 case OpRsh16Ux64: 670 return rewriteValueAMD64_OpRsh16Ux64(v, config) 671 case OpRsh16Ux8: 672 return rewriteValueAMD64_OpRsh16Ux8(v, config) 673 case OpRsh16x16: 674 return rewriteValueAMD64_OpRsh16x16(v, config) 675 case OpRsh16x32: 676 return rewriteValueAMD64_OpRsh16x32(v, config) 677 case OpRsh16x64: 678 return rewriteValueAMD64_OpRsh16x64(v, config) 679 case OpRsh16x8: 680 return rewriteValueAMD64_OpRsh16x8(v, config) 681 case OpRsh32Ux16: 682 return rewriteValueAMD64_OpRsh32Ux16(v, config) 683 case OpRsh32Ux32: 684 return rewriteValueAMD64_OpRsh32Ux32(v, config) 685 case OpRsh32Ux64: 686 return rewriteValueAMD64_OpRsh32Ux64(v, config) 687 case OpRsh32Ux8: 688 return rewriteValueAMD64_OpRsh32Ux8(v, config) 689 case OpRsh32x16: 690 return rewriteValueAMD64_OpRsh32x16(v, config) 691 case OpRsh32x32: 692 return rewriteValueAMD64_OpRsh32x32(v, config) 693 case OpRsh32x64: 694 return rewriteValueAMD64_OpRsh32x64(v, config) 695 case OpRsh32x8: 696 return rewriteValueAMD64_OpRsh32x8(v, config) 697 case OpRsh64Ux16: 698 return rewriteValueAMD64_OpRsh64Ux16(v, config) 699 case OpRsh64Ux32: 700 return rewriteValueAMD64_OpRsh64Ux32(v, config) 701 case OpRsh64Ux64: 702 return rewriteValueAMD64_OpRsh64Ux64(v, config) 703 case OpRsh64Ux8: 704 return rewriteValueAMD64_OpRsh64Ux8(v, config) 705 case OpRsh64x16: 706 return rewriteValueAMD64_OpRsh64x16(v, config) 707 case OpRsh64x32: 708 return rewriteValueAMD64_OpRsh64x32(v, config) 709 case OpRsh64x64: 710 return rewriteValueAMD64_OpRsh64x64(v, config) 711 case OpRsh64x8: 712 return rewriteValueAMD64_OpRsh64x8(v, config) 713 case OpRsh8Ux16: 714 return rewriteValueAMD64_OpRsh8Ux16(v, config) 715 case OpRsh8Ux32: 716 return rewriteValueAMD64_OpRsh8Ux32(v, config) 717 case OpRsh8Ux64: 718 return rewriteValueAMD64_OpRsh8Ux64(v, config) 719 case OpRsh8Ux8: 720 return rewriteValueAMD64_OpRsh8Ux8(v, config) 721 case OpRsh8x16: 722 return rewriteValueAMD64_OpRsh8x16(v, config) 723 case OpRsh8x32: 724 return rewriteValueAMD64_OpRsh8x32(v, config) 725 case OpRsh8x64: 726 return rewriteValueAMD64_OpRsh8x64(v, config) 727 case OpRsh8x8: 728 return rewriteValueAMD64_OpRsh8x8(v, config) 729 case OpSelect0: 730 return rewriteValueAMD64_OpSelect0(v, config) 731 case OpSelect1: 732 return rewriteValueAMD64_OpSelect1(v, config) 733 case OpSignExt16to32: 734 return rewriteValueAMD64_OpSignExt16to32(v, config) 735 case OpSignExt16to64: 736 return rewriteValueAMD64_OpSignExt16to64(v, config) 737 case OpSignExt32to64: 738 return rewriteValueAMD64_OpSignExt32to64(v, config) 739 case OpSignExt8to16: 740 return rewriteValueAMD64_OpSignExt8to16(v, config) 741 case OpSignExt8to32: 742 return rewriteValueAMD64_OpSignExt8to32(v, config) 743 case OpSignExt8to64: 744 return rewriteValueAMD64_OpSignExt8to64(v, config) 745 case OpSlicemask: 746 return rewriteValueAMD64_OpSlicemask(v, config) 747 case OpSqrt: 748 return rewriteValueAMD64_OpSqrt(v, config) 749 case OpStaticCall: 750 return rewriteValueAMD64_OpStaticCall(v, config) 751 case OpStore: 752 return rewriteValueAMD64_OpStore(v, config) 753 case OpSub16: 754 return rewriteValueAMD64_OpSub16(v, config) 755 case OpSub32: 756 return rewriteValueAMD64_OpSub32(v, config) 757 case OpSub32F: 758 return 
rewriteValueAMD64_OpSub32F(v, config) 759 case OpSub64: 760 return rewriteValueAMD64_OpSub64(v, config) 761 case OpSub64F: 762 return rewriteValueAMD64_OpSub64F(v, config) 763 case OpSub8: 764 return rewriteValueAMD64_OpSub8(v, config) 765 case OpSubPtr: 766 return rewriteValueAMD64_OpSubPtr(v, config) 767 case OpTrunc16to8: 768 return rewriteValueAMD64_OpTrunc16to8(v, config) 769 case OpTrunc32to16: 770 return rewriteValueAMD64_OpTrunc32to16(v, config) 771 case OpTrunc32to8: 772 return rewriteValueAMD64_OpTrunc32to8(v, config) 773 case OpTrunc64to16: 774 return rewriteValueAMD64_OpTrunc64to16(v, config) 775 case OpTrunc64to32: 776 return rewriteValueAMD64_OpTrunc64to32(v, config) 777 case OpTrunc64to8: 778 return rewriteValueAMD64_OpTrunc64to8(v, config) 779 case OpXor16: 780 return rewriteValueAMD64_OpXor16(v, config) 781 case OpXor32: 782 return rewriteValueAMD64_OpXor32(v, config) 783 case OpXor64: 784 return rewriteValueAMD64_OpXor64(v, config) 785 case OpXor8: 786 return rewriteValueAMD64_OpXor8(v, config) 787 case OpZero: 788 return rewriteValueAMD64_OpZero(v, config) 789 case OpZeroExt16to32: 790 return rewriteValueAMD64_OpZeroExt16to32(v, config) 791 case OpZeroExt16to64: 792 return rewriteValueAMD64_OpZeroExt16to64(v, config) 793 case OpZeroExt32to64: 794 return rewriteValueAMD64_OpZeroExt32to64(v, config) 795 case OpZeroExt8to16: 796 return rewriteValueAMD64_OpZeroExt8to16(v, config) 797 case OpZeroExt8to32: 798 return rewriteValueAMD64_OpZeroExt8to32(v, config) 799 case OpZeroExt8to64: 800 return rewriteValueAMD64_OpZeroExt8to64(v, config) 801 } 802 return false 803 } 804 func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { 805 b := v.Block 806 _ = b 807 // match: (ADDL x (MOVLconst [c])) 808 // cond: 809 // result: (ADDLconst [c] x) 810 for { 811 x := v.Args[0] 812 v_1 := v.Args[1] 813 if v_1.Op != OpAMD64MOVLconst { 814 break 815 } 816 c := v_1.AuxInt 817 v.reset(OpAMD64ADDLconst) 818 v.AuxInt = c 819 v.AddArg(x) 820 return true 821 } 822 // match: (ADDL (MOVLconst [c]) x) 823 // cond: 824 // result: (ADDLconst [c] x) 825 for { 826 v_0 := v.Args[0] 827 if v_0.Op != OpAMD64MOVLconst { 828 break 829 } 830 c := v_0.AuxInt 831 x := v.Args[1] 832 v.reset(OpAMD64ADDLconst) 833 v.AuxInt = c 834 v.AddArg(x) 835 return true 836 } 837 // match: (ADDL x (NEGL y)) 838 // cond: 839 // result: (SUBL x y) 840 for { 841 x := v.Args[0] 842 v_1 := v.Args[1] 843 if v_1.Op != OpAMD64NEGL { 844 break 845 } 846 y := v_1.Args[0] 847 v.reset(OpAMD64SUBL) 848 v.AddArg(x) 849 v.AddArg(y) 850 return true 851 } 852 return false 853 } 854 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool { 855 b := v.Block 856 _ = b 857 // match: (ADDLconst [c] x) 858 // cond: int32(c)==0 859 // result: x 860 for { 861 c := v.AuxInt 862 x := v.Args[0] 863 if !(int32(c) == 0) { 864 break 865 } 866 v.reset(OpCopy) 867 v.Type = x.Type 868 v.AddArg(x) 869 return true 870 } 871 // match: (ADDLconst [c] (MOVLconst [d])) 872 // cond: 873 // result: (MOVLconst [int64(int32(c+d))]) 874 for { 875 c := v.AuxInt 876 v_0 := v.Args[0] 877 if v_0.Op != OpAMD64MOVLconst { 878 break 879 } 880 d := v_0.AuxInt 881 v.reset(OpAMD64MOVLconst) 882 v.AuxInt = int64(int32(c + d)) 883 return true 884 } 885 // match: (ADDLconst [c] (ADDLconst [d] x)) 886 // cond: 887 // result: (ADDLconst [int64(int32(c+d))] x) 888 for { 889 c := v.AuxInt 890 v_0 := v.Args[0] 891 if v_0.Op != OpAMD64ADDLconst { 892 break 893 } 894 d := v_0.AuxInt 895 x := v_0.Args[0] 896 v.reset(OpAMD64ADDLconst) 897 v.AuxInt = int64(int32(c + 
d)) 898 v.AddArg(x) 899 return true 900 } 901 // match: (ADDLconst [c] (LEAL [d] {s} x)) 902 // cond: is32Bit(c+d) 903 // result: (LEAL [c+d] {s} x) 904 for { 905 c := v.AuxInt 906 v_0 := v.Args[0] 907 if v_0.Op != OpAMD64LEAL { 908 break 909 } 910 d := v_0.AuxInt 911 s := v_0.Aux 912 x := v_0.Args[0] 913 if !(is32Bit(c + d)) { 914 break 915 } 916 v.reset(OpAMD64LEAL) 917 v.AuxInt = c + d 918 v.Aux = s 919 v.AddArg(x) 920 return true 921 } 922 return false 923 } 924 func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool { 925 b := v.Block 926 _ = b 927 // match: (ADDQ x (MOVQconst [c])) 928 // cond: is32Bit(c) 929 // result: (ADDQconst [c] x) 930 for { 931 x := v.Args[0] 932 v_1 := v.Args[1] 933 if v_1.Op != OpAMD64MOVQconst { 934 break 935 } 936 c := v_1.AuxInt 937 if !(is32Bit(c)) { 938 break 939 } 940 v.reset(OpAMD64ADDQconst) 941 v.AuxInt = c 942 v.AddArg(x) 943 return true 944 } 945 // match: (ADDQ (MOVQconst [c]) x) 946 // cond: is32Bit(c) 947 // result: (ADDQconst [c] x) 948 for { 949 v_0 := v.Args[0] 950 if v_0.Op != OpAMD64MOVQconst { 951 break 952 } 953 c := v_0.AuxInt 954 x := v.Args[1] 955 if !(is32Bit(c)) { 956 break 957 } 958 v.reset(OpAMD64ADDQconst) 959 v.AuxInt = c 960 v.AddArg(x) 961 return true 962 } 963 // match: (ADDQ x (SHLQconst [3] y)) 964 // cond: 965 // result: (LEAQ8 x y) 966 for { 967 x := v.Args[0] 968 v_1 := v.Args[1] 969 if v_1.Op != OpAMD64SHLQconst { 970 break 971 } 972 if v_1.AuxInt != 3 { 973 break 974 } 975 y := v_1.Args[0] 976 v.reset(OpAMD64LEAQ8) 977 v.AddArg(x) 978 v.AddArg(y) 979 return true 980 } 981 // match: (ADDQ x (SHLQconst [2] y)) 982 // cond: 983 // result: (LEAQ4 x y) 984 for { 985 x := v.Args[0] 986 v_1 := v.Args[1] 987 if v_1.Op != OpAMD64SHLQconst { 988 break 989 } 990 if v_1.AuxInt != 2 { 991 break 992 } 993 y := v_1.Args[0] 994 v.reset(OpAMD64LEAQ4) 995 v.AddArg(x) 996 v.AddArg(y) 997 return true 998 } 999 // match: (ADDQ x (SHLQconst [1] y)) 1000 // cond: 1001 // result: (LEAQ2 x y) 1002 for { 1003 x := v.Args[0] 1004 v_1 := v.Args[1] 1005 if v_1.Op != OpAMD64SHLQconst { 1006 break 1007 } 1008 if v_1.AuxInt != 1 { 1009 break 1010 } 1011 y := v_1.Args[0] 1012 v.reset(OpAMD64LEAQ2) 1013 v.AddArg(x) 1014 v.AddArg(y) 1015 return true 1016 } 1017 // match: (ADDQ x (ADDQ y y)) 1018 // cond: 1019 // result: (LEAQ2 x y) 1020 for { 1021 x := v.Args[0] 1022 v_1 := v.Args[1] 1023 if v_1.Op != OpAMD64ADDQ { 1024 break 1025 } 1026 y := v_1.Args[0] 1027 if y != v_1.Args[1] { 1028 break 1029 } 1030 v.reset(OpAMD64LEAQ2) 1031 v.AddArg(x) 1032 v.AddArg(y) 1033 return true 1034 } 1035 // match: (ADDQ x (ADDQ x y)) 1036 // cond: 1037 // result: (LEAQ2 y x) 1038 for { 1039 x := v.Args[0] 1040 v_1 := v.Args[1] 1041 if v_1.Op != OpAMD64ADDQ { 1042 break 1043 } 1044 if x != v_1.Args[0] { 1045 break 1046 } 1047 y := v_1.Args[1] 1048 v.reset(OpAMD64LEAQ2) 1049 v.AddArg(y) 1050 v.AddArg(x) 1051 return true 1052 } 1053 // match: (ADDQ x (ADDQ y x)) 1054 // cond: 1055 // result: (LEAQ2 y x) 1056 for { 1057 x := v.Args[0] 1058 v_1 := v.Args[1] 1059 if v_1.Op != OpAMD64ADDQ { 1060 break 1061 } 1062 y := v_1.Args[0] 1063 if x != v_1.Args[1] { 1064 break 1065 } 1066 v.reset(OpAMD64LEAQ2) 1067 v.AddArg(y) 1068 v.AddArg(x) 1069 return true 1070 } 1071 // match: (ADDQ (ADDQconst [c] x) y) 1072 // cond: 1073 // result: (LEAQ1 [c] x y) 1074 for { 1075 v_0 := v.Args[0] 1076 if v_0.Op != OpAMD64ADDQconst { 1077 break 1078 } 1079 c := v_0.AuxInt 1080 x := v_0.Args[0] 1081 y := v.Args[1] 1082 v.reset(OpAMD64LEAQ1) 1083 v.AuxInt = c 1084 v.AddArg(x) 1085 
v.AddArg(y) 1086 return true 1087 } 1088 // match: (ADDQ x (ADDQconst [c] y)) 1089 // cond: 1090 // result: (LEAQ1 [c] x y) 1091 for { 1092 x := v.Args[0] 1093 v_1 := v.Args[1] 1094 if v_1.Op != OpAMD64ADDQconst { 1095 break 1096 } 1097 c := v_1.AuxInt 1098 y := v_1.Args[0] 1099 v.reset(OpAMD64LEAQ1) 1100 v.AuxInt = c 1101 v.AddArg(x) 1102 v.AddArg(y) 1103 return true 1104 } 1105 // match: (ADDQ x (LEAQ [c] {s} y)) 1106 // cond: x.Op != OpSB && y.Op != OpSB 1107 // result: (LEAQ1 [c] {s} x y) 1108 for { 1109 x := v.Args[0] 1110 v_1 := v.Args[1] 1111 if v_1.Op != OpAMD64LEAQ { 1112 break 1113 } 1114 c := v_1.AuxInt 1115 s := v_1.Aux 1116 y := v_1.Args[0] 1117 if !(x.Op != OpSB && y.Op != OpSB) { 1118 break 1119 } 1120 v.reset(OpAMD64LEAQ1) 1121 v.AuxInt = c 1122 v.Aux = s 1123 v.AddArg(x) 1124 v.AddArg(y) 1125 return true 1126 } 1127 // match: (ADDQ (LEAQ [c] {s} x) y) 1128 // cond: x.Op != OpSB && y.Op != OpSB 1129 // result: (LEAQ1 [c] {s} x y) 1130 for { 1131 v_0 := v.Args[0] 1132 if v_0.Op != OpAMD64LEAQ { 1133 break 1134 } 1135 c := v_0.AuxInt 1136 s := v_0.Aux 1137 x := v_0.Args[0] 1138 y := v.Args[1] 1139 if !(x.Op != OpSB && y.Op != OpSB) { 1140 break 1141 } 1142 v.reset(OpAMD64LEAQ1) 1143 v.AuxInt = c 1144 v.Aux = s 1145 v.AddArg(x) 1146 v.AddArg(y) 1147 return true 1148 } 1149 // match: (ADDQ x (NEGQ y)) 1150 // cond: 1151 // result: (SUBQ x y) 1152 for { 1153 x := v.Args[0] 1154 v_1 := v.Args[1] 1155 if v_1.Op != OpAMD64NEGQ { 1156 break 1157 } 1158 y := v_1.Args[0] 1159 v.reset(OpAMD64SUBQ) 1160 v.AddArg(x) 1161 v.AddArg(y) 1162 return true 1163 } 1164 return false 1165 } 1166 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { 1167 b := v.Block 1168 _ = b 1169 // match: (ADDQconst [c] (ADDQ x y)) 1170 // cond: 1171 // result: (LEAQ1 [c] x y) 1172 for { 1173 c := v.AuxInt 1174 v_0 := v.Args[0] 1175 if v_0.Op != OpAMD64ADDQ { 1176 break 1177 } 1178 x := v_0.Args[0] 1179 y := v_0.Args[1] 1180 v.reset(OpAMD64LEAQ1) 1181 v.AuxInt = c 1182 v.AddArg(x) 1183 v.AddArg(y) 1184 return true 1185 } 1186 // match: (ADDQconst [c] (LEAQ [d] {s} x)) 1187 // cond: is32Bit(c+d) 1188 // result: (LEAQ [c+d] {s} x) 1189 for { 1190 c := v.AuxInt 1191 v_0 := v.Args[0] 1192 if v_0.Op != OpAMD64LEAQ { 1193 break 1194 } 1195 d := v_0.AuxInt 1196 s := v_0.Aux 1197 x := v_0.Args[0] 1198 if !(is32Bit(c + d)) { 1199 break 1200 } 1201 v.reset(OpAMD64LEAQ) 1202 v.AuxInt = c + d 1203 v.Aux = s 1204 v.AddArg(x) 1205 return true 1206 } 1207 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) 1208 // cond: is32Bit(c+d) 1209 // result: (LEAQ1 [c+d] {s} x y) 1210 for { 1211 c := v.AuxInt 1212 v_0 := v.Args[0] 1213 if v_0.Op != OpAMD64LEAQ1 { 1214 break 1215 } 1216 d := v_0.AuxInt 1217 s := v_0.Aux 1218 x := v_0.Args[0] 1219 y := v_0.Args[1] 1220 if !(is32Bit(c + d)) { 1221 break 1222 } 1223 v.reset(OpAMD64LEAQ1) 1224 v.AuxInt = c + d 1225 v.Aux = s 1226 v.AddArg(x) 1227 v.AddArg(y) 1228 return true 1229 } 1230 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) 1231 // cond: is32Bit(c+d) 1232 // result: (LEAQ2 [c+d] {s} x y) 1233 for { 1234 c := v.AuxInt 1235 v_0 := v.Args[0] 1236 if v_0.Op != OpAMD64LEAQ2 { 1237 break 1238 } 1239 d := v_0.AuxInt 1240 s := v_0.Aux 1241 x := v_0.Args[0] 1242 y := v_0.Args[1] 1243 if !(is32Bit(c + d)) { 1244 break 1245 } 1246 v.reset(OpAMD64LEAQ2) 1247 v.AuxInt = c + d 1248 v.Aux = s 1249 v.AddArg(x) 1250 v.AddArg(y) 1251 return true 1252 } 1253 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) 1254 // cond: is32Bit(c+d) 1255 // result: (LEAQ4 [c+d] {s} x y) 1256 for { 1257 c := 
v.AuxInt 1258 v_0 := v.Args[0] 1259 if v_0.Op != OpAMD64LEAQ4 { 1260 break 1261 } 1262 d := v_0.AuxInt 1263 s := v_0.Aux 1264 x := v_0.Args[0] 1265 y := v_0.Args[1] 1266 if !(is32Bit(c + d)) { 1267 break 1268 } 1269 v.reset(OpAMD64LEAQ4) 1270 v.AuxInt = c + d 1271 v.Aux = s 1272 v.AddArg(x) 1273 v.AddArg(y) 1274 return true 1275 } 1276 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) 1277 // cond: is32Bit(c+d) 1278 // result: (LEAQ8 [c+d] {s} x y) 1279 for { 1280 c := v.AuxInt 1281 v_0 := v.Args[0] 1282 if v_0.Op != OpAMD64LEAQ8 { 1283 break 1284 } 1285 d := v_0.AuxInt 1286 s := v_0.Aux 1287 x := v_0.Args[0] 1288 y := v_0.Args[1] 1289 if !(is32Bit(c + d)) { 1290 break 1291 } 1292 v.reset(OpAMD64LEAQ8) 1293 v.AuxInt = c + d 1294 v.Aux = s 1295 v.AddArg(x) 1296 v.AddArg(y) 1297 return true 1298 } 1299 // match: (ADDQconst [0] x) 1300 // cond: 1301 // result: x 1302 for { 1303 if v.AuxInt != 0 { 1304 break 1305 } 1306 x := v.Args[0] 1307 v.reset(OpCopy) 1308 v.Type = x.Type 1309 v.AddArg(x) 1310 return true 1311 } 1312 // match: (ADDQconst [c] (MOVQconst [d])) 1313 // cond: 1314 // result: (MOVQconst [c+d]) 1315 for { 1316 c := v.AuxInt 1317 v_0 := v.Args[0] 1318 if v_0.Op != OpAMD64MOVQconst { 1319 break 1320 } 1321 d := v_0.AuxInt 1322 v.reset(OpAMD64MOVQconst) 1323 v.AuxInt = c + d 1324 return true 1325 } 1326 // match: (ADDQconst [c] (ADDQconst [d] x)) 1327 // cond: is32Bit(c+d) 1328 // result: (ADDQconst [c+d] x) 1329 for { 1330 c := v.AuxInt 1331 v_0 := v.Args[0] 1332 if v_0.Op != OpAMD64ADDQconst { 1333 break 1334 } 1335 d := v_0.AuxInt 1336 x := v_0.Args[0] 1337 if !(is32Bit(c + d)) { 1338 break 1339 } 1340 v.reset(OpAMD64ADDQconst) 1341 v.AuxInt = c + d 1342 v.AddArg(x) 1343 return true 1344 } 1345 return false 1346 } 1347 func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { 1348 b := v.Block 1349 _ = b 1350 // match: (ANDL x (MOVLconst [c])) 1351 // cond: 1352 // result: (ANDLconst [c] x) 1353 for { 1354 x := v.Args[0] 1355 v_1 := v.Args[1] 1356 if v_1.Op != OpAMD64MOVLconst { 1357 break 1358 } 1359 c := v_1.AuxInt 1360 v.reset(OpAMD64ANDLconst) 1361 v.AuxInt = c 1362 v.AddArg(x) 1363 return true 1364 } 1365 // match: (ANDL (MOVLconst [c]) x) 1366 // cond: 1367 // result: (ANDLconst [c] x) 1368 for { 1369 v_0 := v.Args[0] 1370 if v_0.Op != OpAMD64MOVLconst { 1371 break 1372 } 1373 c := v_0.AuxInt 1374 x := v.Args[1] 1375 v.reset(OpAMD64ANDLconst) 1376 v.AuxInt = c 1377 v.AddArg(x) 1378 return true 1379 } 1380 // match: (ANDL x x) 1381 // cond: 1382 // result: x 1383 for { 1384 x := v.Args[0] 1385 if x != v.Args[1] { 1386 break 1387 } 1388 v.reset(OpCopy) 1389 v.Type = x.Type 1390 v.AddArg(x) 1391 return true 1392 } 1393 return false 1394 } 1395 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool { 1396 b := v.Block 1397 _ = b 1398 // match: (ANDLconst [c] (ANDLconst [d] x)) 1399 // cond: 1400 // result: (ANDLconst [c & d] x) 1401 for { 1402 c := v.AuxInt 1403 v_0 := v.Args[0] 1404 if v_0.Op != OpAMD64ANDLconst { 1405 break 1406 } 1407 d := v_0.AuxInt 1408 x := v_0.Args[0] 1409 v.reset(OpAMD64ANDLconst) 1410 v.AuxInt = c & d 1411 v.AddArg(x) 1412 return true 1413 } 1414 // match: (ANDLconst [0xFF] x) 1415 // cond: 1416 // result: (MOVBQZX x) 1417 for { 1418 if v.AuxInt != 0xFF { 1419 break 1420 } 1421 x := v.Args[0] 1422 v.reset(OpAMD64MOVBQZX) 1423 v.AddArg(x) 1424 return true 1425 } 1426 // match: (ANDLconst [0xFFFF] x) 1427 // cond: 1428 // result: (MOVWQZX x) 1429 for { 1430 if v.AuxInt != 0xFFFF { 1431 break 1432 } 1433 x := v.Args[0] 1434 
v.reset(OpAMD64MOVWQZX) 1435 v.AddArg(x) 1436 return true 1437 } 1438 // match: (ANDLconst [c] _) 1439 // cond: int32(c)==0 1440 // result: (MOVLconst [0]) 1441 for { 1442 c := v.AuxInt 1443 if !(int32(c) == 0) { 1444 break 1445 } 1446 v.reset(OpAMD64MOVLconst) 1447 v.AuxInt = 0 1448 return true 1449 } 1450 // match: (ANDLconst [c] x) 1451 // cond: int32(c)==-1 1452 // result: x 1453 for { 1454 c := v.AuxInt 1455 x := v.Args[0] 1456 if !(int32(c) == -1) { 1457 break 1458 } 1459 v.reset(OpCopy) 1460 v.Type = x.Type 1461 v.AddArg(x) 1462 return true 1463 } 1464 // match: (ANDLconst [c] (MOVLconst [d])) 1465 // cond: 1466 // result: (MOVLconst [c&d]) 1467 for { 1468 c := v.AuxInt 1469 v_0 := v.Args[0] 1470 if v_0.Op != OpAMD64MOVLconst { 1471 break 1472 } 1473 d := v_0.AuxInt 1474 v.reset(OpAMD64MOVLconst) 1475 v.AuxInt = c & d 1476 return true 1477 } 1478 return false 1479 } 1480 func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool { 1481 b := v.Block 1482 _ = b 1483 // match: (ANDQ x (MOVQconst [c])) 1484 // cond: is32Bit(c) 1485 // result: (ANDQconst [c] x) 1486 for { 1487 x := v.Args[0] 1488 v_1 := v.Args[1] 1489 if v_1.Op != OpAMD64MOVQconst { 1490 break 1491 } 1492 c := v_1.AuxInt 1493 if !(is32Bit(c)) { 1494 break 1495 } 1496 v.reset(OpAMD64ANDQconst) 1497 v.AuxInt = c 1498 v.AddArg(x) 1499 return true 1500 } 1501 // match: (ANDQ (MOVQconst [c]) x) 1502 // cond: is32Bit(c) 1503 // result: (ANDQconst [c] x) 1504 for { 1505 v_0 := v.Args[0] 1506 if v_0.Op != OpAMD64MOVQconst { 1507 break 1508 } 1509 c := v_0.AuxInt 1510 x := v.Args[1] 1511 if !(is32Bit(c)) { 1512 break 1513 } 1514 v.reset(OpAMD64ANDQconst) 1515 v.AuxInt = c 1516 v.AddArg(x) 1517 return true 1518 } 1519 // match: (ANDQ x x) 1520 // cond: 1521 // result: x 1522 for { 1523 x := v.Args[0] 1524 if x != v.Args[1] { 1525 break 1526 } 1527 v.reset(OpCopy) 1528 v.Type = x.Type 1529 v.AddArg(x) 1530 return true 1531 } 1532 return false 1533 } 1534 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { 1535 b := v.Block 1536 _ = b 1537 // match: (ANDQconst [c] (ANDQconst [d] x)) 1538 // cond: 1539 // result: (ANDQconst [c & d] x) 1540 for { 1541 c := v.AuxInt 1542 v_0 := v.Args[0] 1543 if v_0.Op != OpAMD64ANDQconst { 1544 break 1545 } 1546 d := v_0.AuxInt 1547 x := v_0.Args[0] 1548 v.reset(OpAMD64ANDQconst) 1549 v.AuxInt = c & d 1550 v.AddArg(x) 1551 return true 1552 } 1553 // match: (ANDQconst [0xFF] x) 1554 // cond: 1555 // result: (MOVBQZX x) 1556 for { 1557 if v.AuxInt != 0xFF { 1558 break 1559 } 1560 x := v.Args[0] 1561 v.reset(OpAMD64MOVBQZX) 1562 v.AddArg(x) 1563 return true 1564 } 1565 // match: (ANDQconst [0xFFFF] x) 1566 // cond: 1567 // result: (MOVWQZX x) 1568 for { 1569 if v.AuxInt != 0xFFFF { 1570 break 1571 } 1572 x := v.Args[0] 1573 v.reset(OpAMD64MOVWQZX) 1574 v.AddArg(x) 1575 return true 1576 } 1577 // match: (ANDQconst [0xFFFFFFFF] x) 1578 // cond: 1579 // result: (MOVLQZX x) 1580 for { 1581 if v.AuxInt != 0xFFFFFFFF { 1582 break 1583 } 1584 x := v.Args[0] 1585 v.reset(OpAMD64MOVLQZX) 1586 v.AddArg(x) 1587 return true 1588 } 1589 // match: (ANDQconst [0] _) 1590 // cond: 1591 // result: (MOVQconst [0]) 1592 for { 1593 if v.AuxInt != 0 { 1594 break 1595 } 1596 v.reset(OpAMD64MOVQconst) 1597 v.AuxInt = 0 1598 return true 1599 } 1600 // match: (ANDQconst [-1] x) 1601 // cond: 1602 // result: x 1603 for { 1604 if v.AuxInt != -1 { 1605 break 1606 } 1607 x := v.Args[0] 1608 v.reset(OpCopy) 1609 v.Type = x.Type 1610 v.AddArg(x) 1611 return true 1612 } 1613 // match: (ANDQconst [c] 
(MOVQconst [d])) 1614 // cond: 1615 // result: (MOVQconst [c&d]) 1616 for { 1617 c := v.AuxInt 1618 v_0 := v.Args[0] 1619 if v_0.Op != OpAMD64MOVQconst { 1620 break 1621 } 1622 d := v_0.AuxInt 1623 v.reset(OpAMD64MOVQconst) 1624 v.AuxInt = c & d 1625 return true 1626 } 1627 return false 1628 } 1629 func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { 1630 b := v.Block 1631 _ = b 1632 // match: (CMPB x (MOVLconst [c])) 1633 // cond: 1634 // result: (CMPBconst x [int64(int8(c))]) 1635 for { 1636 x := v.Args[0] 1637 v_1 := v.Args[1] 1638 if v_1.Op != OpAMD64MOVLconst { 1639 break 1640 } 1641 c := v_1.AuxInt 1642 v.reset(OpAMD64CMPBconst) 1643 v.AuxInt = int64(int8(c)) 1644 v.AddArg(x) 1645 return true 1646 } 1647 // match: (CMPB (MOVLconst [c]) x) 1648 // cond: 1649 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 1650 for { 1651 v_0 := v.Args[0] 1652 if v_0.Op != OpAMD64MOVLconst { 1653 break 1654 } 1655 c := v_0.AuxInt 1656 x := v.Args[1] 1657 v.reset(OpAMD64InvertFlags) 1658 v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 1659 v0.AuxInt = int64(int8(c)) 1660 v0.AddArg(x) 1661 v.AddArg(v0) 1662 return true 1663 } 1664 return false 1665 } 1666 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { 1667 b := v.Block 1668 _ = b 1669 // match: (CMPBconst (MOVLconst [x]) [y]) 1670 // cond: int8(x)==int8(y) 1671 // result: (FlagEQ) 1672 for { 1673 y := v.AuxInt 1674 v_0 := v.Args[0] 1675 if v_0.Op != OpAMD64MOVLconst { 1676 break 1677 } 1678 x := v_0.AuxInt 1679 if !(int8(x) == int8(y)) { 1680 break 1681 } 1682 v.reset(OpAMD64FlagEQ) 1683 return true 1684 } 1685 // match: (CMPBconst (MOVLconst [x]) [y]) 1686 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 1687 // result: (FlagLT_ULT) 1688 for { 1689 y := v.AuxInt 1690 v_0 := v.Args[0] 1691 if v_0.Op != OpAMD64MOVLconst { 1692 break 1693 } 1694 x := v_0.AuxInt 1695 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 1696 break 1697 } 1698 v.reset(OpAMD64FlagLT_ULT) 1699 return true 1700 } 1701 // match: (CMPBconst (MOVLconst [x]) [y]) 1702 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 1703 // result: (FlagLT_UGT) 1704 for { 1705 y := v.AuxInt 1706 v_0 := v.Args[0] 1707 if v_0.Op != OpAMD64MOVLconst { 1708 break 1709 } 1710 x := v_0.AuxInt 1711 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 1712 break 1713 } 1714 v.reset(OpAMD64FlagLT_UGT) 1715 return true 1716 } 1717 // match: (CMPBconst (MOVLconst [x]) [y]) 1718 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 1719 // result: (FlagGT_ULT) 1720 for { 1721 y := v.AuxInt 1722 v_0 := v.Args[0] 1723 if v_0.Op != OpAMD64MOVLconst { 1724 break 1725 } 1726 x := v_0.AuxInt 1727 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 1728 break 1729 } 1730 v.reset(OpAMD64FlagGT_ULT) 1731 return true 1732 } 1733 // match: (CMPBconst (MOVLconst [x]) [y]) 1734 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 1735 // result: (FlagGT_UGT) 1736 for { 1737 y := v.AuxInt 1738 v_0 := v.Args[0] 1739 if v_0.Op != OpAMD64MOVLconst { 1740 break 1741 } 1742 x := v_0.AuxInt 1743 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 1744 break 1745 } 1746 v.reset(OpAMD64FlagGT_UGT) 1747 return true 1748 } 1749 // match: (CMPBconst (ANDLconst _ [m]) [n]) 1750 // cond: 0 <= int8(m) && int8(m) < int8(n) 1751 // result: (FlagLT_ULT) 1752 for { 1753 n := v.AuxInt 1754 v_0 := v.Args[0] 1755 if v_0.Op != OpAMD64ANDLconst { 1756 break 1757 } 1758 m := v_0.AuxInt 1759 if !(0 <= int8(m) && int8(m) < int8(n)) { 1760 break 1761 } 1762 v.reset(OpAMD64FlagLT_ULT) 1763 return true 1764 } 1765 // match: 
(CMPBconst (ANDL x y) [0]) 1766 // cond: 1767 // result: (TESTB x y) 1768 for { 1769 if v.AuxInt != 0 { 1770 break 1771 } 1772 v_0 := v.Args[0] 1773 if v_0.Op != OpAMD64ANDL { 1774 break 1775 } 1776 x := v_0.Args[0] 1777 y := v_0.Args[1] 1778 v.reset(OpAMD64TESTB) 1779 v.AddArg(x) 1780 v.AddArg(y) 1781 return true 1782 } 1783 // match: (CMPBconst (ANDLconst [c] x) [0]) 1784 // cond: 1785 // result: (TESTBconst [int64(int8(c))] x) 1786 for { 1787 if v.AuxInt != 0 { 1788 break 1789 } 1790 v_0 := v.Args[0] 1791 if v_0.Op != OpAMD64ANDLconst { 1792 break 1793 } 1794 c := v_0.AuxInt 1795 x := v_0.Args[0] 1796 v.reset(OpAMD64TESTBconst) 1797 v.AuxInt = int64(int8(c)) 1798 v.AddArg(x) 1799 return true 1800 } 1801 // match: (CMPBconst x [0]) 1802 // cond: 1803 // result: (TESTB x x) 1804 for { 1805 if v.AuxInt != 0 { 1806 break 1807 } 1808 x := v.Args[0] 1809 v.reset(OpAMD64TESTB) 1810 v.AddArg(x) 1811 v.AddArg(x) 1812 return true 1813 } 1814 return false 1815 } 1816 func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool { 1817 b := v.Block 1818 _ = b 1819 // match: (CMPL x (MOVLconst [c])) 1820 // cond: 1821 // result: (CMPLconst x [c]) 1822 for { 1823 x := v.Args[0] 1824 v_1 := v.Args[1] 1825 if v_1.Op != OpAMD64MOVLconst { 1826 break 1827 } 1828 c := v_1.AuxInt 1829 v.reset(OpAMD64CMPLconst) 1830 v.AuxInt = c 1831 v.AddArg(x) 1832 return true 1833 } 1834 // match: (CMPL (MOVLconst [c]) x) 1835 // cond: 1836 // result: (InvertFlags (CMPLconst x [c])) 1837 for { 1838 v_0 := v.Args[0] 1839 if v_0.Op != OpAMD64MOVLconst { 1840 break 1841 } 1842 c := v_0.AuxInt 1843 x := v.Args[1] 1844 v.reset(OpAMD64InvertFlags) 1845 v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 1846 v0.AuxInt = c 1847 v0.AddArg(x) 1848 v.AddArg(v0) 1849 return true 1850 } 1851 return false 1852 } 1853 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { 1854 b := v.Block 1855 _ = b 1856 // match: (CMPLconst (MOVLconst [x]) [y]) 1857 // cond: int32(x)==int32(y) 1858 // result: (FlagEQ) 1859 for { 1860 y := v.AuxInt 1861 v_0 := v.Args[0] 1862 if v_0.Op != OpAMD64MOVLconst { 1863 break 1864 } 1865 x := v_0.AuxInt 1866 if !(int32(x) == int32(y)) { 1867 break 1868 } 1869 v.reset(OpAMD64FlagEQ) 1870 return true 1871 } 1872 // match: (CMPLconst (MOVLconst [x]) [y]) 1873 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 1874 // result: (FlagLT_ULT) 1875 for { 1876 y := v.AuxInt 1877 v_0 := v.Args[0] 1878 if v_0.Op != OpAMD64MOVLconst { 1879 break 1880 } 1881 x := v_0.AuxInt 1882 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 1883 break 1884 } 1885 v.reset(OpAMD64FlagLT_ULT) 1886 return true 1887 } 1888 // match: (CMPLconst (MOVLconst [x]) [y]) 1889 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 1890 // result: (FlagLT_UGT) 1891 for { 1892 y := v.AuxInt 1893 v_0 := v.Args[0] 1894 if v_0.Op != OpAMD64MOVLconst { 1895 break 1896 } 1897 x := v_0.AuxInt 1898 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 1899 break 1900 } 1901 v.reset(OpAMD64FlagLT_UGT) 1902 return true 1903 } 1904 // match: (CMPLconst (MOVLconst [x]) [y]) 1905 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 1906 // result: (FlagGT_ULT) 1907 for { 1908 y := v.AuxInt 1909 v_0 := v.Args[0] 1910 if v_0.Op != OpAMD64MOVLconst { 1911 break 1912 } 1913 x := v_0.AuxInt 1914 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 1915 break 1916 } 1917 v.reset(OpAMD64FlagGT_ULT) 1918 return true 1919 } 1920 // match: (CMPLconst (MOVLconst [x]) [y]) 1921 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 1922 // result: (FlagGT_UGT) 
1923 for { 1924 y := v.AuxInt 1925 v_0 := v.Args[0] 1926 if v_0.Op != OpAMD64MOVLconst { 1927 break 1928 } 1929 x := v_0.AuxInt 1930 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 1931 break 1932 } 1933 v.reset(OpAMD64FlagGT_UGT) 1934 return true 1935 } 1936 // match: (CMPLconst (SHRLconst _ [c]) [n]) 1937 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 1938 // result: (FlagLT_ULT) 1939 for { 1940 n := v.AuxInt 1941 v_0 := v.Args[0] 1942 if v_0.Op != OpAMD64SHRLconst { 1943 break 1944 } 1945 c := v_0.AuxInt 1946 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 1947 break 1948 } 1949 v.reset(OpAMD64FlagLT_ULT) 1950 return true 1951 } 1952 // match: (CMPLconst (ANDLconst _ [m]) [n]) 1953 // cond: 0 <= int32(m) && int32(m) < int32(n) 1954 // result: (FlagLT_ULT) 1955 for { 1956 n := v.AuxInt 1957 v_0 := v.Args[0] 1958 if v_0.Op != OpAMD64ANDLconst { 1959 break 1960 } 1961 m := v_0.AuxInt 1962 if !(0 <= int32(m) && int32(m) < int32(n)) { 1963 break 1964 } 1965 v.reset(OpAMD64FlagLT_ULT) 1966 return true 1967 } 1968 // match: (CMPLconst (ANDL x y) [0]) 1969 // cond: 1970 // result: (TESTL x y) 1971 for { 1972 if v.AuxInt != 0 { 1973 break 1974 } 1975 v_0 := v.Args[0] 1976 if v_0.Op != OpAMD64ANDL { 1977 break 1978 } 1979 x := v_0.Args[0] 1980 y := v_0.Args[1] 1981 v.reset(OpAMD64TESTL) 1982 v.AddArg(x) 1983 v.AddArg(y) 1984 return true 1985 } 1986 // match: (CMPLconst (ANDLconst [c] x) [0]) 1987 // cond: 1988 // result: (TESTLconst [c] x) 1989 for { 1990 if v.AuxInt != 0 { 1991 break 1992 } 1993 v_0 := v.Args[0] 1994 if v_0.Op != OpAMD64ANDLconst { 1995 break 1996 } 1997 c := v_0.AuxInt 1998 x := v_0.Args[0] 1999 v.reset(OpAMD64TESTLconst) 2000 v.AuxInt = c 2001 v.AddArg(x) 2002 return true 2003 } 2004 // match: (CMPLconst x [0]) 2005 // cond: 2006 // result: (TESTL x x) 2007 for { 2008 if v.AuxInt != 0 { 2009 break 2010 } 2011 x := v.Args[0] 2012 v.reset(OpAMD64TESTL) 2013 v.AddArg(x) 2014 v.AddArg(x) 2015 return true 2016 } 2017 return false 2018 } 2019 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool { 2020 b := v.Block 2021 _ = b 2022 // match: (CMPQ x (MOVQconst [c])) 2023 // cond: is32Bit(c) 2024 // result: (CMPQconst x [c]) 2025 for { 2026 x := v.Args[0] 2027 v_1 := v.Args[1] 2028 if v_1.Op != OpAMD64MOVQconst { 2029 break 2030 } 2031 c := v_1.AuxInt 2032 if !(is32Bit(c)) { 2033 break 2034 } 2035 v.reset(OpAMD64CMPQconst) 2036 v.AuxInt = c 2037 v.AddArg(x) 2038 return true 2039 } 2040 // match: (CMPQ (MOVQconst [c]) x) 2041 // cond: is32Bit(c) 2042 // result: (InvertFlags (CMPQconst x [c])) 2043 for { 2044 v_0 := v.Args[0] 2045 if v_0.Op != OpAMD64MOVQconst { 2046 break 2047 } 2048 c := v_0.AuxInt 2049 x := v.Args[1] 2050 if !(is32Bit(c)) { 2051 break 2052 } 2053 v.reset(OpAMD64InvertFlags) 2054 v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 2055 v0.AuxInt = c 2056 v0.AddArg(x) 2057 v.AddArg(v0) 2058 return true 2059 } 2060 return false 2061 } 2062 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { 2063 b := v.Block 2064 _ = b 2065 // match: (CMPQconst (MOVQconst [x]) [y]) 2066 // cond: x==y 2067 // result: (FlagEQ) 2068 for { 2069 y := v.AuxInt 2070 v_0 := v.Args[0] 2071 if v_0.Op != OpAMD64MOVQconst { 2072 break 2073 } 2074 x := v_0.AuxInt 2075 if !(x == y) { 2076 break 2077 } 2078 v.reset(OpAMD64FlagEQ) 2079 return true 2080 } 2081 // match: (CMPQconst (MOVQconst [x]) [y]) 2082 // cond: x<y && uint64(x)<uint64(y) 2083 // result: (FlagLT_ULT) 2084 for { 2085 y := v.AuxInt 2086 v_0 := 
v.Args[0] 2087 if v_0.Op != OpAMD64MOVQconst { 2088 break 2089 } 2090 x := v_0.AuxInt 2091 if !(x < y && uint64(x) < uint64(y)) { 2092 break 2093 } 2094 v.reset(OpAMD64FlagLT_ULT) 2095 return true 2096 } 2097 // match: (CMPQconst (MOVQconst [x]) [y]) 2098 // cond: x<y && uint64(x)>uint64(y) 2099 // result: (FlagLT_UGT) 2100 for { 2101 y := v.AuxInt 2102 v_0 := v.Args[0] 2103 if v_0.Op != OpAMD64MOVQconst { 2104 break 2105 } 2106 x := v_0.AuxInt 2107 if !(x < y && uint64(x) > uint64(y)) { 2108 break 2109 } 2110 v.reset(OpAMD64FlagLT_UGT) 2111 return true 2112 } 2113 // match: (CMPQconst (MOVQconst [x]) [y]) 2114 // cond: x>y && uint64(x)<uint64(y) 2115 // result: (FlagGT_ULT) 2116 for { 2117 y := v.AuxInt 2118 v_0 := v.Args[0] 2119 if v_0.Op != OpAMD64MOVQconst { 2120 break 2121 } 2122 x := v_0.AuxInt 2123 if !(x > y && uint64(x) < uint64(y)) { 2124 break 2125 } 2126 v.reset(OpAMD64FlagGT_ULT) 2127 return true 2128 } 2129 // match: (CMPQconst (MOVQconst [x]) [y]) 2130 // cond: x>y && uint64(x)>uint64(y) 2131 // result: (FlagGT_UGT) 2132 for { 2133 y := v.AuxInt 2134 v_0 := v.Args[0] 2135 if v_0.Op != OpAMD64MOVQconst { 2136 break 2137 } 2138 x := v_0.AuxInt 2139 if !(x > y && uint64(x) > uint64(y)) { 2140 break 2141 } 2142 v.reset(OpAMD64FlagGT_UGT) 2143 return true 2144 } 2145 // match: (CMPQconst (MOVBQZX _) [c]) 2146 // cond: 0xFF < c 2147 // result: (FlagLT_ULT) 2148 for { 2149 c := v.AuxInt 2150 v_0 := v.Args[0] 2151 if v_0.Op != OpAMD64MOVBQZX { 2152 break 2153 } 2154 if !(0xFF < c) { 2155 break 2156 } 2157 v.reset(OpAMD64FlagLT_ULT) 2158 return true 2159 } 2160 // match: (CMPQconst (MOVWQZX _) [c]) 2161 // cond: 0xFFFF < c 2162 // result: (FlagLT_ULT) 2163 for { 2164 c := v.AuxInt 2165 v_0 := v.Args[0] 2166 if v_0.Op != OpAMD64MOVWQZX { 2167 break 2168 } 2169 if !(0xFFFF < c) { 2170 break 2171 } 2172 v.reset(OpAMD64FlagLT_ULT) 2173 return true 2174 } 2175 // match: (CMPQconst (MOVLQZX _) [c]) 2176 // cond: 0xFFFFFFFF < c 2177 // result: (FlagLT_ULT) 2178 for { 2179 c := v.AuxInt 2180 v_0 := v.Args[0] 2181 if v_0.Op != OpAMD64MOVLQZX { 2182 break 2183 } 2184 if !(0xFFFFFFFF < c) { 2185 break 2186 } 2187 v.reset(OpAMD64FlagLT_ULT) 2188 return true 2189 } 2190 // match: (CMPQconst (SHRQconst _ [c]) [n]) 2191 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 2192 // result: (FlagLT_ULT) 2193 for { 2194 n := v.AuxInt 2195 v_0 := v.Args[0] 2196 if v_0.Op != OpAMD64SHRQconst { 2197 break 2198 } 2199 c := v_0.AuxInt 2200 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 2201 break 2202 } 2203 v.reset(OpAMD64FlagLT_ULT) 2204 return true 2205 } 2206 // match: (CMPQconst (ANDQconst _ [m]) [n]) 2207 // cond: 0 <= m && m < n 2208 // result: (FlagLT_ULT) 2209 for { 2210 n := v.AuxInt 2211 v_0 := v.Args[0] 2212 if v_0.Op != OpAMD64ANDQconst { 2213 break 2214 } 2215 m := v_0.AuxInt 2216 if !(0 <= m && m < n) { 2217 break 2218 } 2219 v.reset(OpAMD64FlagLT_ULT) 2220 return true 2221 } 2222 // match: (CMPQconst (ANDQ x y) [0]) 2223 // cond: 2224 // result: (TESTQ x y) 2225 for { 2226 if v.AuxInt != 0 { 2227 break 2228 } 2229 v_0 := v.Args[0] 2230 if v_0.Op != OpAMD64ANDQ { 2231 break 2232 } 2233 x := v_0.Args[0] 2234 y := v_0.Args[1] 2235 v.reset(OpAMD64TESTQ) 2236 v.AddArg(x) 2237 v.AddArg(y) 2238 return true 2239 } 2240 // match: (CMPQconst (ANDQconst [c] x) [0]) 2241 // cond: 2242 // result: (TESTQconst [c] x) 2243 for { 2244 if v.AuxInt != 0 { 2245 break 2246 } 2247 v_0 := v.Args[0] 2248 if v_0.Op != OpAMD64ANDQconst { 2249 break 2250 } 2251 c := v_0.AuxInt 
2252 x := v_0.Args[0] 2253 v.reset(OpAMD64TESTQconst) 2254 v.AuxInt = c 2255 v.AddArg(x) 2256 return true 2257 } 2258 // match: (CMPQconst x [0]) 2259 // cond: 2260 // result: (TESTQ x x) 2261 for { 2262 if v.AuxInt != 0 { 2263 break 2264 } 2265 x := v.Args[0] 2266 v.reset(OpAMD64TESTQ) 2267 v.AddArg(x) 2268 v.AddArg(x) 2269 return true 2270 } 2271 return false 2272 } 2273 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { 2274 b := v.Block 2275 _ = b 2276 // match: (CMPW x (MOVLconst [c])) 2277 // cond: 2278 // result: (CMPWconst x [int64(int16(c))]) 2279 for { 2280 x := v.Args[0] 2281 v_1 := v.Args[1] 2282 if v_1.Op != OpAMD64MOVLconst { 2283 break 2284 } 2285 c := v_1.AuxInt 2286 v.reset(OpAMD64CMPWconst) 2287 v.AuxInt = int64(int16(c)) 2288 v.AddArg(x) 2289 return true 2290 } 2291 // match: (CMPW (MOVLconst [c]) x) 2292 // cond: 2293 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 2294 for { 2295 v_0 := v.Args[0] 2296 if v_0.Op != OpAMD64MOVLconst { 2297 break 2298 } 2299 c := v_0.AuxInt 2300 x := v.Args[1] 2301 v.reset(OpAMD64InvertFlags) 2302 v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 2303 v0.AuxInt = int64(int16(c)) 2304 v0.AddArg(x) 2305 v.AddArg(v0) 2306 return true 2307 } 2308 return false 2309 } 2310 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { 2311 b := v.Block 2312 _ = b 2313 // match: (CMPWconst (MOVLconst [x]) [y]) 2314 // cond: int16(x)==int16(y) 2315 // result: (FlagEQ) 2316 for { 2317 y := v.AuxInt 2318 v_0 := v.Args[0] 2319 if v_0.Op != OpAMD64MOVLconst { 2320 break 2321 } 2322 x := v_0.AuxInt 2323 if !(int16(x) == int16(y)) { 2324 break 2325 } 2326 v.reset(OpAMD64FlagEQ) 2327 return true 2328 } 2329 // match: (CMPWconst (MOVLconst [x]) [y]) 2330 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 2331 // result: (FlagLT_ULT) 2332 for { 2333 y := v.AuxInt 2334 v_0 := v.Args[0] 2335 if v_0.Op != OpAMD64MOVLconst { 2336 break 2337 } 2338 x := v_0.AuxInt 2339 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 2340 break 2341 } 2342 v.reset(OpAMD64FlagLT_ULT) 2343 return true 2344 } 2345 // match: (CMPWconst (MOVLconst [x]) [y]) 2346 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 2347 // result: (FlagLT_UGT) 2348 for { 2349 y := v.AuxInt 2350 v_0 := v.Args[0] 2351 if v_0.Op != OpAMD64MOVLconst { 2352 break 2353 } 2354 x := v_0.AuxInt 2355 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 2356 break 2357 } 2358 v.reset(OpAMD64FlagLT_UGT) 2359 return true 2360 } 2361 // match: (CMPWconst (MOVLconst [x]) [y]) 2362 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 2363 // result: (FlagGT_ULT) 2364 for { 2365 y := v.AuxInt 2366 v_0 := v.Args[0] 2367 if v_0.Op != OpAMD64MOVLconst { 2368 break 2369 } 2370 x := v_0.AuxInt 2371 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 2372 break 2373 } 2374 v.reset(OpAMD64FlagGT_ULT) 2375 return true 2376 } 2377 // match: (CMPWconst (MOVLconst [x]) [y]) 2378 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 2379 // result: (FlagGT_UGT) 2380 for { 2381 y := v.AuxInt 2382 v_0 := v.Args[0] 2383 if v_0.Op != OpAMD64MOVLconst { 2384 break 2385 } 2386 x := v_0.AuxInt 2387 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 2388 break 2389 } 2390 v.reset(OpAMD64FlagGT_UGT) 2391 return true 2392 } 2393 // match: (CMPWconst (ANDLconst _ [m]) [n]) 2394 // cond: 0 <= int16(m) && int16(m) < int16(n) 2395 // result: (FlagLT_ULT) 2396 for { 2397 n := v.AuxInt 2398 v_0 := v.Args[0] 2399 if v_0.Op != OpAMD64ANDLconst { 2400 break 2401 } 2402 m := v_0.AuxInt 2403 if !(0 <= int16(m) 
&& int16(m) < int16(n)) { 2404 break 2405 } 2406 v.reset(OpAMD64FlagLT_ULT) 2407 return true 2408 } 2409 // match: (CMPWconst (ANDL x y) [0]) 2410 // cond: 2411 // result: (TESTW x y) 2412 for { 2413 if v.AuxInt != 0 { 2414 break 2415 } 2416 v_0 := v.Args[0] 2417 if v_0.Op != OpAMD64ANDL { 2418 break 2419 } 2420 x := v_0.Args[0] 2421 y := v_0.Args[1] 2422 v.reset(OpAMD64TESTW) 2423 v.AddArg(x) 2424 v.AddArg(y) 2425 return true 2426 } 2427 // match: (CMPWconst (ANDLconst [c] x) [0]) 2428 // cond: 2429 // result: (TESTWconst [int64(int16(c))] x) 2430 for { 2431 if v.AuxInt != 0 { 2432 break 2433 } 2434 v_0 := v.Args[0] 2435 if v_0.Op != OpAMD64ANDLconst { 2436 break 2437 } 2438 c := v_0.AuxInt 2439 x := v_0.Args[0] 2440 v.reset(OpAMD64TESTWconst) 2441 v.AuxInt = int64(int16(c)) 2442 v.AddArg(x) 2443 return true 2444 } 2445 // match: (CMPWconst x [0]) 2446 // cond: 2447 // result: (TESTW x x) 2448 for { 2449 if v.AuxInt != 0 { 2450 break 2451 } 2452 x := v.Args[0] 2453 v.reset(OpAMD64TESTW) 2454 v.AddArg(x) 2455 v.AddArg(x) 2456 return true 2457 } 2458 return false 2459 } 2460 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool { 2461 b := v.Block 2462 _ = b 2463 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2464 // cond: is32Bit(off1+off2) 2465 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 2466 for { 2467 off1 := v.AuxInt 2468 sym := v.Aux 2469 v_0 := v.Args[0] 2470 if v_0.Op != OpAMD64ADDQconst { 2471 break 2472 } 2473 off2 := v_0.AuxInt 2474 ptr := v_0.Args[0] 2475 old := v.Args[1] 2476 new_ := v.Args[2] 2477 mem := v.Args[3] 2478 if !(is32Bit(off1 + off2)) { 2479 break 2480 } 2481 v.reset(OpAMD64CMPXCHGLlock) 2482 v.AuxInt = off1 + off2 2483 v.Aux = sym 2484 v.AddArg(ptr) 2485 v.AddArg(old) 2486 v.AddArg(new_) 2487 v.AddArg(mem) 2488 return true 2489 } 2490 return false 2491 } 2492 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool { 2493 b := v.Block 2494 _ = b 2495 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2496 // cond: is32Bit(off1+off2) 2497 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 2498 for { 2499 off1 := v.AuxInt 2500 sym := v.Aux 2501 v_0 := v.Args[0] 2502 if v_0.Op != OpAMD64ADDQconst { 2503 break 2504 } 2505 off2 := v_0.AuxInt 2506 ptr := v_0.Args[0] 2507 old := v.Args[1] 2508 new_ := v.Args[2] 2509 mem := v.Args[3] 2510 if !(is32Bit(off1 + off2)) { 2511 break 2512 } 2513 v.reset(OpAMD64CMPXCHGQlock) 2514 v.AuxInt = off1 + off2 2515 v.Aux = sym 2516 v.AddArg(ptr) 2517 v.AddArg(old) 2518 v.AddArg(new_) 2519 v.AddArg(mem) 2520 return true 2521 } 2522 return false 2523 } 2524 func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool { 2525 b := v.Block 2526 _ = b 2527 // match: (LEAL [c] {s} (ADDLconst [d] x)) 2528 // cond: is32Bit(c+d) 2529 // result: (LEAL [c+d] {s} x) 2530 for { 2531 c := v.AuxInt 2532 s := v.Aux 2533 v_0 := v.Args[0] 2534 if v_0.Op != OpAMD64ADDLconst { 2535 break 2536 } 2537 d := v_0.AuxInt 2538 x := v_0.Args[0] 2539 if !(is32Bit(c + d)) { 2540 break 2541 } 2542 v.reset(OpAMD64LEAL) 2543 v.AuxInt = c + d 2544 v.Aux = s 2545 v.AddArg(x) 2546 return true 2547 } 2548 return false 2549 } 2550 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { 2551 b := v.Block 2552 _ = b 2553 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 2554 // cond: is32Bit(c+d) 2555 // result: (LEAQ [c+d] {s} x) 2556 for { 2557 c := v.AuxInt 2558 s := v.Aux 2559 v_0 := v.Args[0] 2560 if v_0.Op != OpAMD64ADDQconst { 2561 break 2562 } 2563 
d := v_0.AuxInt 2564 x := v_0.Args[0] 2565 if !(is32Bit(c + d)) { 2566 break 2567 } 2568 v.reset(OpAMD64LEAQ) 2569 v.AuxInt = c + d 2570 v.Aux = s 2571 v.AddArg(x) 2572 return true 2573 } 2574 // match: (LEAQ [c] {s} (ADDQ x y)) 2575 // cond: x.Op != OpSB && y.Op != OpSB 2576 // result: (LEAQ1 [c] {s} x y) 2577 for { 2578 c := v.AuxInt 2579 s := v.Aux 2580 v_0 := v.Args[0] 2581 if v_0.Op != OpAMD64ADDQ { 2582 break 2583 } 2584 x := v_0.Args[0] 2585 y := v_0.Args[1] 2586 if !(x.Op != OpSB && y.Op != OpSB) { 2587 break 2588 } 2589 v.reset(OpAMD64LEAQ1) 2590 v.AuxInt = c 2591 v.Aux = s 2592 v.AddArg(x) 2593 v.AddArg(y) 2594 return true 2595 } 2596 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 2597 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2598 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 2599 for { 2600 off1 := v.AuxInt 2601 sym1 := v.Aux 2602 v_0 := v.Args[0] 2603 if v_0.Op != OpAMD64LEAQ { 2604 break 2605 } 2606 off2 := v_0.AuxInt 2607 sym2 := v_0.Aux 2608 x := v_0.Args[0] 2609 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2610 break 2611 } 2612 v.reset(OpAMD64LEAQ) 2613 v.AuxInt = off1 + off2 2614 v.Aux = mergeSym(sym1, sym2) 2615 v.AddArg(x) 2616 return true 2617 } 2618 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 2619 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2620 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2621 for { 2622 off1 := v.AuxInt 2623 sym1 := v.Aux 2624 v_0 := v.Args[0] 2625 if v_0.Op != OpAMD64LEAQ1 { 2626 break 2627 } 2628 off2 := v_0.AuxInt 2629 sym2 := v_0.Aux 2630 x := v_0.Args[0] 2631 y := v_0.Args[1] 2632 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2633 break 2634 } 2635 v.reset(OpAMD64LEAQ1) 2636 v.AuxInt = off1 + off2 2637 v.Aux = mergeSym(sym1, sym2) 2638 v.AddArg(x) 2639 v.AddArg(y) 2640 return true 2641 } 2642 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 2643 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2644 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 2645 for { 2646 off1 := v.AuxInt 2647 sym1 := v.Aux 2648 v_0 := v.Args[0] 2649 if v_0.Op != OpAMD64LEAQ2 { 2650 break 2651 } 2652 off2 := v_0.AuxInt 2653 sym2 := v_0.Aux 2654 x := v_0.Args[0] 2655 y := v_0.Args[1] 2656 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2657 break 2658 } 2659 v.reset(OpAMD64LEAQ2) 2660 v.AuxInt = off1 + off2 2661 v.Aux = mergeSym(sym1, sym2) 2662 v.AddArg(x) 2663 v.AddArg(y) 2664 return true 2665 } 2666 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 2667 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2668 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 2669 for { 2670 off1 := v.AuxInt 2671 sym1 := v.Aux 2672 v_0 := v.Args[0] 2673 if v_0.Op != OpAMD64LEAQ4 { 2674 break 2675 } 2676 off2 := v_0.AuxInt 2677 sym2 := v_0.Aux 2678 x := v_0.Args[0] 2679 y := v_0.Args[1] 2680 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2681 break 2682 } 2683 v.reset(OpAMD64LEAQ4) 2684 v.AuxInt = off1 + off2 2685 v.Aux = mergeSym(sym1, sym2) 2686 v.AddArg(x) 2687 v.AddArg(y) 2688 return true 2689 } 2690 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 2691 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2692 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 2693 for { 2694 off1 := v.AuxInt 2695 sym1 := v.Aux 2696 v_0 := v.Args[0] 2697 if v_0.Op != OpAMD64LEAQ8 { 2698 break 2699 } 2700 off2 := v_0.AuxInt 2701 sym2 := v_0.Aux 2702 x := v_0.Args[0] 2703 y := v_0.Args[1] 2704 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2705 
break 2706 } 2707 v.reset(OpAMD64LEAQ8) 2708 v.AuxInt = off1 + off2 2709 v.Aux = mergeSym(sym1, sym2) 2710 v.AddArg(x) 2711 v.AddArg(y) 2712 return true 2713 } 2714 return false 2715 } 2716 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { 2717 b := v.Block 2718 _ = b 2719 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 2720 // cond: is32Bit(c+d) && x.Op != OpSB 2721 // result: (LEAQ1 [c+d] {s} x y) 2722 for { 2723 c := v.AuxInt 2724 s := v.Aux 2725 v_0 := v.Args[0] 2726 if v_0.Op != OpAMD64ADDQconst { 2727 break 2728 } 2729 d := v_0.AuxInt 2730 x := v_0.Args[0] 2731 y := v.Args[1] 2732 if !(is32Bit(c+d) && x.Op != OpSB) { 2733 break 2734 } 2735 v.reset(OpAMD64LEAQ1) 2736 v.AuxInt = c + d 2737 v.Aux = s 2738 v.AddArg(x) 2739 v.AddArg(y) 2740 return true 2741 } 2742 // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) 2743 // cond: is32Bit(c+d) && y.Op != OpSB 2744 // result: (LEAQ1 [c+d] {s} x y) 2745 for { 2746 c := v.AuxInt 2747 s := v.Aux 2748 x := v.Args[0] 2749 v_1 := v.Args[1] 2750 if v_1.Op != OpAMD64ADDQconst { 2751 break 2752 } 2753 d := v_1.AuxInt 2754 y := v_1.Args[0] 2755 if !(is32Bit(c+d) && y.Op != OpSB) { 2756 break 2757 } 2758 v.reset(OpAMD64LEAQ1) 2759 v.AuxInt = c + d 2760 v.Aux = s 2761 v.AddArg(x) 2762 v.AddArg(y) 2763 return true 2764 } 2765 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 2766 // cond: 2767 // result: (LEAQ2 [c] {s} x y) 2768 for { 2769 c := v.AuxInt 2770 s := v.Aux 2771 x := v.Args[0] 2772 v_1 := v.Args[1] 2773 if v_1.Op != OpAMD64SHLQconst { 2774 break 2775 } 2776 if v_1.AuxInt != 1 { 2777 break 2778 } 2779 y := v_1.Args[0] 2780 v.reset(OpAMD64LEAQ2) 2781 v.AuxInt = c 2782 v.Aux = s 2783 v.AddArg(x) 2784 v.AddArg(y) 2785 return true 2786 } 2787 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 2788 // cond: 2789 // result: (LEAQ2 [c] {s} y x) 2790 for { 2791 c := v.AuxInt 2792 s := v.Aux 2793 v_0 := v.Args[0] 2794 if v_0.Op != OpAMD64SHLQconst { 2795 break 2796 } 2797 if v_0.AuxInt != 1 { 2798 break 2799 } 2800 x := v_0.Args[0] 2801 y := v.Args[1] 2802 v.reset(OpAMD64LEAQ2) 2803 v.AuxInt = c 2804 v.Aux = s 2805 v.AddArg(y) 2806 v.AddArg(x) 2807 return true 2808 } 2809 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 2810 // cond: 2811 // result: (LEAQ4 [c] {s} x y) 2812 for { 2813 c := v.AuxInt 2814 s := v.Aux 2815 x := v.Args[0] 2816 v_1 := v.Args[1] 2817 if v_1.Op != OpAMD64SHLQconst { 2818 break 2819 } 2820 if v_1.AuxInt != 2 { 2821 break 2822 } 2823 y := v_1.Args[0] 2824 v.reset(OpAMD64LEAQ4) 2825 v.AuxInt = c 2826 v.Aux = s 2827 v.AddArg(x) 2828 v.AddArg(y) 2829 return true 2830 } 2831 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 2832 // cond: 2833 // result: (LEAQ4 [c] {s} y x) 2834 for { 2835 c := v.AuxInt 2836 s := v.Aux 2837 v_0 := v.Args[0] 2838 if v_0.Op != OpAMD64SHLQconst { 2839 break 2840 } 2841 if v_0.AuxInt != 2 { 2842 break 2843 } 2844 x := v_0.Args[0] 2845 y := v.Args[1] 2846 v.reset(OpAMD64LEAQ4) 2847 v.AuxInt = c 2848 v.Aux = s 2849 v.AddArg(y) 2850 v.AddArg(x) 2851 return true 2852 } 2853 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 2854 // cond: 2855 // result: (LEAQ8 [c] {s} x y) 2856 for { 2857 c := v.AuxInt 2858 s := v.Aux 2859 x := v.Args[0] 2860 v_1 := v.Args[1] 2861 if v_1.Op != OpAMD64SHLQconst { 2862 break 2863 } 2864 if v_1.AuxInt != 3 { 2865 break 2866 } 2867 y := v_1.Args[0] 2868 v.reset(OpAMD64LEAQ8) 2869 v.AuxInt = c 2870 v.Aux = s 2871 v.AddArg(x) 2872 v.AddArg(y) 2873 return true 2874 } 2875 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) 2876 // cond: 2877 // result: (LEAQ8 [c] {s} y x) 2878 for { 2879 c := 
v.AuxInt 2880 s := v.Aux 2881 v_0 := v.Args[0] 2882 if v_0.Op != OpAMD64SHLQconst { 2883 break 2884 } 2885 if v_0.AuxInt != 3 { 2886 break 2887 } 2888 x := v_0.Args[0] 2889 y := v.Args[1] 2890 v.reset(OpAMD64LEAQ8) 2891 v.AuxInt = c 2892 v.Aux = s 2893 v.AddArg(y) 2894 v.AddArg(x) 2895 return true 2896 } 2897 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 2898 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 2899 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2900 for { 2901 off1 := v.AuxInt 2902 sym1 := v.Aux 2903 v_0 := v.Args[0] 2904 if v_0.Op != OpAMD64LEAQ { 2905 break 2906 } 2907 off2 := v_0.AuxInt 2908 sym2 := v_0.Aux 2909 x := v_0.Args[0] 2910 y := v.Args[1] 2911 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 2912 break 2913 } 2914 v.reset(OpAMD64LEAQ1) 2915 v.AuxInt = off1 + off2 2916 v.Aux = mergeSym(sym1, sym2) 2917 v.AddArg(x) 2918 v.AddArg(y) 2919 return true 2920 } 2921 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 2922 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 2923 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2924 for { 2925 off1 := v.AuxInt 2926 sym1 := v.Aux 2927 x := v.Args[0] 2928 v_1 := v.Args[1] 2929 if v_1.Op != OpAMD64LEAQ { 2930 break 2931 } 2932 off2 := v_1.AuxInt 2933 sym2 := v_1.Aux 2934 y := v_1.Args[0] 2935 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 2936 break 2937 } 2938 v.reset(OpAMD64LEAQ1) 2939 v.AuxInt = off1 + off2 2940 v.Aux = mergeSym(sym1, sym2) 2941 v.AddArg(x) 2942 v.AddArg(y) 2943 return true 2944 } 2945 return false 2946 } 2947 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 2948 b := v.Block 2949 _ = b 2950 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 2951 // cond: is32Bit(c+d) && x.Op != OpSB 2952 // result: (LEAQ2 [c+d] {s} x y) 2953 for { 2954 c := v.AuxInt 2955 s := v.Aux 2956 v_0 := v.Args[0] 2957 if v_0.Op != OpAMD64ADDQconst { 2958 break 2959 } 2960 d := v_0.AuxInt 2961 x := v_0.Args[0] 2962 y := v.Args[1] 2963 if !(is32Bit(c+d) && x.Op != OpSB) { 2964 break 2965 } 2966 v.reset(OpAMD64LEAQ2) 2967 v.AuxInt = c + d 2968 v.Aux = s 2969 v.AddArg(x) 2970 v.AddArg(y) 2971 return true 2972 } 2973 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 2974 // cond: is32Bit(c+2*d) && y.Op != OpSB 2975 // result: (LEAQ2 [c+2*d] {s} x y) 2976 for { 2977 c := v.AuxInt 2978 s := v.Aux 2979 x := v.Args[0] 2980 v_1 := v.Args[1] 2981 if v_1.Op != OpAMD64ADDQconst { 2982 break 2983 } 2984 d := v_1.AuxInt 2985 y := v_1.Args[0] 2986 if !(is32Bit(c+2*d) && y.Op != OpSB) { 2987 break 2988 } 2989 v.reset(OpAMD64LEAQ2) 2990 v.AuxInt = c + 2*d 2991 v.Aux = s 2992 v.AddArg(x) 2993 v.AddArg(y) 2994 return true 2995 } 2996 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 2997 // cond: 2998 // result: (LEAQ4 [c] {s} x y) 2999 for { 3000 c := v.AuxInt 3001 s := v.Aux 3002 x := v.Args[0] 3003 v_1 := v.Args[1] 3004 if v_1.Op != OpAMD64SHLQconst { 3005 break 3006 } 3007 if v_1.AuxInt != 1 { 3008 break 3009 } 3010 y := v_1.Args[0] 3011 v.reset(OpAMD64LEAQ4) 3012 v.AuxInt = c 3013 v.Aux = s 3014 v.AddArg(x) 3015 v.AddArg(y) 3016 return true 3017 } 3018 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 3019 // cond: 3020 // result: (LEAQ8 [c] {s} x y) 3021 for { 3022 c := v.AuxInt 3023 s := v.Aux 3024 x := v.Args[0] 3025 v_1 := v.Args[1] 3026 if v_1.Op != OpAMD64SHLQconst { 3027 break 3028 } 3029 if v_1.AuxInt != 2 { 3030 break 3031 } 3032 y := v_1.Args[0] 3033 v.reset(OpAMD64LEAQ8) 3034 v.AuxInt = c 3035 v.Aux = s 3036 
v.AddArg(x) 3037 v.AddArg(y) 3038 return true 3039 } 3040 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3041 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3042 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3043 for { 3044 off1 := v.AuxInt 3045 sym1 := v.Aux 3046 v_0 := v.Args[0] 3047 if v_0.Op != OpAMD64LEAQ { 3048 break 3049 } 3050 off2 := v_0.AuxInt 3051 sym2 := v_0.Aux 3052 x := v_0.Args[0] 3053 y := v.Args[1] 3054 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3055 break 3056 } 3057 v.reset(OpAMD64LEAQ2) 3058 v.AuxInt = off1 + off2 3059 v.Aux = mergeSym(sym1, sym2) 3060 v.AddArg(x) 3061 v.AddArg(y) 3062 return true 3063 } 3064 return false 3065 } 3066 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 3067 b := v.Block 3068 _ = b 3069 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 3070 // cond: is32Bit(c+d) && x.Op != OpSB 3071 // result: (LEAQ4 [c+d] {s} x y) 3072 for { 3073 c := v.AuxInt 3074 s := v.Aux 3075 v_0 := v.Args[0] 3076 if v_0.Op != OpAMD64ADDQconst { 3077 break 3078 } 3079 d := v_0.AuxInt 3080 x := v_0.Args[0] 3081 y := v.Args[1] 3082 if !(is32Bit(c+d) && x.Op != OpSB) { 3083 break 3084 } 3085 v.reset(OpAMD64LEAQ4) 3086 v.AuxInt = c + d 3087 v.Aux = s 3088 v.AddArg(x) 3089 v.AddArg(y) 3090 return true 3091 } 3092 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 3093 // cond: is32Bit(c+4*d) && y.Op != OpSB 3094 // result: (LEAQ4 [c+4*d] {s} x y) 3095 for { 3096 c := v.AuxInt 3097 s := v.Aux 3098 x := v.Args[0] 3099 v_1 := v.Args[1] 3100 if v_1.Op != OpAMD64ADDQconst { 3101 break 3102 } 3103 d := v_1.AuxInt 3104 y := v_1.Args[0] 3105 if !(is32Bit(c+4*d) && y.Op != OpSB) { 3106 break 3107 } 3108 v.reset(OpAMD64LEAQ4) 3109 v.AuxInt = c + 4*d 3110 v.Aux = s 3111 v.AddArg(x) 3112 v.AddArg(y) 3113 return true 3114 } 3115 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 3116 // cond: 3117 // result: (LEAQ8 [c] {s} x y) 3118 for { 3119 c := v.AuxInt 3120 s := v.Aux 3121 x := v.Args[0] 3122 v_1 := v.Args[1] 3123 if v_1.Op != OpAMD64SHLQconst { 3124 break 3125 } 3126 if v_1.AuxInt != 1 { 3127 break 3128 } 3129 y := v_1.Args[0] 3130 v.reset(OpAMD64LEAQ8) 3131 v.AuxInt = c 3132 v.Aux = s 3133 v.AddArg(x) 3134 v.AddArg(y) 3135 return true 3136 } 3137 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3138 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3139 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3140 for { 3141 off1 := v.AuxInt 3142 sym1 := v.Aux 3143 v_0 := v.Args[0] 3144 if v_0.Op != OpAMD64LEAQ { 3145 break 3146 } 3147 off2 := v_0.AuxInt 3148 sym2 := v_0.Aux 3149 x := v_0.Args[0] 3150 y := v.Args[1] 3151 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3152 break 3153 } 3154 v.reset(OpAMD64LEAQ4) 3155 v.AuxInt = off1 + off2 3156 v.Aux = mergeSym(sym1, sym2) 3157 v.AddArg(x) 3158 v.AddArg(y) 3159 return true 3160 } 3161 return false 3162 } 3163 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 3164 b := v.Block 3165 _ = b 3166 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 3167 // cond: is32Bit(c+d) && x.Op != OpSB 3168 // result: (LEAQ8 [c+d] {s} x y) 3169 for { 3170 c := v.AuxInt 3171 s := v.Aux 3172 v_0 := v.Args[0] 3173 if v_0.Op != OpAMD64ADDQconst { 3174 break 3175 } 3176 d := v_0.AuxInt 3177 x := v_0.Args[0] 3178 y := v.Args[1] 3179 if !(is32Bit(c+d) && x.Op != OpSB) { 3180 break 3181 } 3182 v.reset(OpAMD64LEAQ8) 3183 v.AuxInt = c + d 3184 v.Aux = s 3185 v.AddArg(x) 3186 v.AddArg(y) 3187 return true 3188 } 3189 // 
match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 3190 // cond: is32Bit(c+8*d) && y.Op != OpSB 3191 // result: (LEAQ8 [c+8*d] {s} x y) 3192 for { 3193 c := v.AuxInt 3194 s := v.Aux 3195 x := v.Args[0] 3196 v_1 := v.Args[1] 3197 if v_1.Op != OpAMD64ADDQconst { 3198 break 3199 } 3200 d := v_1.AuxInt 3201 y := v_1.Args[0] 3202 if !(is32Bit(c+8*d) && y.Op != OpSB) { 3203 break 3204 } 3205 v.reset(OpAMD64LEAQ8) 3206 v.AuxInt = c + 8*d 3207 v.Aux = s 3208 v.AddArg(x) 3209 v.AddArg(y) 3210 return true 3211 } 3212 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3213 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3214 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3215 for { 3216 off1 := v.AuxInt 3217 sym1 := v.Aux 3218 v_0 := v.Args[0] 3219 if v_0.Op != OpAMD64LEAQ { 3220 break 3221 } 3222 off2 := v_0.AuxInt 3223 sym2 := v_0.Aux 3224 x := v_0.Args[0] 3225 y := v.Args[1] 3226 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3227 break 3228 } 3229 v.reset(OpAMD64LEAQ8) 3230 v.AuxInt = off1 + off2 3231 v.Aux = mergeSym(sym1, sym2) 3232 v.AddArg(x) 3233 v.AddArg(y) 3234 return true 3235 } 3236 return false 3237 } 3238 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 3239 b := v.Block 3240 _ = b 3241 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 3242 // cond: x.Uses == 1 && clobber(x) 3243 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3244 for { 3245 x := v.Args[0] 3246 if x.Op != OpAMD64MOVBload { 3247 break 3248 } 3249 off := x.AuxInt 3250 sym := x.Aux 3251 ptr := x.Args[0] 3252 mem := x.Args[1] 3253 if !(x.Uses == 1 && clobber(x)) { 3254 break 3255 } 3256 b = x.Block 3257 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3258 v.reset(OpCopy) 3259 v.AddArg(v0) 3260 v0.AuxInt = off 3261 v0.Aux = sym 3262 v0.AddArg(ptr) 3263 v0.AddArg(mem) 3264 return true 3265 } 3266 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 3267 // cond: x.Uses == 1 && clobber(x) 3268 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3269 for { 3270 x := v.Args[0] 3271 if x.Op != OpAMD64MOVWload { 3272 break 3273 } 3274 off := x.AuxInt 3275 sym := x.Aux 3276 ptr := x.Args[0] 3277 mem := x.Args[1] 3278 if !(x.Uses == 1 && clobber(x)) { 3279 break 3280 } 3281 b = x.Block 3282 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3283 v.reset(OpCopy) 3284 v.AddArg(v0) 3285 v0.AuxInt = off 3286 v0.Aux = sym 3287 v0.AddArg(ptr) 3288 v0.AddArg(mem) 3289 return true 3290 } 3291 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 3292 // cond: x.Uses == 1 && clobber(x) 3293 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3294 for { 3295 x := v.Args[0] 3296 if x.Op != OpAMD64MOVLload { 3297 break 3298 } 3299 off := x.AuxInt 3300 sym := x.Aux 3301 ptr := x.Args[0] 3302 mem := x.Args[1] 3303 if !(x.Uses == 1 && clobber(x)) { 3304 break 3305 } 3306 b = x.Block 3307 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3308 v.reset(OpCopy) 3309 v.AddArg(v0) 3310 v0.AuxInt = off 3311 v0.Aux = sym 3312 v0.AddArg(ptr) 3313 v0.AddArg(mem) 3314 return true 3315 } 3316 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 3317 // cond: x.Uses == 1 && clobber(x) 3318 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3319 for { 3320 x := v.Args[0] 3321 if x.Op != OpAMD64MOVQload { 3322 break 3323 } 3324 off := x.AuxInt 3325 sym := x.Aux 3326 ptr := x.Args[0] 3327 mem := x.Args[1] 3328 if !(x.Uses == 1 && clobber(x)) { 3329 break 3330 } 3331 b = x.Block 3332 v0 := b.NewValue0(v.Line, 
OpAMD64MOVBQSXload, v.Type) 3333 v.reset(OpCopy) 3334 v.AddArg(v0) 3335 v0.AuxInt = off 3336 v0.Aux = sym 3337 v0.AddArg(ptr) 3338 v0.AddArg(mem) 3339 return true 3340 } 3341 // match: (MOVBQSX (ANDLconst [c] x)) 3342 // cond: c & 0x80 == 0 3343 // result: (ANDLconst [c & 0x7f] x) 3344 for { 3345 v_0 := v.Args[0] 3346 if v_0.Op != OpAMD64ANDLconst { 3347 break 3348 } 3349 c := v_0.AuxInt 3350 x := v_0.Args[0] 3351 if !(c&0x80 == 0) { 3352 break 3353 } 3354 v.reset(OpAMD64ANDLconst) 3355 v.AuxInt = c & 0x7f 3356 v.AddArg(x) 3357 return true 3358 } 3359 return false 3360 } 3361 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 3362 b := v.Block 3363 _ = b 3364 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3365 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3366 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3367 for { 3368 off1 := v.AuxInt 3369 sym1 := v.Aux 3370 v_0 := v.Args[0] 3371 if v_0.Op != OpAMD64LEAQ { 3372 break 3373 } 3374 off2 := v_0.AuxInt 3375 sym2 := v_0.Aux 3376 base := v_0.Args[0] 3377 mem := v.Args[1] 3378 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3379 break 3380 } 3381 v.reset(OpAMD64MOVBQSXload) 3382 v.AuxInt = off1 + off2 3383 v.Aux = mergeSym(sym1, sym2) 3384 v.AddArg(base) 3385 v.AddArg(mem) 3386 return true 3387 } 3388 return false 3389 } 3390 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 3391 b := v.Block 3392 _ = b 3393 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 3394 // cond: x.Uses == 1 && clobber(x) 3395 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3396 for { 3397 x := v.Args[0] 3398 if x.Op != OpAMD64MOVBload { 3399 break 3400 } 3401 off := x.AuxInt 3402 sym := x.Aux 3403 ptr := x.Args[0] 3404 mem := x.Args[1] 3405 if !(x.Uses == 1 && clobber(x)) { 3406 break 3407 } 3408 b = x.Block 3409 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3410 v.reset(OpCopy) 3411 v.AddArg(v0) 3412 v0.AuxInt = off 3413 v0.Aux = sym 3414 v0.AddArg(ptr) 3415 v0.AddArg(mem) 3416 return true 3417 } 3418 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 3419 // cond: x.Uses == 1 && clobber(x) 3420 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3421 for { 3422 x := v.Args[0] 3423 if x.Op != OpAMD64MOVWload { 3424 break 3425 } 3426 off := x.AuxInt 3427 sym := x.Aux 3428 ptr := x.Args[0] 3429 mem := x.Args[1] 3430 if !(x.Uses == 1 && clobber(x)) { 3431 break 3432 } 3433 b = x.Block 3434 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3435 v.reset(OpCopy) 3436 v.AddArg(v0) 3437 v0.AuxInt = off 3438 v0.Aux = sym 3439 v0.AddArg(ptr) 3440 v0.AddArg(mem) 3441 return true 3442 } 3443 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 3444 // cond: x.Uses == 1 && clobber(x) 3445 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3446 for { 3447 x := v.Args[0] 3448 if x.Op != OpAMD64MOVLload { 3449 break 3450 } 3451 off := x.AuxInt 3452 sym := x.Aux 3453 ptr := x.Args[0] 3454 mem := x.Args[1] 3455 if !(x.Uses == 1 && clobber(x)) { 3456 break 3457 } 3458 b = x.Block 3459 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3460 v.reset(OpCopy) 3461 v.AddArg(v0) 3462 v0.AuxInt = off 3463 v0.Aux = sym 3464 v0.AddArg(ptr) 3465 v0.AddArg(mem) 3466 return true 3467 } 3468 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 3469 // cond: x.Uses == 1 && clobber(x) 3470 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3471 for { 3472 x := v.Args[0] 3473 if x.Op != OpAMD64MOVQload { 3474 break 3475 } 3476 off := x.AuxInt 3477 sym 
:= x.Aux 3478 ptr := x.Args[0] 3479 mem := x.Args[1] 3480 if !(x.Uses == 1 && clobber(x)) { 3481 break 3482 } 3483 b = x.Block 3484 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3485 v.reset(OpCopy) 3486 v.AddArg(v0) 3487 v0.AuxInt = off 3488 v0.Aux = sym 3489 v0.AddArg(ptr) 3490 v0.AddArg(mem) 3491 return true 3492 } 3493 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 3494 // cond: x.Uses == 1 && clobber(x) 3495 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 3496 for { 3497 x := v.Args[0] 3498 if x.Op != OpAMD64MOVBloadidx1 { 3499 break 3500 } 3501 off := x.AuxInt 3502 sym := x.Aux 3503 ptr := x.Args[0] 3504 idx := x.Args[1] 3505 mem := x.Args[2] 3506 if !(x.Uses == 1 && clobber(x)) { 3507 break 3508 } 3509 b = x.Block 3510 v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type) 3511 v.reset(OpCopy) 3512 v.AddArg(v0) 3513 v0.AuxInt = off 3514 v0.Aux = sym 3515 v0.AddArg(ptr) 3516 v0.AddArg(idx) 3517 v0.AddArg(mem) 3518 return true 3519 } 3520 // match: (MOVBQZX (ANDLconst [c] x)) 3521 // cond: 3522 // result: (ANDLconst [c & 0xff] x) 3523 for { 3524 v_0 := v.Args[0] 3525 if v_0.Op != OpAMD64ANDLconst { 3526 break 3527 } 3528 c := v_0.AuxInt 3529 x := v_0.Args[0] 3530 v.reset(OpAMD64ANDLconst) 3531 v.AuxInt = c & 0xff 3532 v.AddArg(x) 3533 return true 3534 } 3535 return false 3536 } 3537 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { 3538 b := v.Block 3539 _ = b 3540 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3541 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3542 // result: x 3543 for { 3544 off := v.AuxInt 3545 sym := v.Aux 3546 ptr := v.Args[0] 3547 v_1 := v.Args[1] 3548 if v_1.Op != OpAMD64MOVBstore { 3549 break 3550 } 3551 off2 := v_1.AuxInt 3552 sym2 := v_1.Aux 3553 ptr2 := v_1.Args[0] 3554 x := v_1.Args[1] 3555 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3556 break 3557 } 3558 v.reset(OpCopy) 3559 v.Type = x.Type 3560 v.AddArg(x) 3561 return true 3562 } 3563 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 3564 // cond: is32Bit(off1+off2) 3565 // result: (MOVBload [off1+off2] {sym} ptr mem) 3566 for { 3567 off1 := v.AuxInt 3568 sym := v.Aux 3569 v_0 := v.Args[0] 3570 if v_0.Op != OpAMD64ADDQconst { 3571 break 3572 } 3573 off2 := v_0.AuxInt 3574 ptr := v_0.Args[0] 3575 mem := v.Args[1] 3576 if !(is32Bit(off1 + off2)) { 3577 break 3578 } 3579 v.reset(OpAMD64MOVBload) 3580 v.AuxInt = off1 + off2 3581 v.Aux = sym 3582 v.AddArg(ptr) 3583 v.AddArg(mem) 3584 return true 3585 } 3586 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3587 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3588 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3589 for { 3590 off1 := v.AuxInt 3591 sym1 := v.Aux 3592 v_0 := v.Args[0] 3593 if v_0.Op != OpAMD64LEAQ { 3594 break 3595 } 3596 off2 := v_0.AuxInt 3597 sym2 := v_0.Aux 3598 base := v_0.Args[0] 3599 mem := v.Args[1] 3600 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3601 break 3602 } 3603 v.reset(OpAMD64MOVBload) 3604 v.AuxInt = off1 + off2 3605 v.Aux = mergeSym(sym1, sym2) 3606 v.AddArg(base) 3607 v.AddArg(mem) 3608 return true 3609 } 3610 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 3611 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3612 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 3613 for { 3614 off1 := v.AuxInt 3615 sym1 := v.Aux 3616 v_0 := v.Args[0] 3617 if v_0.Op != OpAMD64LEAQ1 { 3618 break 3619 } 3620 off2 := 
v_0.AuxInt 3621 sym2 := v_0.Aux 3622 ptr := v_0.Args[0] 3623 idx := v_0.Args[1] 3624 mem := v.Args[1] 3625 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3626 break 3627 } 3628 v.reset(OpAMD64MOVBloadidx1) 3629 v.AuxInt = off1 + off2 3630 v.Aux = mergeSym(sym1, sym2) 3631 v.AddArg(ptr) 3632 v.AddArg(idx) 3633 v.AddArg(mem) 3634 return true 3635 } 3636 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 3637 // cond: ptr.Op != OpSB 3638 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 3639 for { 3640 off := v.AuxInt 3641 sym := v.Aux 3642 v_0 := v.Args[0] 3643 if v_0.Op != OpAMD64ADDQ { 3644 break 3645 } 3646 ptr := v_0.Args[0] 3647 idx := v_0.Args[1] 3648 mem := v.Args[1] 3649 if !(ptr.Op != OpSB) { 3650 break 3651 } 3652 v.reset(OpAMD64MOVBloadidx1) 3653 v.AuxInt = off 3654 v.Aux = sym 3655 v.AddArg(ptr) 3656 v.AddArg(idx) 3657 v.AddArg(mem) 3658 return true 3659 } 3660 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 3661 // cond: canMergeSym(sym1, sym2) 3662 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3663 for { 3664 off1 := v.AuxInt 3665 sym1 := v.Aux 3666 v_0 := v.Args[0] 3667 if v_0.Op != OpAMD64LEAL { 3668 break 3669 } 3670 off2 := v_0.AuxInt 3671 sym2 := v_0.Aux 3672 base := v_0.Args[0] 3673 mem := v.Args[1] 3674 if !(canMergeSym(sym1, sym2)) { 3675 break 3676 } 3677 v.reset(OpAMD64MOVBload) 3678 v.AuxInt = off1 + off2 3679 v.Aux = mergeSym(sym1, sym2) 3680 v.AddArg(base) 3681 v.AddArg(mem) 3682 return true 3683 } 3684 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 3685 // cond: is32Bit(off1+off2) 3686 // result: (MOVBload [off1+off2] {sym} ptr mem) 3687 for { 3688 off1 := v.AuxInt 3689 sym := v.Aux 3690 v_0 := v.Args[0] 3691 if v_0.Op != OpAMD64ADDLconst { 3692 break 3693 } 3694 off2 := v_0.AuxInt 3695 ptr := v_0.Args[0] 3696 mem := v.Args[1] 3697 if !(is32Bit(off1 + off2)) { 3698 break 3699 } 3700 v.reset(OpAMD64MOVBload) 3701 v.AuxInt = off1 + off2 3702 v.Aux = sym 3703 v.AddArg(ptr) 3704 v.AddArg(mem) 3705 return true 3706 } 3707 return false 3708 } 3709 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 3710 b := v.Block 3711 _ = b 3712 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 3713 // cond: 3714 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3715 for { 3716 c := v.AuxInt 3717 sym := v.Aux 3718 v_0 := v.Args[0] 3719 if v_0.Op != OpAMD64ADDQconst { 3720 break 3721 } 3722 d := v_0.AuxInt 3723 ptr := v_0.Args[0] 3724 idx := v.Args[1] 3725 mem := v.Args[2] 3726 v.reset(OpAMD64MOVBloadidx1) 3727 v.AuxInt = c + d 3728 v.Aux = sym 3729 v.AddArg(ptr) 3730 v.AddArg(idx) 3731 v.AddArg(mem) 3732 return true 3733 } 3734 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 3735 // cond: 3736 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3737 for { 3738 c := v.AuxInt 3739 sym := v.Aux 3740 ptr := v.Args[0] 3741 v_1 := v.Args[1] 3742 if v_1.Op != OpAMD64ADDQconst { 3743 break 3744 } 3745 d := v_1.AuxInt 3746 idx := v_1.Args[0] 3747 mem := v.Args[2] 3748 v.reset(OpAMD64MOVBloadidx1) 3749 v.AuxInt = c + d 3750 v.Aux = sym 3751 v.AddArg(ptr) 3752 v.AddArg(idx) 3753 v.AddArg(mem) 3754 return true 3755 } 3756 return false 3757 } 3758 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 3759 b := v.Block 3760 _ = b 3761 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 3762 // cond: 3763 // result: (MOVBstore [off] {sym} ptr x mem) 3764 for { 3765 off := v.AuxInt 3766 sym := v.Aux 3767 ptr := v.Args[0] 3768 v_1 := v.Args[1] 3769 if v_1.Op != 
OpAMD64MOVBQSX { 3770 break 3771 } 3772 x := v_1.Args[0] 3773 mem := v.Args[2] 3774 v.reset(OpAMD64MOVBstore) 3775 v.AuxInt = off 3776 v.Aux = sym 3777 v.AddArg(ptr) 3778 v.AddArg(x) 3779 v.AddArg(mem) 3780 return true 3781 } 3782 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 3783 // cond: 3784 // result: (MOVBstore [off] {sym} ptr x mem) 3785 for { 3786 off := v.AuxInt 3787 sym := v.Aux 3788 ptr := v.Args[0] 3789 v_1 := v.Args[1] 3790 if v_1.Op != OpAMD64MOVBQZX { 3791 break 3792 } 3793 x := v_1.Args[0] 3794 mem := v.Args[2] 3795 v.reset(OpAMD64MOVBstore) 3796 v.AuxInt = off 3797 v.Aux = sym 3798 v.AddArg(ptr) 3799 v.AddArg(x) 3800 v.AddArg(mem) 3801 return true 3802 } 3803 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 3804 // cond: is32Bit(off1+off2) 3805 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 3806 for { 3807 off1 := v.AuxInt 3808 sym := v.Aux 3809 v_0 := v.Args[0] 3810 if v_0.Op != OpAMD64ADDQconst { 3811 break 3812 } 3813 off2 := v_0.AuxInt 3814 ptr := v_0.Args[0] 3815 val := v.Args[1] 3816 mem := v.Args[2] 3817 if !(is32Bit(off1 + off2)) { 3818 break 3819 } 3820 v.reset(OpAMD64MOVBstore) 3821 v.AuxInt = off1 + off2 3822 v.Aux = sym 3823 v.AddArg(ptr) 3824 v.AddArg(val) 3825 v.AddArg(mem) 3826 return true 3827 } 3828 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 3829 // cond: validOff(off) 3830 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 3831 for { 3832 off := v.AuxInt 3833 sym := v.Aux 3834 ptr := v.Args[0] 3835 v_1 := v.Args[1] 3836 if v_1.Op != OpAMD64MOVLconst { 3837 break 3838 } 3839 c := v_1.AuxInt 3840 mem := v.Args[2] 3841 if !(validOff(off)) { 3842 break 3843 } 3844 v.reset(OpAMD64MOVBstoreconst) 3845 v.AuxInt = makeValAndOff(int64(int8(c)), off) 3846 v.Aux = sym 3847 v.AddArg(ptr) 3848 v.AddArg(mem) 3849 return true 3850 } 3851 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 3852 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3853 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 3854 for { 3855 off1 := v.AuxInt 3856 sym1 := v.Aux 3857 v_0 := v.Args[0] 3858 if v_0.Op != OpAMD64LEAQ { 3859 break 3860 } 3861 off2 := v_0.AuxInt 3862 sym2 := v_0.Aux 3863 base := v_0.Args[0] 3864 val := v.Args[1] 3865 mem := v.Args[2] 3866 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3867 break 3868 } 3869 v.reset(OpAMD64MOVBstore) 3870 v.AuxInt = off1 + off2 3871 v.Aux = mergeSym(sym1, sym2) 3872 v.AddArg(base) 3873 v.AddArg(val) 3874 v.AddArg(mem) 3875 return true 3876 } 3877 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 3878 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3879 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 3880 for { 3881 off1 := v.AuxInt 3882 sym1 := v.Aux 3883 v_0 := v.Args[0] 3884 if v_0.Op != OpAMD64LEAQ1 { 3885 break 3886 } 3887 off2 := v_0.AuxInt 3888 sym2 := v_0.Aux 3889 ptr := v_0.Args[0] 3890 idx := v_0.Args[1] 3891 val := v.Args[1] 3892 mem := v.Args[2] 3893 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3894 break 3895 } 3896 v.reset(OpAMD64MOVBstoreidx1) 3897 v.AuxInt = off1 + off2 3898 v.Aux = mergeSym(sym1, sym2) 3899 v.AddArg(ptr) 3900 v.AddArg(idx) 3901 v.AddArg(val) 3902 v.AddArg(mem) 3903 return true 3904 } 3905 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 3906 // cond: ptr.Op != OpSB 3907 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 3908 for { 3909 off := v.AuxInt 3910 sym := v.Aux 3911 v_0 := v.Args[0] 3912 if v_0.Op != 
OpAMD64ADDQ { 3913 break 3914 } 3915 ptr := v_0.Args[0] 3916 idx := v_0.Args[1] 3917 val := v.Args[1] 3918 mem := v.Args[2] 3919 if !(ptr.Op != OpSB) { 3920 break 3921 } 3922 v.reset(OpAMD64MOVBstoreidx1) 3923 v.AuxInt = off 3924 v.Aux = sym 3925 v.AddArg(ptr) 3926 v.AddArg(idx) 3927 v.AddArg(val) 3928 v.AddArg(mem) 3929 return true 3930 } 3931 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 3932 // cond: x.Uses == 1 && clobber(x) 3933 // result: (MOVWstore [i-1] {s} p w mem) 3934 for { 3935 i := v.AuxInt 3936 s := v.Aux 3937 p := v.Args[0] 3938 v_1 := v.Args[1] 3939 if v_1.Op != OpAMD64SHRQconst { 3940 break 3941 } 3942 if v_1.AuxInt != 8 { 3943 break 3944 } 3945 w := v_1.Args[0] 3946 x := v.Args[2] 3947 if x.Op != OpAMD64MOVBstore { 3948 break 3949 } 3950 if x.AuxInt != i-1 { 3951 break 3952 } 3953 if x.Aux != s { 3954 break 3955 } 3956 if p != x.Args[0] { 3957 break 3958 } 3959 if w != x.Args[1] { 3960 break 3961 } 3962 mem := x.Args[2] 3963 if !(x.Uses == 1 && clobber(x)) { 3964 break 3965 } 3966 v.reset(OpAMD64MOVWstore) 3967 v.AuxInt = i - 1 3968 v.Aux = s 3969 v.AddArg(p) 3970 v.AddArg(w) 3971 v.AddArg(mem) 3972 return true 3973 } 3974 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 3975 // cond: x.Uses == 1 && clobber(x) 3976 // result: (MOVWstore [i-1] {s} p w0 mem) 3977 for { 3978 i := v.AuxInt 3979 s := v.Aux 3980 p := v.Args[0] 3981 v_1 := v.Args[1] 3982 if v_1.Op != OpAMD64SHRQconst { 3983 break 3984 } 3985 j := v_1.AuxInt 3986 w := v_1.Args[0] 3987 x := v.Args[2] 3988 if x.Op != OpAMD64MOVBstore { 3989 break 3990 } 3991 if x.AuxInt != i-1 { 3992 break 3993 } 3994 if x.Aux != s { 3995 break 3996 } 3997 if p != x.Args[0] { 3998 break 3999 } 4000 w0 := x.Args[1] 4001 if w0.Op != OpAMD64SHRQconst { 4002 break 4003 } 4004 if w0.AuxInt != j-8 { 4005 break 4006 } 4007 if w != w0.Args[0] { 4008 break 4009 } 4010 mem := x.Args[2] 4011 if !(x.Uses == 1 && clobber(x)) { 4012 break 4013 } 4014 v.reset(OpAMD64MOVWstore) 4015 v.AuxInt = i - 1 4016 v.Aux = s 4017 v.AddArg(p) 4018 v.AddArg(w0) 4019 v.AddArg(mem) 4020 return true 4021 } 4022 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 4023 // cond: canMergeSym(sym1, sym2) 4024 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4025 for { 4026 off1 := v.AuxInt 4027 sym1 := v.Aux 4028 v_0 := v.Args[0] 4029 if v_0.Op != OpAMD64LEAL { 4030 break 4031 } 4032 off2 := v_0.AuxInt 4033 sym2 := v_0.Aux 4034 base := v_0.Args[0] 4035 val := v.Args[1] 4036 mem := v.Args[2] 4037 if !(canMergeSym(sym1, sym2)) { 4038 break 4039 } 4040 v.reset(OpAMD64MOVBstore) 4041 v.AuxInt = off1 + off2 4042 v.Aux = mergeSym(sym1, sym2) 4043 v.AddArg(base) 4044 v.AddArg(val) 4045 v.AddArg(mem) 4046 return true 4047 } 4048 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 4049 // cond: is32Bit(off1+off2) 4050 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4051 for { 4052 off1 := v.AuxInt 4053 sym := v.Aux 4054 v_0 := v.Args[0] 4055 if v_0.Op != OpAMD64ADDLconst { 4056 break 4057 } 4058 off2 := v_0.AuxInt 4059 ptr := v_0.Args[0] 4060 val := v.Args[1] 4061 mem := v.Args[2] 4062 if !(is32Bit(off1 + off2)) { 4063 break 4064 } 4065 v.reset(OpAMD64MOVBstore) 4066 v.AuxInt = off1 + off2 4067 v.Aux = sym 4068 v.AddArg(ptr) 4069 v.AddArg(val) 4070 v.AddArg(mem) 4071 return true 4072 } 4073 return false 4074 } 4075 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 4076 b := v.Block 4077 _ = b 4078 // 
match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 4079 // cond: ValAndOff(sc).canAdd(off) 4080 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4081 for { 4082 sc := v.AuxInt 4083 s := v.Aux 4084 v_0 := v.Args[0] 4085 if v_0.Op != OpAMD64ADDQconst { 4086 break 4087 } 4088 off := v_0.AuxInt 4089 ptr := v_0.Args[0] 4090 mem := v.Args[1] 4091 if !(ValAndOff(sc).canAdd(off)) { 4092 break 4093 } 4094 v.reset(OpAMD64MOVBstoreconst) 4095 v.AuxInt = ValAndOff(sc).add(off) 4096 v.Aux = s 4097 v.AddArg(ptr) 4098 v.AddArg(mem) 4099 return true 4100 } 4101 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 4102 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4103 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4104 for { 4105 sc := v.AuxInt 4106 sym1 := v.Aux 4107 v_0 := v.Args[0] 4108 if v_0.Op != OpAMD64LEAQ { 4109 break 4110 } 4111 off := v_0.AuxInt 4112 sym2 := v_0.Aux 4113 ptr := v_0.Args[0] 4114 mem := v.Args[1] 4115 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4116 break 4117 } 4118 v.reset(OpAMD64MOVBstoreconst) 4119 v.AuxInt = ValAndOff(sc).add(off) 4120 v.Aux = mergeSym(sym1, sym2) 4121 v.AddArg(ptr) 4122 v.AddArg(mem) 4123 return true 4124 } 4125 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 4126 // cond: canMergeSym(sym1, sym2) 4127 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 4128 for { 4129 x := v.AuxInt 4130 sym1 := v.Aux 4131 v_0 := v.Args[0] 4132 if v_0.Op != OpAMD64LEAQ1 { 4133 break 4134 } 4135 off := v_0.AuxInt 4136 sym2 := v_0.Aux 4137 ptr := v_0.Args[0] 4138 idx := v_0.Args[1] 4139 mem := v.Args[1] 4140 if !(canMergeSym(sym1, sym2)) { 4141 break 4142 } 4143 v.reset(OpAMD64MOVBstoreconstidx1) 4144 v.AuxInt = ValAndOff(x).add(off) 4145 v.Aux = mergeSym(sym1, sym2) 4146 v.AddArg(ptr) 4147 v.AddArg(idx) 4148 v.AddArg(mem) 4149 return true 4150 } 4151 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 4152 // cond: 4153 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 4154 for { 4155 x := v.AuxInt 4156 sym := v.Aux 4157 v_0 := v.Args[0] 4158 if v_0.Op != OpAMD64ADDQ { 4159 break 4160 } 4161 ptr := v_0.Args[0] 4162 idx := v_0.Args[1] 4163 mem := v.Args[1] 4164 v.reset(OpAMD64MOVBstoreconstidx1) 4165 v.AuxInt = x 4166 v.Aux = sym 4167 v.AddArg(ptr) 4168 v.AddArg(idx) 4169 v.AddArg(mem) 4170 return true 4171 } 4172 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 4173 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4174 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 4175 for { 4176 c := v.AuxInt 4177 s := v.Aux 4178 p := v.Args[0] 4179 x := v.Args[1] 4180 if x.Op != OpAMD64MOVBstoreconst { 4181 break 4182 } 4183 a := x.AuxInt 4184 if x.Aux != s { 4185 break 4186 } 4187 if p != x.Args[0] { 4188 break 4189 } 4190 mem := x.Args[1] 4191 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4192 break 4193 } 4194 v.reset(OpAMD64MOVWstoreconst) 4195 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4196 v.Aux = s 4197 v.AddArg(p) 4198 v.AddArg(mem) 4199 return true 4200 } 4201 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 4202 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4203 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4204 for { 4205 sc 
:= v.AuxInt 4206 sym1 := v.Aux 4207 v_0 := v.Args[0] 4208 if v_0.Op != OpAMD64LEAL { 4209 break 4210 } 4211 off := v_0.AuxInt 4212 sym2 := v_0.Aux 4213 ptr := v_0.Args[0] 4214 mem := v.Args[1] 4215 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4216 break 4217 } 4218 v.reset(OpAMD64MOVBstoreconst) 4219 v.AuxInt = ValAndOff(sc).add(off) 4220 v.Aux = mergeSym(sym1, sym2) 4221 v.AddArg(ptr) 4222 v.AddArg(mem) 4223 return true 4224 } 4225 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 4226 // cond: ValAndOff(sc).canAdd(off) 4227 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4228 for { 4229 sc := v.AuxInt 4230 s := v.Aux 4231 v_0 := v.Args[0] 4232 if v_0.Op != OpAMD64ADDLconst { 4233 break 4234 } 4235 off := v_0.AuxInt 4236 ptr := v_0.Args[0] 4237 mem := v.Args[1] 4238 if !(ValAndOff(sc).canAdd(off)) { 4239 break 4240 } 4241 v.reset(OpAMD64MOVBstoreconst) 4242 v.AuxInt = ValAndOff(sc).add(off) 4243 v.Aux = s 4244 v.AddArg(ptr) 4245 v.AddArg(mem) 4246 return true 4247 } 4248 return false 4249 } 4250 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 4251 b := v.Block 4252 _ = b 4253 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 4254 // cond: 4255 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4256 for { 4257 x := v.AuxInt 4258 sym := v.Aux 4259 v_0 := v.Args[0] 4260 if v_0.Op != OpAMD64ADDQconst { 4261 break 4262 } 4263 c := v_0.AuxInt 4264 ptr := v_0.Args[0] 4265 idx := v.Args[1] 4266 mem := v.Args[2] 4267 v.reset(OpAMD64MOVBstoreconstidx1) 4268 v.AuxInt = ValAndOff(x).add(c) 4269 v.Aux = sym 4270 v.AddArg(ptr) 4271 v.AddArg(idx) 4272 v.AddArg(mem) 4273 return true 4274 } 4275 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 4276 // cond: 4277 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4278 for { 4279 x := v.AuxInt 4280 sym := v.Aux 4281 ptr := v.Args[0] 4282 v_1 := v.Args[1] 4283 if v_1.Op != OpAMD64ADDQconst { 4284 break 4285 } 4286 c := v_1.AuxInt 4287 idx := v_1.Args[0] 4288 mem := v.Args[2] 4289 v.reset(OpAMD64MOVBstoreconstidx1) 4290 v.AuxInt = ValAndOff(x).add(c) 4291 v.Aux = sym 4292 v.AddArg(ptr) 4293 v.AddArg(idx) 4294 v.AddArg(mem) 4295 return true 4296 } 4297 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 4298 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4299 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 4300 for { 4301 c := v.AuxInt 4302 s := v.Aux 4303 p := v.Args[0] 4304 i := v.Args[1] 4305 x := v.Args[2] 4306 if x.Op != OpAMD64MOVBstoreconstidx1 { 4307 break 4308 } 4309 a := x.AuxInt 4310 if x.Aux != s { 4311 break 4312 } 4313 if p != x.Args[0] { 4314 break 4315 } 4316 if i != x.Args[1] { 4317 break 4318 } 4319 mem := x.Args[2] 4320 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4321 break 4322 } 4323 v.reset(OpAMD64MOVWstoreconstidx1) 4324 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4325 v.Aux = s 4326 v.AddArg(p) 4327 v.AddArg(i) 4328 v.AddArg(mem) 4329 return true 4330 } 4331 return false 4332 } 4333 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 4334 b := v.Block 4335 _ = b 4336 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 4337 // cond: 4338 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4339 for { 4340 c := 
v.AuxInt 4341 sym := v.Aux 4342 v_0 := v.Args[0] 4343 if v_0.Op != OpAMD64ADDQconst { 4344 break 4345 } 4346 d := v_0.AuxInt 4347 ptr := v_0.Args[0] 4348 idx := v.Args[1] 4349 val := v.Args[2] 4350 mem := v.Args[3] 4351 v.reset(OpAMD64MOVBstoreidx1) 4352 v.AuxInt = c + d 4353 v.Aux = sym 4354 v.AddArg(ptr) 4355 v.AddArg(idx) 4356 v.AddArg(val) 4357 v.AddArg(mem) 4358 return true 4359 } 4360 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 4361 // cond: 4362 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4363 for { 4364 c := v.AuxInt 4365 sym := v.Aux 4366 ptr := v.Args[0] 4367 v_1 := v.Args[1] 4368 if v_1.Op != OpAMD64ADDQconst { 4369 break 4370 } 4371 d := v_1.AuxInt 4372 idx := v_1.Args[0] 4373 val := v.Args[2] 4374 mem := v.Args[3] 4375 v.reset(OpAMD64MOVBstoreidx1) 4376 v.AuxInt = c + d 4377 v.Aux = sym 4378 v.AddArg(ptr) 4379 v.AddArg(idx) 4380 v.AddArg(val) 4381 v.AddArg(mem) 4382 return true 4383 } 4384 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 4385 // cond: x.Uses == 1 && clobber(x) 4386 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 4387 for { 4388 i := v.AuxInt 4389 s := v.Aux 4390 p := v.Args[0] 4391 idx := v.Args[1] 4392 v_2 := v.Args[2] 4393 if v_2.Op != OpAMD64SHRQconst { 4394 break 4395 } 4396 if v_2.AuxInt != 8 { 4397 break 4398 } 4399 w := v_2.Args[0] 4400 x := v.Args[3] 4401 if x.Op != OpAMD64MOVBstoreidx1 { 4402 break 4403 } 4404 if x.AuxInt != i-1 { 4405 break 4406 } 4407 if x.Aux != s { 4408 break 4409 } 4410 if p != x.Args[0] { 4411 break 4412 } 4413 if idx != x.Args[1] { 4414 break 4415 } 4416 if w != x.Args[2] { 4417 break 4418 } 4419 mem := x.Args[3] 4420 if !(x.Uses == 1 && clobber(x)) { 4421 break 4422 } 4423 v.reset(OpAMD64MOVWstoreidx1) 4424 v.AuxInt = i - 1 4425 v.Aux = s 4426 v.AddArg(p) 4427 v.AddArg(idx) 4428 v.AddArg(w) 4429 v.AddArg(mem) 4430 return true 4431 } 4432 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 4433 // cond: x.Uses == 1 && clobber(x) 4434 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 4435 for { 4436 i := v.AuxInt 4437 s := v.Aux 4438 p := v.Args[0] 4439 idx := v.Args[1] 4440 v_2 := v.Args[2] 4441 if v_2.Op != OpAMD64SHRQconst { 4442 break 4443 } 4444 j := v_2.AuxInt 4445 w := v_2.Args[0] 4446 x := v.Args[3] 4447 if x.Op != OpAMD64MOVBstoreidx1 { 4448 break 4449 } 4450 if x.AuxInt != i-1 { 4451 break 4452 } 4453 if x.Aux != s { 4454 break 4455 } 4456 if p != x.Args[0] { 4457 break 4458 } 4459 if idx != x.Args[1] { 4460 break 4461 } 4462 w0 := x.Args[2] 4463 if w0.Op != OpAMD64SHRQconst { 4464 break 4465 } 4466 if w0.AuxInt != j-8 { 4467 break 4468 } 4469 if w != w0.Args[0] { 4470 break 4471 } 4472 mem := x.Args[3] 4473 if !(x.Uses == 1 && clobber(x)) { 4474 break 4475 } 4476 v.reset(OpAMD64MOVWstoreidx1) 4477 v.AuxInt = i - 1 4478 v.Aux = s 4479 v.AddArg(p) 4480 v.AddArg(idx) 4481 v.AddArg(w0) 4482 v.AddArg(mem) 4483 return true 4484 } 4485 return false 4486 } 4487 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 4488 b := v.Block 4489 _ = b 4490 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 4491 // cond: x.Uses == 1 && clobber(x) 4492 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4493 for { 4494 x := v.Args[0] 4495 if x.Op != OpAMD64MOVLload { 4496 break 4497 } 4498 off := x.AuxInt 4499 sym := x.Aux 4500 ptr := x.Args[0] 4501 mem := x.Args[1] 4502 if !(x.Uses == 1 && clobber(x)) { 4503 break 4504 } 4505 b = x.Block 4506 v0 := 
b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 4507 v.reset(OpCopy) 4508 v.AddArg(v0) 4509 v0.AuxInt = off 4510 v0.Aux = sym 4511 v0.AddArg(ptr) 4512 v0.AddArg(mem) 4513 return true 4514 } 4515 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 4516 // cond: x.Uses == 1 && clobber(x) 4517 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4518 for { 4519 x := v.Args[0] 4520 if x.Op != OpAMD64MOVQload { 4521 break 4522 } 4523 off := x.AuxInt 4524 sym := x.Aux 4525 ptr := x.Args[0] 4526 mem := x.Args[1] 4527 if !(x.Uses == 1 && clobber(x)) { 4528 break 4529 } 4530 b = x.Block 4531 v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 4532 v.reset(OpCopy) 4533 v.AddArg(v0) 4534 v0.AuxInt = off 4535 v0.Aux = sym 4536 v0.AddArg(ptr) 4537 v0.AddArg(mem) 4538 return true 4539 } 4540 // match: (MOVLQSX (ANDLconst [c] x)) 4541 // cond: c & 0x80000000 == 0 4542 // result: (ANDLconst [c & 0x7fffffff] x) 4543 for { 4544 v_0 := v.Args[0] 4545 if v_0.Op != OpAMD64ANDLconst { 4546 break 4547 } 4548 c := v_0.AuxInt 4549 x := v_0.Args[0] 4550 if !(c&0x80000000 == 0) { 4551 break 4552 } 4553 v.reset(OpAMD64ANDLconst) 4554 v.AuxInt = c & 0x7fffffff 4555 v.AddArg(x) 4556 return true 4557 } 4558 return false 4559 } 4560 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 4561 b := v.Block 4562 _ = b 4563 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4564 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4565 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4566 for { 4567 off1 := v.AuxInt 4568 sym1 := v.Aux 4569 v_0 := v.Args[0] 4570 if v_0.Op != OpAMD64LEAQ { 4571 break 4572 } 4573 off2 := v_0.AuxInt 4574 sym2 := v_0.Aux 4575 base := v_0.Args[0] 4576 mem := v.Args[1] 4577 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4578 break 4579 } 4580 v.reset(OpAMD64MOVLQSXload) 4581 v.AuxInt = off1 + off2 4582 v.Aux = mergeSym(sym1, sym2) 4583 v.AddArg(base) 4584 v.AddArg(mem) 4585 return true 4586 } 4587 return false 4588 } 4589 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 4590 b := v.Block 4591 _ = b 4592 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 4593 // cond: x.Uses == 1 && clobber(x) 4594 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 4595 for { 4596 x := v.Args[0] 4597 if x.Op != OpAMD64MOVLload { 4598 break 4599 } 4600 off := x.AuxInt 4601 sym := x.Aux 4602 ptr := x.Args[0] 4603 mem := x.Args[1] 4604 if !(x.Uses == 1 && clobber(x)) { 4605 break 4606 } 4607 b = x.Block 4608 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 4609 v.reset(OpCopy) 4610 v.AddArg(v0) 4611 v0.AuxInt = off 4612 v0.Aux = sym 4613 v0.AddArg(ptr) 4614 v0.AddArg(mem) 4615 return true 4616 } 4617 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 4618 // cond: x.Uses == 1 && clobber(x) 4619 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 4620 for { 4621 x := v.Args[0] 4622 if x.Op != OpAMD64MOVQload { 4623 break 4624 } 4625 off := x.AuxInt 4626 sym := x.Aux 4627 ptr := x.Args[0] 4628 mem := x.Args[1] 4629 if !(x.Uses == 1 && clobber(x)) { 4630 break 4631 } 4632 b = x.Block 4633 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 4634 v.reset(OpCopy) 4635 v.AddArg(v0) 4636 v0.AuxInt = off 4637 v0.Aux = sym 4638 v0.AddArg(ptr) 4639 v0.AddArg(mem) 4640 return true 4641 } 4642 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 4643 // cond: x.Uses == 1 && clobber(x) 4644 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 4645 for { 4646 x := v.Args[0] 4647 if x.Op != 
OpAMD64MOVLloadidx1 { 4648 break 4649 } 4650 off := x.AuxInt 4651 sym := x.Aux 4652 ptr := x.Args[0] 4653 idx := x.Args[1] 4654 mem := x.Args[2] 4655 if !(x.Uses == 1 && clobber(x)) { 4656 break 4657 } 4658 b = x.Block 4659 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 4660 v.reset(OpCopy) 4661 v.AddArg(v0) 4662 v0.AuxInt = off 4663 v0.Aux = sym 4664 v0.AddArg(ptr) 4665 v0.AddArg(idx) 4666 v0.AddArg(mem) 4667 return true 4668 } 4669 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 4670 // cond: x.Uses == 1 && clobber(x) 4671 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 4672 for { 4673 x := v.Args[0] 4674 if x.Op != OpAMD64MOVLloadidx4 { 4675 break 4676 } 4677 off := x.AuxInt 4678 sym := x.Aux 4679 ptr := x.Args[0] 4680 idx := x.Args[1] 4681 mem := x.Args[2] 4682 if !(x.Uses == 1 && clobber(x)) { 4683 break 4684 } 4685 b = x.Block 4686 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type) 4687 v.reset(OpCopy) 4688 v.AddArg(v0) 4689 v0.AuxInt = off 4690 v0.Aux = sym 4691 v0.AddArg(ptr) 4692 v0.AddArg(idx) 4693 v0.AddArg(mem) 4694 return true 4695 } 4696 // match: (MOVLQZX (ANDLconst [c] x)) 4697 // cond: 4698 // result: (ANDLconst [c] x) 4699 for { 4700 v_0 := v.Args[0] 4701 if v_0.Op != OpAMD64ANDLconst { 4702 break 4703 } 4704 c := v_0.AuxInt 4705 x := v_0.Args[0] 4706 v.reset(OpAMD64ANDLconst) 4707 v.AuxInt = c 4708 v.AddArg(x) 4709 return true 4710 } 4711 return false 4712 } 4713 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool { 4714 b := v.Block 4715 _ = b 4716 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 4717 // cond: is32Bit(off1+off2) 4718 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 4719 for { 4720 off1 := v.AuxInt 4721 sym := v.Aux 4722 v_0 := v.Args[0] 4723 if v_0.Op != OpAMD64ADDQconst { 4724 break 4725 } 4726 off2 := v_0.AuxInt 4727 ptr := v_0.Args[0] 4728 mem := v.Args[1] 4729 if !(is32Bit(off1 + off2)) { 4730 break 4731 } 4732 v.reset(OpAMD64MOVLatomicload) 4733 v.AuxInt = off1 + off2 4734 v.Aux = sym 4735 v.AddArg(ptr) 4736 v.AddArg(mem) 4737 return true 4738 } 4739 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 4740 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4741 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 4742 for { 4743 off1 := v.AuxInt 4744 sym1 := v.Aux 4745 v_0 := v.Args[0] 4746 if v_0.Op != OpAMD64LEAQ { 4747 break 4748 } 4749 off2 := v_0.AuxInt 4750 sym2 := v_0.Aux 4751 ptr := v_0.Args[0] 4752 mem := v.Args[1] 4753 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4754 break 4755 } 4756 v.reset(OpAMD64MOVLatomicload) 4757 v.AuxInt = off1 + off2 4758 v.Aux = mergeSym(sym1, sym2) 4759 v.AddArg(ptr) 4760 v.AddArg(mem) 4761 return true 4762 } 4763 return false 4764 } 4765 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 4766 b := v.Block 4767 _ = b 4768 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 4769 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4770 // result: x 4771 for { 4772 off := v.AuxInt 4773 sym := v.Aux 4774 ptr := v.Args[0] 4775 v_1 := v.Args[1] 4776 if v_1.Op != OpAMD64MOVLstore { 4777 break 4778 } 4779 off2 := v_1.AuxInt 4780 sym2 := v_1.Aux 4781 ptr2 := v_1.Args[0] 4782 x := v_1.Args[1] 4783 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4784 break 4785 } 4786 v.reset(OpCopy) 4787 v.Type = x.Type 4788 v.AddArg(x) 4789 return true 4790 } 4791 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 4792 // cond: 
is32Bit(off1+off2) 4793 // result: (MOVLload [off1+off2] {sym} ptr mem) 4794 for { 4795 off1 := v.AuxInt 4796 sym := v.Aux 4797 v_0 := v.Args[0] 4798 if v_0.Op != OpAMD64ADDQconst { 4799 break 4800 } 4801 off2 := v_0.AuxInt 4802 ptr := v_0.Args[0] 4803 mem := v.Args[1] 4804 if !(is32Bit(off1 + off2)) { 4805 break 4806 } 4807 v.reset(OpAMD64MOVLload) 4808 v.AuxInt = off1 + off2 4809 v.Aux = sym 4810 v.AddArg(ptr) 4811 v.AddArg(mem) 4812 return true 4813 } 4814 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4815 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4816 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4817 for { 4818 off1 := v.AuxInt 4819 sym1 := v.Aux 4820 v_0 := v.Args[0] 4821 if v_0.Op != OpAMD64LEAQ { 4822 break 4823 } 4824 off2 := v_0.AuxInt 4825 sym2 := v_0.Aux 4826 base := v_0.Args[0] 4827 mem := v.Args[1] 4828 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4829 break 4830 } 4831 v.reset(OpAMD64MOVLload) 4832 v.AuxInt = off1 + off2 4833 v.Aux = mergeSym(sym1, sym2) 4834 v.AddArg(base) 4835 v.AddArg(mem) 4836 return true 4837 } 4838 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 4839 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4840 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4841 for { 4842 off1 := v.AuxInt 4843 sym1 := v.Aux 4844 v_0 := v.Args[0] 4845 if v_0.Op != OpAMD64LEAQ1 { 4846 break 4847 } 4848 off2 := v_0.AuxInt 4849 sym2 := v_0.Aux 4850 ptr := v_0.Args[0] 4851 idx := v_0.Args[1] 4852 mem := v.Args[1] 4853 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4854 break 4855 } 4856 v.reset(OpAMD64MOVLloadidx1) 4857 v.AuxInt = off1 + off2 4858 v.Aux = mergeSym(sym1, sym2) 4859 v.AddArg(ptr) 4860 v.AddArg(idx) 4861 v.AddArg(mem) 4862 return true 4863 } 4864 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 4865 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4866 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4867 for { 4868 off1 := v.AuxInt 4869 sym1 := v.Aux 4870 v_0 := v.Args[0] 4871 if v_0.Op != OpAMD64LEAQ4 { 4872 break 4873 } 4874 off2 := v_0.AuxInt 4875 sym2 := v_0.Aux 4876 ptr := v_0.Args[0] 4877 idx := v_0.Args[1] 4878 mem := v.Args[1] 4879 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4880 break 4881 } 4882 v.reset(OpAMD64MOVLloadidx4) 4883 v.AuxInt = off1 + off2 4884 v.Aux = mergeSym(sym1, sym2) 4885 v.AddArg(ptr) 4886 v.AddArg(idx) 4887 v.AddArg(mem) 4888 return true 4889 } 4890 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 4891 // cond: ptr.Op != OpSB 4892 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 4893 for { 4894 off := v.AuxInt 4895 sym := v.Aux 4896 v_0 := v.Args[0] 4897 if v_0.Op != OpAMD64ADDQ { 4898 break 4899 } 4900 ptr := v_0.Args[0] 4901 idx := v_0.Args[1] 4902 mem := v.Args[1] 4903 if !(ptr.Op != OpSB) { 4904 break 4905 } 4906 v.reset(OpAMD64MOVLloadidx1) 4907 v.AuxInt = off 4908 v.Aux = sym 4909 v.AddArg(ptr) 4910 v.AddArg(idx) 4911 v.AddArg(mem) 4912 return true 4913 } 4914 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 4915 // cond: canMergeSym(sym1, sym2) 4916 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4917 for { 4918 off1 := v.AuxInt 4919 sym1 := v.Aux 4920 v_0 := v.Args[0] 4921 if v_0.Op != OpAMD64LEAL { 4922 break 4923 } 4924 off2 := v_0.AuxInt 4925 sym2 := v_0.Aux 4926 base := v_0.Args[0] 4927 mem := v.Args[1] 4928 if !(canMergeSym(sym1, sym2)) { 4929 break 4930 } 4931 v.reset(OpAMD64MOVLload) 4932 v.AuxInt = off1 + off2 
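// Offset/symbol folding, as in the LEAQ and LEAL rules around this point: the address
// computation's constant displacement is simply added into the load's AuxInt, and
// mergeSym combines the two symbolic bases. canMergeSym is only true when at most one
// of sym1/sym2 is actually set, so mergeSym can keep the non-nil one (a sketch of the
// intent; the exact helpers are defined elsewhere in this package). The surrounding
// rule, for example, rewrites
//   (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// into
//   (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
// so the separate address op disappears and the load addresses base directly.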
4933 v.Aux = mergeSym(sym1, sym2) 4934 v.AddArg(base) 4935 v.AddArg(mem) 4936 return true 4937 } 4938 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 4939 // cond: is32Bit(off1+off2) 4940 // result: (MOVLload [off1+off2] {sym} ptr mem) 4941 for { 4942 off1 := v.AuxInt 4943 sym := v.Aux 4944 v_0 := v.Args[0] 4945 if v_0.Op != OpAMD64ADDLconst { 4946 break 4947 } 4948 off2 := v_0.AuxInt 4949 ptr := v_0.Args[0] 4950 mem := v.Args[1] 4951 if !(is32Bit(off1 + off2)) { 4952 break 4953 } 4954 v.reset(OpAMD64MOVLload) 4955 v.AuxInt = off1 + off2 4956 v.Aux = sym 4957 v.AddArg(ptr) 4958 v.AddArg(mem) 4959 return true 4960 } 4961 return false 4962 } 4963 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 4964 b := v.Block 4965 _ = b 4966 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 4967 // cond: 4968 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 4969 for { 4970 c := v.AuxInt 4971 sym := v.Aux 4972 ptr := v.Args[0] 4973 v_1 := v.Args[1] 4974 if v_1.Op != OpAMD64SHLQconst { 4975 break 4976 } 4977 if v_1.AuxInt != 2 { 4978 break 4979 } 4980 idx := v_1.Args[0] 4981 mem := v.Args[2] 4982 v.reset(OpAMD64MOVLloadidx4) 4983 v.AuxInt = c 4984 v.Aux = sym 4985 v.AddArg(ptr) 4986 v.AddArg(idx) 4987 v.AddArg(mem) 4988 return true 4989 } 4990 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 4991 // cond: 4992 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 4993 for { 4994 c := v.AuxInt 4995 sym := v.Aux 4996 v_0 := v.Args[0] 4997 if v_0.Op != OpAMD64ADDQconst { 4998 break 4999 } 5000 d := v_0.AuxInt 5001 ptr := v_0.Args[0] 5002 idx := v.Args[1] 5003 mem := v.Args[2] 5004 v.reset(OpAMD64MOVLloadidx1) 5005 v.AuxInt = c + d 5006 v.Aux = sym 5007 v.AddArg(ptr) 5008 v.AddArg(idx) 5009 v.AddArg(mem) 5010 return true 5011 } 5012 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 5013 // cond: 5014 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5015 for { 5016 c := v.AuxInt 5017 sym := v.Aux 5018 ptr := v.Args[0] 5019 v_1 := v.Args[1] 5020 if v_1.Op != OpAMD64ADDQconst { 5021 break 5022 } 5023 d := v_1.AuxInt 5024 idx := v_1.Args[0] 5025 mem := v.Args[2] 5026 v.reset(OpAMD64MOVLloadidx1) 5027 v.AuxInt = c + d 5028 v.Aux = sym 5029 v.AddArg(ptr) 5030 v.AddArg(idx) 5031 v.AddArg(mem) 5032 return true 5033 } 5034 return false 5035 } 5036 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { 5037 b := v.Block 5038 _ = b 5039 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 5040 // cond: 5041 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 5042 for { 5043 c := v.AuxInt 5044 sym := v.Aux 5045 v_0 := v.Args[0] 5046 if v_0.Op != OpAMD64ADDQconst { 5047 break 5048 } 5049 d := v_0.AuxInt 5050 ptr := v_0.Args[0] 5051 idx := v.Args[1] 5052 mem := v.Args[2] 5053 v.reset(OpAMD64MOVLloadidx4) 5054 v.AuxInt = c + d 5055 v.Aux = sym 5056 v.AddArg(ptr) 5057 v.AddArg(idx) 5058 v.AddArg(mem) 5059 return true 5060 } 5061 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 5062 // cond: 5063 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 5064 for { 5065 c := v.AuxInt 5066 sym := v.Aux 5067 ptr := v.Args[0] 5068 v_1 := v.Args[1] 5069 if v_1.Op != OpAMD64ADDQconst { 5070 break 5071 } 5072 d := v_1.AuxInt 5073 idx := v_1.Args[0] 5074 mem := v.Args[2] 5075 v.reset(OpAMD64MOVLloadidx4) 5076 v.AuxInt = c + 4*d 5077 v.Aux = sym 5078 v.AddArg(ptr) 5079 v.AddArg(idx) 5080 v.AddArg(mem) 5081 return true 5082 } 5083 return false 5084 } 5085 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { 
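// The MOVLstore rules below follow the same shape as the load rules above: drop a
// MOVLQSX/MOVLQZX on the stored value (a 32-bit store ignores the upper half anyway),
// fold ADDQconst/ADDLconst/LEAQ/LEAL address arithmetic into the store's offset and
// symbol, turn a stored MOVLconst into MOVLstoreconst, pick the indexed forms when the
// address is an ADDQ or a LEAQ1/LEAQ4, and finally combine an adjacent pair of 32-bit
// stores of the two halves of one 64-bit value into a single MOVQstore. Each rewrite
// is spelled out in the match/cond/result comments on the loops that follow.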
5086 b := v.Block 5087 _ = b 5088 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 5089 // cond: 5090 // result: (MOVLstore [off] {sym} ptr x mem) 5091 for { 5092 off := v.AuxInt 5093 sym := v.Aux 5094 ptr := v.Args[0] 5095 v_1 := v.Args[1] 5096 if v_1.Op != OpAMD64MOVLQSX { 5097 break 5098 } 5099 x := v_1.Args[0] 5100 mem := v.Args[2] 5101 v.reset(OpAMD64MOVLstore) 5102 v.AuxInt = off 5103 v.Aux = sym 5104 v.AddArg(ptr) 5105 v.AddArg(x) 5106 v.AddArg(mem) 5107 return true 5108 } 5109 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 5110 // cond: 5111 // result: (MOVLstore [off] {sym} ptr x mem) 5112 for { 5113 off := v.AuxInt 5114 sym := v.Aux 5115 ptr := v.Args[0] 5116 v_1 := v.Args[1] 5117 if v_1.Op != OpAMD64MOVLQZX { 5118 break 5119 } 5120 x := v_1.Args[0] 5121 mem := v.Args[2] 5122 v.reset(OpAMD64MOVLstore) 5123 v.AuxInt = off 5124 v.Aux = sym 5125 v.AddArg(ptr) 5126 v.AddArg(x) 5127 v.AddArg(mem) 5128 return true 5129 } 5130 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 5131 // cond: is32Bit(off1+off2) 5132 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5133 for { 5134 off1 := v.AuxInt 5135 sym := v.Aux 5136 v_0 := v.Args[0] 5137 if v_0.Op != OpAMD64ADDQconst { 5138 break 5139 } 5140 off2 := v_0.AuxInt 5141 ptr := v_0.Args[0] 5142 val := v.Args[1] 5143 mem := v.Args[2] 5144 if !(is32Bit(off1 + off2)) { 5145 break 5146 } 5147 v.reset(OpAMD64MOVLstore) 5148 v.AuxInt = off1 + off2 5149 v.Aux = sym 5150 v.AddArg(ptr) 5151 v.AddArg(val) 5152 v.AddArg(mem) 5153 return true 5154 } 5155 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 5156 // cond: validOff(off) 5157 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 5158 for { 5159 off := v.AuxInt 5160 sym := v.Aux 5161 ptr := v.Args[0] 5162 v_1 := v.Args[1] 5163 if v_1.Op != OpAMD64MOVLconst { 5164 break 5165 } 5166 c := v_1.AuxInt 5167 mem := v.Args[2] 5168 if !(validOff(off)) { 5169 break 5170 } 5171 v.reset(OpAMD64MOVLstoreconst) 5172 v.AuxInt = makeValAndOff(int64(int32(c)), off) 5173 v.Aux = sym 5174 v.AddArg(ptr) 5175 v.AddArg(mem) 5176 return true 5177 } 5178 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5179 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5180 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5181 for { 5182 off1 := v.AuxInt 5183 sym1 := v.Aux 5184 v_0 := v.Args[0] 5185 if v_0.Op != OpAMD64LEAQ { 5186 break 5187 } 5188 off2 := v_0.AuxInt 5189 sym2 := v_0.Aux 5190 base := v_0.Args[0] 5191 val := v.Args[1] 5192 mem := v.Args[2] 5193 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5194 break 5195 } 5196 v.reset(OpAMD64MOVLstore) 5197 v.AuxInt = off1 + off2 5198 v.Aux = mergeSym(sym1, sym2) 5199 v.AddArg(base) 5200 v.AddArg(val) 5201 v.AddArg(mem) 5202 return true 5203 } 5204 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 5205 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5206 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5207 for { 5208 off1 := v.AuxInt 5209 sym1 := v.Aux 5210 v_0 := v.Args[0] 5211 if v_0.Op != OpAMD64LEAQ1 { 5212 break 5213 } 5214 off2 := v_0.AuxInt 5215 sym2 := v_0.Aux 5216 ptr := v_0.Args[0] 5217 idx := v_0.Args[1] 5218 val := v.Args[1] 5219 mem := v.Args[2] 5220 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5221 break 5222 } 5223 v.reset(OpAMD64MOVLstoreidx1) 5224 v.AuxInt = off1 + off2 5225 v.Aux = mergeSym(sym1, sym2) 5226 v.AddArg(ptr) 5227 v.AddArg(idx) 5228 v.AddArg(val) 5229 v.AddArg(mem) 
5230 return true 5231 } 5232 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 5233 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5234 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5235 for { 5236 off1 := v.AuxInt 5237 sym1 := v.Aux 5238 v_0 := v.Args[0] 5239 if v_0.Op != OpAMD64LEAQ4 { 5240 break 5241 } 5242 off2 := v_0.AuxInt 5243 sym2 := v_0.Aux 5244 ptr := v_0.Args[0] 5245 idx := v_0.Args[1] 5246 val := v.Args[1] 5247 mem := v.Args[2] 5248 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5249 break 5250 } 5251 v.reset(OpAMD64MOVLstoreidx4) 5252 v.AuxInt = off1 + off2 5253 v.Aux = mergeSym(sym1, sym2) 5254 v.AddArg(ptr) 5255 v.AddArg(idx) 5256 v.AddArg(val) 5257 v.AddArg(mem) 5258 return true 5259 } 5260 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 5261 // cond: ptr.Op != OpSB 5262 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 5263 for { 5264 off := v.AuxInt 5265 sym := v.Aux 5266 v_0 := v.Args[0] 5267 if v_0.Op != OpAMD64ADDQ { 5268 break 5269 } 5270 ptr := v_0.Args[0] 5271 idx := v_0.Args[1] 5272 val := v.Args[1] 5273 mem := v.Args[2] 5274 if !(ptr.Op != OpSB) { 5275 break 5276 } 5277 v.reset(OpAMD64MOVLstoreidx1) 5278 v.AuxInt = off 5279 v.Aux = sym 5280 v.AddArg(ptr) 5281 v.AddArg(idx) 5282 v.AddArg(val) 5283 v.AddArg(mem) 5284 return true 5285 } 5286 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 5287 // cond: x.Uses == 1 && clobber(x) 5288 // result: (MOVQstore [i-4] {s} p w mem) 5289 for { 5290 i := v.AuxInt 5291 s := v.Aux 5292 p := v.Args[0] 5293 v_1 := v.Args[1] 5294 if v_1.Op != OpAMD64SHRQconst { 5295 break 5296 } 5297 if v_1.AuxInt != 32 { 5298 break 5299 } 5300 w := v_1.Args[0] 5301 x := v.Args[2] 5302 if x.Op != OpAMD64MOVLstore { 5303 break 5304 } 5305 if x.AuxInt != i-4 { 5306 break 5307 } 5308 if x.Aux != s { 5309 break 5310 } 5311 if p != x.Args[0] { 5312 break 5313 } 5314 if w != x.Args[1] { 5315 break 5316 } 5317 mem := x.Args[2] 5318 if !(x.Uses == 1 && clobber(x)) { 5319 break 5320 } 5321 v.reset(OpAMD64MOVQstore) 5322 v.AuxInt = i - 4 5323 v.Aux = s 5324 v.AddArg(p) 5325 v.AddArg(w) 5326 v.AddArg(mem) 5327 return true 5328 } 5329 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 5330 // cond: x.Uses == 1 && clobber(x) 5331 // result: (MOVQstore [i-4] {s} p w0 mem) 5332 for { 5333 i := v.AuxInt 5334 s := v.Aux 5335 p := v.Args[0] 5336 v_1 := v.Args[1] 5337 if v_1.Op != OpAMD64SHRQconst { 5338 break 5339 } 5340 j := v_1.AuxInt 5341 w := v_1.Args[0] 5342 x := v.Args[2] 5343 if x.Op != OpAMD64MOVLstore { 5344 break 5345 } 5346 if x.AuxInt != i-4 { 5347 break 5348 } 5349 if x.Aux != s { 5350 break 5351 } 5352 if p != x.Args[0] { 5353 break 5354 } 5355 w0 := x.Args[1] 5356 if w0.Op != OpAMD64SHRQconst { 5357 break 5358 } 5359 if w0.AuxInt != j-32 { 5360 break 5361 } 5362 if w != w0.Args[0] { 5363 break 5364 } 5365 mem := x.Args[2] 5366 if !(x.Uses == 1 && clobber(x)) { 5367 break 5368 } 5369 v.reset(OpAMD64MOVQstore) 5370 v.AuxInt = i - 4 5371 v.Aux = s 5372 v.AddArg(p) 5373 v.AddArg(w0) 5374 v.AddArg(mem) 5375 return true 5376 } 5377 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5378 // cond: canMergeSym(sym1, sym2) 5379 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5380 for { 5381 off1 := v.AuxInt 5382 sym1 := v.Aux 5383 v_0 := v.Args[0] 5384 if v_0.Op != OpAMD64LEAL { 5385 break 5386 } 5387 off2 := v_0.AuxInt 5388 sym2 := v_0.Aux 5389 base := 
v_0.Args[0] 5390 val := v.Args[1] 5391 mem := v.Args[2] 5392 if !(canMergeSym(sym1, sym2)) { 5393 break 5394 } 5395 v.reset(OpAMD64MOVLstore) 5396 v.AuxInt = off1 + off2 5397 v.Aux = mergeSym(sym1, sym2) 5398 v.AddArg(base) 5399 v.AddArg(val) 5400 v.AddArg(mem) 5401 return true 5402 } 5403 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5404 // cond: is32Bit(off1+off2) 5405 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5406 for { 5407 off1 := v.AuxInt 5408 sym := v.Aux 5409 v_0 := v.Args[0] 5410 if v_0.Op != OpAMD64ADDLconst { 5411 break 5412 } 5413 off2 := v_0.AuxInt 5414 ptr := v_0.Args[0] 5415 val := v.Args[1] 5416 mem := v.Args[2] 5417 if !(is32Bit(off1 + off2)) { 5418 break 5419 } 5420 v.reset(OpAMD64MOVLstore) 5421 v.AuxInt = off1 + off2 5422 v.Aux = sym 5423 v.AddArg(ptr) 5424 v.AddArg(val) 5425 v.AddArg(mem) 5426 return true 5427 } 5428 return false 5429 } 5430 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { 5431 b := v.Block 5432 _ = b 5433 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5434 // cond: ValAndOff(sc).canAdd(off) 5435 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5436 for { 5437 sc := v.AuxInt 5438 s := v.Aux 5439 v_0 := v.Args[0] 5440 if v_0.Op != OpAMD64ADDQconst { 5441 break 5442 } 5443 off := v_0.AuxInt 5444 ptr := v_0.Args[0] 5445 mem := v.Args[1] 5446 if !(ValAndOff(sc).canAdd(off)) { 5447 break 5448 } 5449 v.reset(OpAMD64MOVLstoreconst) 5450 v.AuxInt = ValAndOff(sc).add(off) 5451 v.Aux = s 5452 v.AddArg(ptr) 5453 v.AddArg(mem) 5454 return true 5455 } 5456 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5457 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5458 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5459 for { 5460 sc := v.AuxInt 5461 sym1 := v.Aux 5462 v_0 := v.Args[0] 5463 if v_0.Op != OpAMD64LEAQ { 5464 break 5465 } 5466 off := v_0.AuxInt 5467 sym2 := v_0.Aux 5468 ptr := v_0.Args[0] 5469 mem := v.Args[1] 5470 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5471 break 5472 } 5473 v.reset(OpAMD64MOVLstoreconst) 5474 v.AuxInt = ValAndOff(sc).add(off) 5475 v.Aux = mergeSym(sym1, sym2) 5476 v.AddArg(ptr) 5477 v.AddArg(mem) 5478 return true 5479 } 5480 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5481 // cond: canMergeSym(sym1, sym2) 5482 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5483 for { 5484 x := v.AuxInt 5485 sym1 := v.Aux 5486 v_0 := v.Args[0] 5487 if v_0.Op != OpAMD64LEAQ1 { 5488 break 5489 } 5490 off := v_0.AuxInt 5491 sym2 := v_0.Aux 5492 ptr := v_0.Args[0] 5493 idx := v_0.Args[1] 5494 mem := v.Args[1] 5495 if !(canMergeSym(sym1, sym2)) { 5496 break 5497 } 5498 v.reset(OpAMD64MOVLstoreconstidx1) 5499 v.AuxInt = ValAndOff(x).add(off) 5500 v.Aux = mergeSym(sym1, sym2) 5501 v.AddArg(ptr) 5502 v.AddArg(idx) 5503 v.AddArg(mem) 5504 return true 5505 } 5506 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 5507 // cond: canMergeSym(sym1, sym2) 5508 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5509 for { 5510 x := v.AuxInt 5511 sym1 := v.Aux 5512 v_0 := v.Args[0] 5513 if v_0.Op != OpAMD64LEAQ4 { 5514 break 5515 } 5516 off := v_0.AuxInt 5517 sym2 := v_0.Aux 5518 ptr := v_0.Args[0] 5519 idx := v_0.Args[1] 5520 mem := v.Args[1] 5521 if !(canMergeSym(sym1, sym2)) { 5522 break 5523 } 5524 v.reset(OpAMD64MOVLstoreconstidx4) 5525 v.AuxInt = 
ValAndOff(x).add(off) 5526 v.Aux = mergeSym(sym1, sym2) 5527 v.AddArg(ptr) 5528 v.AddArg(idx) 5529 v.AddArg(mem) 5530 return true 5531 } 5532 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 5533 // cond: 5534 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 5535 for { 5536 x := v.AuxInt 5537 sym := v.Aux 5538 v_0 := v.Args[0] 5539 if v_0.Op != OpAMD64ADDQ { 5540 break 5541 } 5542 ptr := v_0.Args[0] 5543 idx := v_0.Args[1] 5544 mem := v.Args[1] 5545 v.reset(OpAMD64MOVLstoreconstidx1) 5546 v.AuxInt = x 5547 v.Aux = sym 5548 v.AddArg(ptr) 5549 v.AddArg(idx) 5550 v.AddArg(mem) 5551 return true 5552 } 5553 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 5554 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 5555 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5556 for { 5557 c := v.AuxInt 5558 s := v.Aux 5559 p := v.Args[0] 5560 x := v.Args[1] 5561 if x.Op != OpAMD64MOVLstoreconst { 5562 break 5563 } 5564 a := x.AuxInt 5565 if x.Aux != s { 5566 break 5567 } 5568 if p != x.Args[0] { 5569 break 5570 } 5571 mem := x.Args[1] 5572 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5573 break 5574 } 5575 v.reset(OpAMD64MOVQstore) 5576 v.AuxInt = ValAndOff(a).Off() 5577 v.Aux = s 5578 v.AddArg(p) 5579 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5580 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5581 v.AddArg(v0) 5582 v.AddArg(mem) 5583 return true 5584 } 5585 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 5586 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5587 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5588 for { 5589 sc := v.AuxInt 5590 sym1 := v.Aux 5591 v_0 := v.Args[0] 5592 if v_0.Op != OpAMD64LEAL { 5593 break 5594 } 5595 off := v_0.AuxInt 5596 sym2 := v_0.Aux 5597 ptr := v_0.Args[0] 5598 mem := v.Args[1] 5599 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5600 break 5601 } 5602 v.reset(OpAMD64MOVLstoreconst) 5603 v.AuxInt = ValAndOff(sc).add(off) 5604 v.Aux = mergeSym(sym1, sym2) 5605 v.AddArg(ptr) 5606 v.AddArg(mem) 5607 return true 5608 } 5609 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 5610 // cond: ValAndOff(sc).canAdd(off) 5611 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5612 for { 5613 sc := v.AuxInt 5614 s := v.Aux 5615 v_0 := v.Args[0] 5616 if v_0.Op != OpAMD64ADDLconst { 5617 break 5618 } 5619 off := v_0.AuxInt 5620 ptr := v_0.Args[0] 5621 mem := v.Args[1] 5622 if !(ValAndOff(sc).canAdd(off)) { 5623 break 5624 } 5625 v.reset(OpAMD64MOVLstoreconst) 5626 v.AuxInt = ValAndOff(sc).add(off) 5627 v.Aux = s 5628 v.AddArg(ptr) 5629 v.AddArg(mem) 5630 return true 5631 } 5632 return false 5633 } 5634 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { 5635 b := v.Block 5636 _ = b 5637 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 5638 // cond: 5639 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 5640 for { 5641 c := v.AuxInt 5642 sym := v.Aux 5643 ptr := v.Args[0] 5644 v_1 := v.Args[1] 5645 if v_1.Op != OpAMD64SHLQconst { 5646 break 5647 } 5648 if v_1.AuxInt != 2 { 5649 break 5650 } 5651 idx := v_1.Args[0] 5652 mem := v.Args[2] 5653 v.reset(OpAMD64MOVLstoreconstidx4) 5654 v.AuxInt = c 5655 v.Aux = sym 5656 v.AddArg(ptr) 5657 v.AddArg(idx) 5658 v.AddArg(mem) 5659 return true 5660 } 5661 // match: 
(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 5662 // cond: 5663 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5664 for { 5665 x := v.AuxInt 5666 sym := v.Aux 5667 v_0 := v.Args[0] 5668 if v_0.Op != OpAMD64ADDQconst { 5669 break 5670 } 5671 c := v_0.AuxInt 5672 ptr := v_0.Args[0] 5673 idx := v.Args[1] 5674 mem := v.Args[2] 5675 v.reset(OpAMD64MOVLstoreconstidx1) 5676 v.AuxInt = ValAndOff(x).add(c) 5677 v.Aux = sym 5678 v.AddArg(ptr) 5679 v.AddArg(idx) 5680 v.AddArg(mem) 5681 return true 5682 } 5683 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 5684 // cond: 5685 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5686 for { 5687 x := v.AuxInt 5688 sym := v.Aux 5689 ptr := v.Args[0] 5690 v_1 := v.Args[1] 5691 if v_1.Op != OpAMD64ADDQconst { 5692 break 5693 } 5694 c := v_1.AuxInt 5695 idx := v_1.Args[0] 5696 mem := v.Args[2] 5697 v.reset(OpAMD64MOVLstoreconstidx1) 5698 v.AuxInt = ValAndOff(x).add(c) 5699 v.Aux = sym 5700 v.AddArg(ptr) 5701 v.AddArg(idx) 5702 v.AddArg(mem) 5703 return true 5704 } 5705 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 5706 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 5707 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5708 for { 5709 c := v.AuxInt 5710 s := v.Aux 5711 p := v.Args[0] 5712 i := v.Args[1] 5713 x := v.Args[2] 5714 if x.Op != OpAMD64MOVLstoreconstidx1 { 5715 break 5716 } 5717 a := x.AuxInt 5718 if x.Aux != s { 5719 break 5720 } 5721 if p != x.Args[0] { 5722 break 5723 } 5724 if i != x.Args[1] { 5725 break 5726 } 5727 mem := x.Args[2] 5728 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5729 break 5730 } 5731 v.reset(OpAMD64MOVQstoreidx1) 5732 v.AuxInt = ValAndOff(a).Off() 5733 v.Aux = s 5734 v.AddArg(p) 5735 v.AddArg(i) 5736 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5737 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5738 v.AddArg(v0) 5739 v.AddArg(mem) 5740 return true 5741 } 5742 return false 5743 } 5744 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { 5745 b := v.Block 5746 _ = b 5747 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 5748 // cond: 5749 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5750 for { 5751 x := v.AuxInt 5752 sym := v.Aux 5753 v_0 := v.Args[0] 5754 if v_0.Op != OpAMD64ADDQconst { 5755 break 5756 } 5757 c := v_0.AuxInt 5758 ptr := v_0.Args[0] 5759 idx := v.Args[1] 5760 mem := v.Args[2] 5761 v.reset(OpAMD64MOVLstoreconstidx4) 5762 v.AuxInt = ValAndOff(x).add(c) 5763 v.Aux = sym 5764 v.AddArg(ptr) 5765 v.AddArg(idx) 5766 v.AddArg(mem) 5767 return true 5768 } 5769 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 5770 // cond: 5771 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 5772 for { 5773 x := v.AuxInt 5774 sym := v.Aux 5775 ptr := v.Args[0] 5776 v_1 := v.Args[1] 5777 if v_1.Op != OpAMD64ADDQconst { 5778 break 5779 } 5780 c := v_1.AuxInt 5781 idx := v_1.Args[0] 5782 mem := v.Args[2] 5783 v.reset(OpAMD64MOVLstoreconstidx4) 5784 v.AuxInt = ValAndOff(x).add(4 * c) 5785 v.Aux = sym 5786 v.AddArg(ptr) 5787 v.AddArg(idx) 5788 v.AddArg(mem) 5789 return true 5790 } 5791 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 5792 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 
== ValAndOff(c).Off() && clobber(x) 5793 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5794 for { 5795 c := v.AuxInt 5796 s := v.Aux 5797 p := v.Args[0] 5798 i := v.Args[1] 5799 x := v.Args[2] 5800 if x.Op != OpAMD64MOVLstoreconstidx4 { 5801 break 5802 } 5803 a := x.AuxInt 5804 if x.Aux != s { 5805 break 5806 } 5807 if p != x.Args[0] { 5808 break 5809 } 5810 if i != x.Args[1] { 5811 break 5812 } 5813 mem := x.Args[2] 5814 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5815 break 5816 } 5817 v.reset(OpAMD64MOVQstoreidx1) 5818 v.AuxInt = ValAndOff(a).Off() 5819 v.Aux = s 5820 v.AddArg(p) 5821 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) 5822 v0.AuxInt = 2 5823 v0.AddArg(i) 5824 v.AddArg(v0) 5825 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5826 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5827 v.AddArg(v1) 5828 v.AddArg(mem) 5829 return true 5830 } 5831 return false 5832 } 5833 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { 5834 b := v.Block 5835 _ = b 5836 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 5837 // cond: 5838 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 5839 for { 5840 c := v.AuxInt 5841 sym := v.Aux 5842 ptr := v.Args[0] 5843 v_1 := v.Args[1] 5844 if v_1.Op != OpAMD64SHLQconst { 5845 break 5846 } 5847 if v_1.AuxInt != 2 { 5848 break 5849 } 5850 idx := v_1.Args[0] 5851 val := v.Args[2] 5852 mem := v.Args[3] 5853 v.reset(OpAMD64MOVLstoreidx4) 5854 v.AuxInt = c 5855 v.Aux = sym 5856 v.AddArg(ptr) 5857 v.AddArg(idx) 5858 v.AddArg(val) 5859 v.AddArg(mem) 5860 return true 5861 } 5862 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 5863 // cond: 5864 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 5865 for { 5866 c := v.AuxInt 5867 sym := v.Aux 5868 v_0 := v.Args[0] 5869 if v_0.Op != OpAMD64ADDQconst { 5870 break 5871 } 5872 d := v_0.AuxInt 5873 ptr := v_0.Args[0] 5874 idx := v.Args[1] 5875 val := v.Args[2] 5876 mem := v.Args[3] 5877 v.reset(OpAMD64MOVLstoreidx1) 5878 v.AuxInt = c + d 5879 v.Aux = sym 5880 v.AddArg(ptr) 5881 v.AddArg(idx) 5882 v.AddArg(val) 5883 v.AddArg(mem) 5884 return true 5885 } 5886 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 5887 // cond: 5888 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 5889 for { 5890 c := v.AuxInt 5891 sym := v.Aux 5892 ptr := v.Args[0] 5893 v_1 := v.Args[1] 5894 if v_1.Op != OpAMD64ADDQconst { 5895 break 5896 } 5897 d := v_1.AuxInt 5898 idx := v_1.Args[0] 5899 val := v.Args[2] 5900 mem := v.Args[3] 5901 v.reset(OpAMD64MOVLstoreidx1) 5902 v.AuxInt = c + d 5903 v.Aux = sym 5904 v.AddArg(ptr) 5905 v.AddArg(idx) 5906 v.AddArg(val) 5907 v.AddArg(mem) 5908 return true 5909 } 5910 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 5911 // cond: x.Uses == 1 && clobber(x) 5912 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 5913 for { 5914 i := v.AuxInt 5915 s := v.Aux 5916 p := v.Args[0] 5917 idx := v.Args[1] 5918 v_2 := v.Args[2] 5919 if v_2.Op != OpAMD64SHRQconst { 5920 break 5921 } 5922 if v_2.AuxInt != 32 { 5923 break 5924 } 5925 w := v_2.Args[0] 5926 x := v.Args[3] 5927 if x.Op != OpAMD64MOVLstoreidx1 { 5928 break 5929 } 5930 if x.AuxInt != i-4 { 5931 break 5932 } 5933 if x.Aux != s { 5934 break 5935 } 5936 if p != x.Args[0] { 5937 break 5938 } 5939 if idx != x.Args[1] { 5940 break 5941 } 
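// Store combining: this rule and the SHRQconst [j]/[j-32] variant after it recognize
// the two 4-byte halves of a 64-bit value being stored at offsets i-4 and i with the
// same base, symbol and index, and replace the pair with one MOVQstoreidx1 at i-4.
// The older store x must have exactly one use (x.Uses == 1) so it can be marked dead
// via clobber(x) without changing the program.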
5942 if w != x.Args[2] { 5943 break 5944 } 5945 mem := x.Args[3] 5946 if !(x.Uses == 1 && clobber(x)) { 5947 break 5948 } 5949 v.reset(OpAMD64MOVQstoreidx1) 5950 v.AuxInt = i - 4 5951 v.Aux = s 5952 v.AddArg(p) 5953 v.AddArg(idx) 5954 v.AddArg(w) 5955 v.AddArg(mem) 5956 return true 5957 } 5958 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 5959 // cond: x.Uses == 1 && clobber(x) 5960 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 5961 for { 5962 i := v.AuxInt 5963 s := v.Aux 5964 p := v.Args[0] 5965 idx := v.Args[1] 5966 v_2 := v.Args[2] 5967 if v_2.Op != OpAMD64SHRQconst { 5968 break 5969 } 5970 j := v_2.AuxInt 5971 w := v_2.Args[0] 5972 x := v.Args[3] 5973 if x.Op != OpAMD64MOVLstoreidx1 { 5974 break 5975 } 5976 if x.AuxInt != i-4 { 5977 break 5978 } 5979 if x.Aux != s { 5980 break 5981 } 5982 if p != x.Args[0] { 5983 break 5984 } 5985 if idx != x.Args[1] { 5986 break 5987 } 5988 w0 := x.Args[2] 5989 if w0.Op != OpAMD64SHRQconst { 5990 break 5991 } 5992 if w0.AuxInt != j-32 { 5993 break 5994 } 5995 if w != w0.Args[0] { 5996 break 5997 } 5998 mem := x.Args[3] 5999 if !(x.Uses == 1 && clobber(x)) { 6000 break 6001 } 6002 v.reset(OpAMD64MOVQstoreidx1) 6003 v.AuxInt = i - 4 6004 v.Aux = s 6005 v.AddArg(p) 6006 v.AddArg(idx) 6007 v.AddArg(w0) 6008 v.AddArg(mem) 6009 return true 6010 } 6011 return false 6012 } 6013 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { 6014 b := v.Block 6015 _ = b 6016 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6017 // cond: 6018 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 6019 for { 6020 c := v.AuxInt 6021 sym := v.Aux 6022 v_0 := v.Args[0] 6023 if v_0.Op != OpAMD64ADDQconst { 6024 break 6025 } 6026 d := v_0.AuxInt 6027 ptr := v_0.Args[0] 6028 idx := v.Args[1] 6029 val := v.Args[2] 6030 mem := v.Args[3] 6031 v.reset(OpAMD64MOVLstoreidx4) 6032 v.AuxInt = c + d 6033 v.Aux = sym 6034 v.AddArg(ptr) 6035 v.AddArg(idx) 6036 v.AddArg(val) 6037 v.AddArg(mem) 6038 return true 6039 } 6040 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6041 // cond: 6042 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 6043 for { 6044 c := v.AuxInt 6045 sym := v.Aux 6046 ptr := v.Args[0] 6047 v_1 := v.Args[1] 6048 if v_1.Op != OpAMD64ADDQconst { 6049 break 6050 } 6051 d := v_1.AuxInt 6052 idx := v_1.Args[0] 6053 val := v.Args[2] 6054 mem := v.Args[3] 6055 v.reset(OpAMD64MOVLstoreidx4) 6056 v.AuxInt = c + 4*d 6057 v.Aux = sym 6058 v.AddArg(ptr) 6059 v.AddArg(idx) 6060 v.AddArg(val) 6061 v.AddArg(mem) 6062 return true 6063 } 6064 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 6065 // cond: x.Uses == 1 && clobber(x) 6066 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 6067 for { 6068 i := v.AuxInt 6069 s := v.Aux 6070 p := v.Args[0] 6071 idx := v.Args[1] 6072 v_2 := v.Args[2] 6073 if v_2.Op != OpAMD64SHRQconst { 6074 break 6075 } 6076 if v_2.AuxInt != 32 { 6077 break 6078 } 6079 w := v_2.Args[0] 6080 x := v.Args[3] 6081 if x.Op != OpAMD64MOVLstoreidx4 { 6082 break 6083 } 6084 if x.AuxInt != i-4 { 6085 break 6086 } 6087 if x.Aux != s { 6088 break 6089 } 6090 if p != x.Args[0] { 6091 break 6092 } 6093 if idx != x.Args[1] { 6094 break 6095 } 6096 if w != x.Args[2] { 6097 break 6098 } 6099 mem := x.Args[3] 6100 if !(x.Uses == 1 && clobber(x)) { 6101 break 6102 } 6103 v.reset(OpAMD64MOVQstoreidx1) 6104 v.AuxInt = i - 4 6105 v.Aux = s 6106 v.AddArg(p) 6107 
v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 6108 v0.AuxInt = 2 6109 v0.AddArg(idx) 6110 v.AddArg(v0) 6111 v.AddArg(w) 6112 v.AddArg(mem) 6113 return true 6114 } 6115 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 6116 // cond: x.Uses == 1 && clobber(x) 6117 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 6118 for { 6119 i := v.AuxInt 6120 s := v.Aux 6121 p := v.Args[0] 6122 idx := v.Args[1] 6123 v_2 := v.Args[2] 6124 if v_2.Op != OpAMD64SHRQconst { 6125 break 6126 } 6127 j := v_2.AuxInt 6128 w := v_2.Args[0] 6129 x := v.Args[3] 6130 if x.Op != OpAMD64MOVLstoreidx4 { 6131 break 6132 } 6133 if x.AuxInt != i-4 { 6134 break 6135 } 6136 if x.Aux != s { 6137 break 6138 } 6139 if p != x.Args[0] { 6140 break 6141 } 6142 if idx != x.Args[1] { 6143 break 6144 } 6145 w0 := x.Args[2] 6146 if w0.Op != OpAMD64SHRQconst { 6147 break 6148 } 6149 if w0.AuxInt != j-32 { 6150 break 6151 } 6152 if w != w0.Args[0] { 6153 break 6154 } 6155 mem := x.Args[3] 6156 if !(x.Uses == 1 && clobber(x)) { 6157 break 6158 } 6159 v.reset(OpAMD64MOVQstoreidx1) 6160 v.AuxInt = i - 4 6161 v.Aux = s 6162 v.AddArg(p) 6163 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 6164 v0.AuxInt = 2 6165 v0.AddArg(idx) 6166 v.AddArg(v0) 6167 v.AddArg(w0) 6168 v.AddArg(mem) 6169 return true 6170 } 6171 return false 6172 } 6173 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { 6174 b := v.Block 6175 _ = b 6176 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 6177 // cond: is32Bit(off1+off2) 6178 // result: (MOVOload [off1+off2] {sym} ptr mem) 6179 for { 6180 off1 := v.AuxInt 6181 sym := v.Aux 6182 v_0 := v.Args[0] 6183 if v_0.Op != OpAMD64ADDQconst { 6184 break 6185 } 6186 off2 := v_0.AuxInt 6187 ptr := v_0.Args[0] 6188 mem := v.Args[1] 6189 if !(is32Bit(off1 + off2)) { 6190 break 6191 } 6192 v.reset(OpAMD64MOVOload) 6193 v.AuxInt = off1 + off2 6194 v.Aux = sym 6195 v.AddArg(ptr) 6196 v.AddArg(mem) 6197 return true 6198 } 6199 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6200 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6201 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6202 for { 6203 off1 := v.AuxInt 6204 sym1 := v.Aux 6205 v_0 := v.Args[0] 6206 if v_0.Op != OpAMD64LEAQ { 6207 break 6208 } 6209 off2 := v_0.AuxInt 6210 sym2 := v_0.Aux 6211 base := v_0.Args[0] 6212 mem := v.Args[1] 6213 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6214 break 6215 } 6216 v.reset(OpAMD64MOVOload) 6217 v.AuxInt = off1 + off2 6218 v.Aux = mergeSym(sym1, sym2) 6219 v.AddArg(base) 6220 v.AddArg(mem) 6221 return true 6222 } 6223 return false 6224 } 6225 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool { 6226 b := v.Block 6227 _ = b 6228 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6229 // cond: is32Bit(off1+off2) 6230 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 6231 for { 6232 off1 := v.AuxInt 6233 sym := v.Aux 6234 v_0 := v.Args[0] 6235 if v_0.Op != OpAMD64ADDQconst { 6236 break 6237 } 6238 off2 := v_0.AuxInt 6239 ptr := v_0.Args[0] 6240 val := v.Args[1] 6241 mem := v.Args[2] 6242 if !(is32Bit(off1 + off2)) { 6243 break 6244 } 6245 v.reset(OpAMD64MOVOstore) 6246 v.AuxInt = off1 + off2 6247 v.Aux = sym 6248 v.AddArg(ptr) 6249 v.AddArg(val) 6250 v.AddArg(mem) 6251 return true 6252 } 6253 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6254 // cond: is32Bit(off1+off2) && canMergeSym(sym1, 
sym2) 6255 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6256 for { 6257 off1 := v.AuxInt 6258 sym1 := v.Aux 6259 v_0 := v.Args[0] 6260 if v_0.Op != OpAMD64LEAQ { 6261 break 6262 } 6263 off2 := v_0.AuxInt 6264 sym2 := v_0.Aux 6265 base := v_0.Args[0] 6266 val := v.Args[1] 6267 mem := v.Args[2] 6268 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6269 break 6270 } 6271 v.reset(OpAMD64MOVOstore) 6272 v.AuxInt = off1 + off2 6273 v.Aux = mergeSym(sym1, sym2) 6274 v.AddArg(base) 6275 v.AddArg(val) 6276 v.AddArg(mem) 6277 return true 6278 } 6279 return false 6280 } 6281 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool { 6282 b := v.Block 6283 _ = b 6284 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 6285 // cond: is32Bit(off1+off2) 6286 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 6287 for { 6288 off1 := v.AuxInt 6289 sym := v.Aux 6290 v_0 := v.Args[0] 6291 if v_0.Op != OpAMD64ADDQconst { 6292 break 6293 } 6294 off2 := v_0.AuxInt 6295 ptr := v_0.Args[0] 6296 mem := v.Args[1] 6297 if !(is32Bit(off1 + off2)) { 6298 break 6299 } 6300 v.reset(OpAMD64MOVQatomicload) 6301 v.AuxInt = off1 + off2 6302 v.Aux = sym 6303 v.AddArg(ptr) 6304 v.AddArg(mem) 6305 return true 6306 } 6307 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 6308 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6309 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 6310 for { 6311 off1 := v.AuxInt 6312 sym1 := v.Aux 6313 v_0 := v.Args[0] 6314 if v_0.Op != OpAMD64LEAQ { 6315 break 6316 } 6317 off2 := v_0.AuxInt 6318 sym2 := v_0.Aux 6319 ptr := v_0.Args[0] 6320 mem := v.Args[1] 6321 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6322 break 6323 } 6324 v.reset(OpAMD64MOVQatomicload) 6325 v.AuxInt = off1 + off2 6326 v.Aux = mergeSym(sym1, sym2) 6327 v.AddArg(ptr) 6328 v.AddArg(mem) 6329 return true 6330 } 6331 return false 6332 } 6333 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { 6334 b := v.Block 6335 _ = b 6336 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 6337 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6338 // result: x 6339 for { 6340 off := v.AuxInt 6341 sym := v.Aux 6342 ptr := v.Args[0] 6343 v_1 := v.Args[1] 6344 if v_1.Op != OpAMD64MOVQstore { 6345 break 6346 } 6347 off2 := v_1.AuxInt 6348 sym2 := v_1.Aux 6349 ptr2 := v_1.Args[0] 6350 x := v_1.Args[1] 6351 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6352 break 6353 } 6354 v.reset(OpCopy) 6355 v.Type = x.Type 6356 v.AddArg(x) 6357 return true 6358 } 6359 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 6360 // cond: is32Bit(off1+off2) 6361 // result: (MOVQload [off1+off2] {sym} ptr mem) 6362 for { 6363 off1 := v.AuxInt 6364 sym := v.Aux 6365 v_0 := v.Args[0] 6366 if v_0.Op != OpAMD64ADDQconst { 6367 break 6368 } 6369 off2 := v_0.AuxInt 6370 ptr := v_0.Args[0] 6371 mem := v.Args[1] 6372 if !(is32Bit(off1 + off2)) { 6373 break 6374 } 6375 v.reset(OpAMD64MOVQload) 6376 v.AuxInt = off1 + off2 6377 v.Aux = sym 6378 v.AddArg(ptr) 6379 v.AddArg(mem) 6380 return true 6381 } 6382 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6383 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6384 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6385 for { 6386 off1 := v.AuxInt 6387 sym1 := v.Aux 6388 v_0 := v.Args[0] 6389 if v_0.Op != OpAMD64LEAQ { 6390 break 6391 } 6392 off2 := v_0.AuxInt 6393 sym2 := v_0.Aux 6394 base := 
v_0.Args[0] 6395 mem := v.Args[1] 6396 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6397 break 6398 } 6399 v.reset(OpAMD64MOVQload) 6400 v.AuxInt = off1 + off2 6401 v.Aux = mergeSym(sym1, sym2) 6402 v.AddArg(base) 6403 v.AddArg(mem) 6404 return true 6405 } 6406 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6407 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6408 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6409 for { 6410 off1 := v.AuxInt 6411 sym1 := v.Aux 6412 v_0 := v.Args[0] 6413 if v_0.Op != OpAMD64LEAQ1 { 6414 break 6415 } 6416 off2 := v_0.AuxInt 6417 sym2 := v_0.Aux 6418 ptr := v_0.Args[0] 6419 idx := v_0.Args[1] 6420 mem := v.Args[1] 6421 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6422 break 6423 } 6424 v.reset(OpAMD64MOVQloadidx1) 6425 v.AuxInt = off1 + off2 6426 v.Aux = mergeSym(sym1, sym2) 6427 v.AddArg(ptr) 6428 v.AddArg(idx) 6429 v.AddArg(mem) 6430 return true 6431 } 6432 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 6433 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6434 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6435 for { 6436 off1 := v.AuxInt 6437 sym1 := v.Aux 6438 v_0 := v.Args[0] 6439 if v_0.Op != OpAMD64LEAQ8 { 6440 break 6441 } 6442 off2 := v_0.AuxInt 6443 sym2 := v_0.Aux 6444 ptr := v_0.Args[0] 6445 idx := v_0.Args[1] 6446 mem := v.Args[1] 6447 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6448 break 6449 } 6450 v.reset(OpAMD64MOVQloadidx8) 6451 v.AuxInt = off1 + off2 6452 v.Aux = mergeSym(sym1, sym2) 6453 v.AddArg(ptr) 6454 v.AddArg(idx) 6455 v.AddArg(mem) 6456 return true 6457 } 6458 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 6459 // cond: ptr.Op != OpSB 6460 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 6461 for { 6462 off := v.AuxInt 6463 sym := v.Aux 6464 v_0 := v.Args[0] 6465 if v_0.Op != OpAMD64ADDQ { 6466 break 6467 } 6468 ptr := v_0.Args[0] 6469 idx := v_0.Args[1] 6470 mem := v.Args[1] 6471 if !(ptr.Op != OpSB) { 6472 break 6473 } 6474 v.reset(OpAMD64MOVQloadidx1) 6475 v.AuxInt = off 6476 v.Aux = sym 6477 v.AddArg(ptr) 6478 v.AddArg(idx) 6479 v.AddArg(mem) 6480 return true 6481 } 6482 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 6483 // cond: canMergeSym(sym1, sym2) 6484 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6485 for { 6486 off1 := v.AuxInt 6487 sym1 := v.Aux 6488 v_0 := v.Args[0] 6489 if v_0.Op != OpAMD64LEAL { 6490 break 6491 } 6492 off2 := v_0.AuxInt 6493 sym2 := v_0.Aux 6494 base := v_0.Args[0] 6495 mem := v.Args[1] 6496 if !(canMergeSym(sym1, sym2)) { 6497 break 6498 } 6499 v.reset(OpAMD64MOVQload) 6500 v.AuxInt = off1 + off2 6501 v.Aux = mergeSym(sym1, sym2) 6502 v.AddArg(base) 6503 v.AddArg(mem) 6504 return true 6505 } 6506 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 6507 // cond: is32Bit(off1+off2) 6508 // result: (MOVQload [off1+off2] {sym} ptr mem) 6509 for { 6510 off1 := v.AuxInt 6511 sym := v.Aux 6512 v_0 := v.Args[0] 6513 if v_0.Op != OpAMD64ADDLconst { 6514 break 6515 } 6516 off2 := v_0.AuxInt 6517 ptr := v_0.Args[0] 6518 mem := v.Args[1] 6519 if !(is32Bit(off1 + off2)) { 6520 break 6521 } 6522 v.reset(OpAMD64MOVQload) 6523 v.AuxInt = off1 + off2 6524 v.Aux = sym 6525 v.AddArg(ptr) 6526 v.AddArg(mem) 6527 return true 6528 } 6529 return false 6530 } 6531 func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool { 6532 b := v.Block 6533 _ = b 6534 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 
6535 // cond: 6536 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 6537 for { 6538 c := v.AuxInt 6539 sym := v.Aux 6540 ptr := v.Args[0] 6541 v_1 := v.Args[1] 6542 if v_1.Op != OpAMD64SHLQconst { 6543 break 6544 } 6545 if v_1.AuxInt != 3 { 6546 break 6547 } 6548 idx := v_1.Args[0] 6549 mem := v.Args[2] 6550 v.reset(OpAMD64MOVQloadidx8) 6551 v.AuxInt = c 6552 v.Aux = sym 6553 v.AddArg(ptr) 6554 v.AddArg(idx) 6555 v.AddArg(mem) 6556 return true 6557 } 6558 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 6559 // cond: 6560 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 6561 for { 6562 c := v.AuxInt 6563 sym := v.Aux 6564 v_0 := v.Args[0] 6565 if v_0.Op != OpAMD64ADDQconst { 6566 break 6567 } 6568 d := v_0.AuxInt 6569 ptr := v_0.Args[0] 6570 idx := v.Args[1] 6571 mem := v.Args[2] 6572 v.reset(OpAMD64MOVQloadidx1) 6573 v.AuxInt = c + d 6574 v.Aux = sym 6575 v.AddArg(ptr) 6576 v.AddArg(idx) 6577 v.AddArg(mem) 6578 return true 6579 } 6580 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 6581 // cond: 6582 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 6583 for { 6584 c := v.AuxInt 6585 sym := v.Aux 6586 ptr := v.Args[0] 6587 v_1 := v.Args[1] 6588 if v_1.Op != OpAMD64ADDQconst { 6589 break 6590 } 6591 d := v_1.AuxInt 6592 idx := v_1.Args[0] 6593 mem := v.Args[2] 6594 v.reset(OpAMD64MOVQloadidx1) 6595 v.AuxInt = c + d 6596 v.Aux = sym 6597 v.AddArg(ptr) 6598 v.AddArg(idx) 6599 v.AddArg(mem) 6600 return true 6601 } 6602 return false 6603 } 6604 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { 6605 b := v.Block 6606 _ = b 6607 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 6608 // cond: 6609 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 6610 for { 6611 c := v.AuxInt 6612 sym := v.Aux 6613 v_0 := v.Args[0] 6614 if v_0.Op != OpAMD64ADDQconst { 6615 break 6616 } 6617 d := v_0.AuxInt 6618 ptr := v_0.Args[0] 6619 idx := v.Args[1] 6620 mem := v.Args[2] 6621 v.reset(OpAMD64MOVQloadidx8) 6622 v.AuxInt = c + d 6623 v.Aux = sym 6624 v.AddArg(ptr) 6625 v.AddArg(idx) 6626 v.AddArg(mem) 6627 return true 6628 } 6629 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 6630 // cond: 6631 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 6632 for { 6633 c := v.AuxInt 6634 sym := v.Aux 6635 ptr := v.Args[0] 6636 v_1 := v.Args[1] 6637 if v_1.Op != OpAMD64ADDQconst { 6638 break 6639 } 6640 d := v_1.AuxInt 6641 idx := v_1.Args[0] 6642 mem := v.Args[2] 6643 v.reset(OpAMD64MOVQloadidx8) 6644 v.AuxInt = c + 8*d 6645 v.Aux = sym 6646 v.AddArg(ptr) 6647 v.AddArg(idx) 6648 v.AddArg(mem) 6649 return true 6650 } 6651 return false 6652 } 6653 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { 6654 b := v.Block 6655 _ = b 6656 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6657 // cond: is32Bit(off1+off2) 6658 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 6659 for { 6660 off1 := v.AuxInt 6661 sym := v.Aux 6662 v_0 := v.Args[0] 6663 if v_0.Op != OpAMD64ADDQconst { 6664 break 6665 } 6666 off2 := v_0.AuxInt 6667 ptr := v_0.Args[0] 6668 val := v.Args[1] 6669 mem := v.Args[2] 6670 if !(is32Bit(off1 + off2)) { 6671 break 6672 } 6673 v.reset(OpAMD64MOVQstore) 6674 v.AuxInt = off1 + off2 6675 v.Aux = sym 6676 v.AddArg(ptr) 6677 v.AddArg(val) 6678 v.AddArg(mem) 6679 return true 6680 } 6681 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 6682 // cond: validValAndOff(c,off) 6683 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 6684 for { 6685 off := v.AuxInt 6686 sym := v.Aux 
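// Constant-store packing: makeValAndOff packs both the stored constant and the store
// offset into the single AuxInt of MOVQstoreconst, and validValAndOff first checks
// that both halves fit (each must be representable in 32 bits, roughly speaking).
// The MOVQstoreconst rules further down then only need the ValAndOff(sc) accessors
// such as .canAdd and .add to keep folding address arithmetic into that packed value.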
6687 ptr := v.Args[0] 6688 v_1 := v.Args[1] 6689 if v_1.Op != OpAMD64MOVQconst { 6690 break 6691 } 6692 c := v_1.AuxInt 6693 mem := v.Args[2] 6694 if !(validValAndOff(c, off)) { 6695 break 6696 } 6697 v.reset(OpAMD64MOVQstoreconst) 6698 v.AuxInt = makeValAndOff(c, off) 6699 v.Aux = sym 6700 v.AddArg(ptr) 6701 v.AddArg(mem) 6702 return true 6703 } 6704 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6705 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6706 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6707 for { 6708 off1 := v.AuxInt 6709 sym1 := v.Aux 6710 v_0 := v.Args[0] 6711 if v_0.Op != OpAMD64LEAQ { 6712 break 6713 } 6714 off2 := v_0.AuxInt 6715 sym2 := v_0.Aux 6716 base := v_0.Args[0] 6717 val := v.Args[1] 6718 mem := v.Args[2] 6719 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6720 break 6721 } 6722 v.reset(OpAMD64MOVQstore) 6723 v.AuxInt = off1 + off2 6724 v.Aux = mergeSym(sym1, sym2) 6725 v.AddArg(base) 6726 v.AddArg(val) 6727 v.AddArg(mem) 6728 return true 6729 } 6730 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 6731 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6732 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6733 for { 6734 off1 := v.AuxInt 6735 sym1 := v.Aux 6736 v_0 := v.Args[0] 6737 if v_0.Op != OpAMD64LEAQ1 { 6738 break 6739 } 6740 off2 := v_0.AuxInt 6741 sym2 := v_0.Aux 6742 ptr := v_0.Args[0] 6743 idx := v_0.Args[1] 6744 val := v.Args[1] 6745 mem := v.Args[2] 6746 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6747 break 6748 } 6749 v.reset(OpAMD64MOVQstoreidx1) 6750 v.AuxInt = off1 + off2 6751 v.Aux = mergeSym(sym1, sym2) 6752 v.AddArg(ptr) 6753 v.AddArg(idx) 6754 v.AddArg(val) 6755 v.AddArg(mem) 6756 return true 6757 } 6758 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 6759 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6760 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6761 for { 6762 off1 := v.AuxInt 6763 sym1 := v.Aux 6764 v_0 := v.Args[0] 6765 if v_0.Op != OpAMD64LEAQ8 { 6766 break 6767 } 6768 off2 := v_0.AuxInt 6769 sym2 := v_0.Aux 6770 ptr := v_0.Args[0] 6771 idx := v_0.Args[1] 6772 val := v.Args[1] 6773 mem := v.Args[2] 6774 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6775 break 6776 } 6777 v.reset(OpAMD64MOVQstoreidx8) 6778 v.AuxInt = off1 + off2 6779 v.Aux = mergeSym(sym1, sym2) 6780 v.AddArg(ptr) 6781 v.AddArg(idx) 6782 v.AddArg(val) 6783 v.AddArg(mem) 6784 return true 6785 } 6786 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 6787 // cond: ptr.Op != OpSB 6788 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 6789 for { 6790 off := v.AuxInt 6791 sym := v.Aux 6792 v_0 := v.Args[0] 6793 if v_0.Op != OpAMD64ADDQ { 6794 break 6795 } 6796 ptr := v_0.Args[0] 6797 idx := v_0.Args[1] 6798 val := v.Args[1] 6799 mem := v.Args[2] 6800 if !(ptr.Op != OpSB) { 6801 break 6802 } 6803 v.reset(OpAMD64MOVQstoreidx1) 6804 v.AuxInt = off 6805 v.Aux = sym 6806 v.AddArg(ptr) 6807 v.AddArg(idx) 6808 v.AddArg(val) 6809 v.AddArg(mem) 6810 return true 6811 } 6812 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 6813 // cond: canMergeSym(sym1, sym2) 6814 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6815 for { 6816 off1 := v.AuxInt 6817 sym1 := v.Aux 6818 v_0 := v.Args[0] 6819 if v_0.Op != OpAMD64LEAL { 6820 break 6821 } 6822 off2 := v_0.AuxInt 6823 sym2 := v_0.Aux 6824 base := v_0.Args[0] 6825 val := v.Args[1] 6826 
mem := v.Args[2] 6827 if !(canMergeSym(sym1, sym2)) { 6828 break 6829 } 6830 v.reset(OpAMD64MOVQstore) 6831 v.AuxInt = off1 + off2 6832 v.Aux = mergeSym(sym1, sym2) 6833 v.AddArg(base) 6834 v.AddArg(val) 6835 v.AddArg(mem) 6836 return true 6837 } 6838 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 6839 // cond: is32Bit(off1+off2) 6840 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 6841 for { 6842 off1 := v.AuxInt 6843 sym := v.Aux 6844 v_0 := v.Args[0] 6845 if v_0.Op != OpAMD64ADDLconst { 6846 break 6847 } 6848 off2 := v_0.AuxInt 6849 ptr := v_0.Args[0] 6850 val := v.Args[1] 6851 mem := v.Args[2] 6852 if !(is32Bit(off1 + off2)) { 6853 break 6854 } 6855 v.reset(OpAMD64MOVQstore) 6856 v.AuxInt = off1 + off2 6857 v.Aux = sym 6858 v.AddArg(ptr) 6859 v.AddArg(val) 6860 v.AddArg(mem) 6861 return true 6862 } 6863 return false 6864 } 6865 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { 6866 b := v.Block 6867 _ = b 6868 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 6869 // cond: ValAndOff(sc).canAdd(off) 6870 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6871 for { 6872 sc := v.AuxInt 6873 s := v.Aux 6874 v_0 := v.Args[0] 6875 if v_0.Op != OpAMD64ADDQconst { 6876 break 6877 } 6878 off := v_0.AuxInt 6879 ptr := v_0.Args[0] 6880 mem := v.Args[1] 6881 if !(ValAndOff(sc).canAdd(off)) { 6882 break 6883 } 6884 v.reset(OpAMD64MOVQstoreconst) 6885 v.AuxInt = ValAndOff(sc).add(off) 6886 v.Aux = s 6887 v.AddArg(ptr) 6888 v.AddArg(mem) 6889 return true 6890 } 6891 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 6892 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6893 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6894 for { 6895 sc := v.AuxInt 6896 sym1 := v.Aux 6897 v_0 := v.Args[0] 6898 if v_0.Op != OpAMD64LEAQ { 6899 break 6900 } 6901 off := v_0.AuxInt 6902 sym2 := v_0.Aux 6903 ptr := v_0.Args[0] 6904 mem := v.Args[1] 6905 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6906 break 6907 } 6908 v.reset(OpAMD64MOVQstoreconst) 6909 v.AuxInt = ValAndOff(sc).add(off) 6910 v.Aux = mergeSym(sym1, sym2) 6911 v.AddArg(ptr) 6912 v.AddArg(mem) 6913 return true 6914 } 6915 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 6916 // cond: canMergeSym(sym1, sym2) 6917 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6918 for { 6919 x := v.AuxInt 6920 sym1 := v.Aux 6921 v_0 := v.Args[0] 6922 if v_0.Op != OpAMD64LEAQ1 { 6923 break 6924 } 6925 off := v_0.AuxInt 6926 sym2 := v_0.Aux 6927 ptr := v_0.Args[0] 6928 idx := v_0.Args[1] 6929 mem := v.Args[1] 6930 if !(canMergeSym(sym1, sym2)) { 6931 break 6932 } 6933 v.reset(OpAMD64MOVQstoreconstidx1) 6934 v.AuxInt = ValAndOff(x).add(off) 6935 v.Aux = mergeSym(sym1, sym2) 6936 v.AddArg(ptr) 6937 v.AddArg(idx) 6938 v.AddArg(mem) 6939 return true 6940 } 6941 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 6942 // cond: canMergeSym(sym1, sym2) 6943 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6944 for { 6945 x := v.AuxInt 6946 sym1 := v.Aux 6947 v_0 := v.Args[0] 6948 if v_0.Op != OpAMD64LEAQ8 { 6949 break 6950 } 6951 off := v_0.AuxInt 6952 sym2 := v_0.Aux 6953 ptr := v_0.Args[0] 6954 idx := v_0.Args[1] 6955 mem := v.Args[1] 6956 if !(canMergeSym(sym1, sym2)) { 6957 break 6958 } 6959 v.reset(OpAMD64MOVQstoreconstidx8) 6960 v.AuxInt = ValAndOff(x).add(off) 6961 v.Aux = mergeSym(sym1, sym2) 
6962 v.AddArg(ptr) 6963 v.AddArg(idx) 6964 v.AddArg(mem) 6965 return true 6966 } 6967 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 6968 // cond: 6969 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 6970 for { 6971 x := v.AuxInt 6972 sym := v.Aux 6973 v_0 := v.Args[0] 6974 if v_0.Op != OpAMD64ADDQ { 6975 break 6976 } 6977 ptr := v_0.Args[0] 6978 idx := v_0.Args[1] 6979 mem := v.Args[1] 6980 v.reset(OpAMD64MOVQstoreconstidx1) 6981 v.AuxInt = x 6982 v.Aux = sym 6983 v.AddArg(ptr) 6984 v.AddArg(idx) 6985 v.AddArg(mem) 6986 return true 6987 } 6988 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 6989 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6990 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6991 for { 6992 sc := v.AuxInt 6993 sym1 := v.Aux 6994 v_0 := v.Args[0] 6995 if v_0.Op != OpAMD64LEAL { 6996 break 6997 } 6998 off := v_0.AuxInt 6999 sym2 := v_0.Aux 7000 ptr := v_0.Args[0] 7001 mem := v.Args[1] 7002 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7003 break 7004 } 7005 v.reset(OpAMD64MOVQstoreconst) 7006 v.AuxInt = ValAndOff(sc).add(off) 7007 v.Aux = mergeSym(sym1, sym2) 7008 v.AddArg(ptr) 7009 v.AddArg(mem) 7010 return true 7011 } 7012 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 7013 // cond: ValAndOff(sc).canAdd(off) 7014 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7015 for { 7016 sc := v.AuxInt 7017 s := v.Aux 7018 v_0 := v.Args[0] 7019 if v_0.Op != OpAMD64ADDLconst { 7020 break 7021 } 7022 off := v_0.AuxInt 7023 ptr := v_0.Args[0] 7024 mem := v.Args[1] 7025 if !(ValAndOff(sc).canAdd(off)) { 7026 break 7027 } 7028 v.reset(OpAMD64MOVQstoreconst) 7029 v.AuxInt = ValAndOff(sc).add(off) 7030 v.Aux = s 7031 v.AddArg(ptr) 7032 v.AddArg(mem) 7033 return true 7034 } 7035 return false 7036 } 7037 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool { 7038 b := v.Block 7039 _ = b 7040 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7041 // cond: 7042 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 7043 for { 7044 c := v.AuxInt 7045 sym := v.Aux 7046 ptr := v.Args[0] 7047 v_1 := v.Args[1] 7048 if v_1.Op != OpAMD64SHLQconst { 7049 break 7050 } 7051 if v_1.AuxInt != 3 { 7052 break 7053 } 7054 idx := v_1.Args[0] 7055 mem := v.Args[2] 7056 v.reset(OpAMD64MOVQstoreconstidx8) 7057 v.AuxInt = c 7058 v.Aux = sym 7059 v.AddArg(ptr) 7060 v.AddArg(idx) 7061 v.AddArg(mem) 7062 return true 7063 } 7064 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7065 // cond: 7066 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7067 for { 7068 x := v.AuxInt 7069 sym := v.Aux 7070 v_0 := v.Args[0] 7071 if v_0.Op != OpAMD64ADDQconst { 7072 break 7073 } 7074 c := v_0.AuxInt 7075 ptr := v_0.Args[0] 7076 idx := v.Args[1] 7077 mem := v.Args[2] 7078 v.reset(OpAMD64MOVQstoreconstidx1) 7079 v.AuxInt = ValAndOff(x).add(c) 7080 v.Aux = sym 7081 v.AddArg(ptr) 7082 v.AddArg(idx) 7083 v.AddArg(mem) 7084 return true 7085 } 7086 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7087 // cond: 7088 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7089 for { 7090 x := v.AuxInt 7091 sym := v.Aux 7092 ptr := v.Args[0] 7093 v_1 := v.Args[1] 7094 if v_1.Op != OpAMD64ADDQconst { 7095 break 7096 } 7097 c := v_1.AuxInt 7098 idx := v_1.Args[0] 7099 mem := v.Args[2] 7100 v.reset(OpAMD64MOVQstoreconstidx1) 7101 v.AuxInt = ValAndOff(x).add(c) 7102 v.Aux = sym 7103 
v.AddArg(ptr) 7104 v.AddArg(idx) 7105 v.AddArg(mem) 7106 return true 7107 } 7108 return false 7109 } 7110 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool { 7111 b := v.Block 7112 _ = b 7113 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 7114 // cond: 7115 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7116 for { 7117 x := v.AuxInt 7118 sym := v.Aux 7119 v_0 := v.Args[0] 7120 if v_0.Op != OpAMD64ADDQconst { 7121 break 7122 } 7123 c := v_0.AuxInt 7124 ptr := v_0.Args[0] 7125 idx := v.Args[1] 7126 mem := v.Args[2] 7127 v.reset(OpAMD64MOVQstoreconstidx8) 7128 v.AuxInt = ValAndOff(x).add(c) 7129 v.Aux = sym 7130 v.AddArg(ptr) 7131 v.AddArg(idx) 7132 v.AddArg(mem) 7133 return true 7134 } 7135 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 7136 // cond: 7137 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 7138 for { 7139 x := v.AuxInt 7140 sym := v.Aux 7141 ptr := v.Args[0] 7142 v_1 := v.Args[1] 7143 if v_1.Op != OpAMD64ADDQconst { 7144 break 7145 } 7146 c := v_1.AuxInt 7147 idx := v_1.Args[0] 7148 mem := v.Args[2] 7149 v.reset(OpAMD64MOVQstoreconstidx8) 7150 v.AuxInt = ValAndOff(x).add(8 * c) 7151 v.Aux = sym 7152 v.AddArg(ptr) 7153 v.AddArg(idx) 7154 v.AddArg(mem) 7155 return true 7156 } 7157 return false 7158 } 7159 func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool { 7160 b := v.Block 7161 _ = b 7162 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 7163 // cond: 7164 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 7165 for { 7166 c := v.AuxInt 7167 sym := v.Aux 7168 ptr := v.Args[0] 7169 v_1 := v.Args[1] 7170 if v_1.Op != OpAMD64SHLQconst { 7171 break 7172 } 7173 if v_1.AuxInt != 3 { 7174 break 7175 } 7176 idx := v_1.Args[0] 7177 val := v.Args[2] 7178 mem := v.Args[3] 7179 v.reset(OpAMD64MOVQstoreidx8) 7180 v.AuxInt = c 7181 v.Aux = sym 7182 v.AddArg(ptr) 7183 v.AddArg(idx) 7184 v.AddArg(val) 7185 v.AddArg(mem) 7186 return true 7187 } 7188 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7189 // cond: 7190 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 7191 for { 7192 c := v.AuxInt 7193 sym := v.Aux 7194 v_0 := v.Args[0] 7195 if v_0.Op != OpAMD64ADDQconst { 7196 break 7197 } 7198 d := v_0.AuxInt 7199 ptr := v_0.Args[0] 7200 idx := v.Args[1] 7201 val := v.Args[2] 7202 mem := v.Args[3] 7203 v.reset(OpAMD64MOVQstoreidx1) 7204 v.AuxInt = c + d 7205 v.Aux = sym 7206 v.AddArg(ptr) 7207 v.AddArg(idx) 7208 v.AddArg(val) 7209 v.AddArg(mem) 7210 return true 7211 } 7212 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7213 // cond: 7214 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 7215 for { 7216 c := v.AuxInt 7217 sym := v.Aux 7218 ptr := v.Args[0] 7219 v_1 := v.Args[1] 7220 if v_1.Op != OpAMD64ADDQconst { 7221 break 7222 } 7223 d := v_1.AuxInt 7224 idx := v_1.Args[0] 7225 val := v.Args[2] 7226 mem := v.Args[3] 7227 v.reset(OpAMD64MOVQstoreidx1) 7228 v.AuxInt = c + d 7229 v.Aux = sym 7230 v.AddArg(ptr) 7231 v.AddArg(idx) 7232 v.AddArg(val) 7233 v.AddArg(mem) 7234 return true 7235 } 7236 return false 7237 } 7238 func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool { 7239 b := v.Block 7240 _ = b 7241 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7242 // cond: 7243 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 7244 for { 7245 c := v.AuxInt 7246 sym := v.Aux 7247 v_0 := v.Args[0] 7248 if v_0.Op != OpAMD64ADDQconst { 7249 break 
7250 } 7251 d := v_0.AuxInt 7252 ptr := v_0.Args[0] 7253 idx := v.Args[1] 7254 val := v.Args[2] 7255 mem := v.Args[3] 7256 v.reset(OpAMD64MOVQstoreidx8) 7257 v.AuxInt = c + d 7258 v.Aux = sym 7259 v.AddArg(ptr) 7260 v.AddArg(idx) 7261 v.AddArg(val) 7262 v.AddArg(mem) 7263 return true 7264 } 7265 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7266 // cond: 7267 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 7268 for { 7269 c := v.AuxInt 7270 sym := v.Aux 7271 ptr := v.Args[0] 7272 v_1 := v.Args[1] 7273 if v_1.Op != OpAMD64ADDQconst { 7274 break 7275 } 7276 d := v_1.AuxInt 7277 idx := v_1.Args[0] 7278 val := v.Args[2] 7279 mem := v.Args[3] 7280 v.reset(OpAMD64MOVQstoreidx8) 7281 v.AuxInt = c + 8*d 7282 v.Aux = sym 7283 v.AddArg(ptr) 7284 v.AddArg(idx) 7285 v.AddArg(val) 7286 v.AddArg(mem) 7287 return true 7288 } 7289 return false 7290 } 7291 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool { 7292 b := v.Block 7293 _ = b 7294 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 7295 // cond: is32Bit(off1+off2) 7296 // result: (MOVSDload [off1+off2] {sym} ptr mem) 7297 for { 7298 off1 := v.AuxInt 7299 sym := v.Aux 7300 v_0 := v.Args[0] 7301 if v_0.Op != OpAMD64ADDQconst { 7302 break 7303 } 7304 off2 := v_0.AuxInt 7305 ptr := v_0.Args[0] 7306 mem := v.Args[1] 7307 if !(is32Bit(off1 + off2)) { 7308 break 7309 } 7310 v.reset(OpAMD64MOVSDload) 7311 v.AuxInt = off1 + off2 7312 v.Aux = sym 7313 v.AddArg(ptr) 7314 v.AddArg(mem) 7315 return true 7316 } 7317 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7318 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7319 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7320 for { 7321 off1 := v.AuxInt 7322 sym1 := v.Aux 7323 v_0 := v.Args[0] 7324 if v_0.Op != OpAMD64LEAQ { 7325 break 7326 } 7327 off2 := v_0.AuxInt 7328 sym2 := v_0.Aux 7329 base := v_0.Args[0] 7330 mem := v.Args[1] 7331 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7332 break 7333 } 7334 v.reset(OpAMD64MOVSDload) 7335 v.AuxInt = off1 + off2 7336 v.Aux = mergeSym(sym1, sym2) 7337 v.AddArg(base) 7338 v.AddArg(mem) 7339 return true 7340 } 7341 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7342 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7343 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7344 for { 7345 off1 := v.AuxInt 7346 sym1 := v.Aux 7347 v_0 := v.Args[0] 7348 if v_0.Op != OpAMD64LEAQ1 { 7349 break 7350 } 7351 off2 := v_0.AuxInt 7352 sym2 := v_0.Aux 7353 ptr := v_0.Args[0] 7354 idx := v_0.Args[1] 7355 mem := v.Args[1] 7356 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7357 break 7358 } 7359 v.reset(OpAMD64MOVSDloadidx1) 7360 v.AuxInt = off1 + off2 7361 v.Aux = mergeSym(sym1, sym2) 7362 v.AddArg(ptr) 7363 v.AddArg(idx) 7364 v.AddArg(mem) 7365 return true 7366 } 7367 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7368 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7369 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7370 for { 7371 off1 := v.AuxInt 7372 sym1 := v.Aux 7373 v_0 := v.Args[0] 7374 if v_0.Op != OpAMD64LEAQ8 { 7375 break 7376 } 7377 off2 := v_0.AuxInt 7378 sym2 := v_0.Aux 7379 ptr := v_0.Args[0] 7380 idx := v_0.Args[1] 7381 mem := v.Args[1] 7382 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7383 break 7384 } 7385 v.reset(OpAMD64MOVSDloadidx8) 7386 v.AuxInt = off1 + off2 7387 v.Aux = mergeSym(sym1, sym2) 7388 v.AddArg(ptr) 7389 v.AddArg(idx) 
7390 v.AddArg(mem) 7391 return true 7392 } 7393 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 7394 // cond: ptr.Op != OpSB 7395 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 7396 for { 7397 off := v.AuxInt 7398 sym := v.Aux 7399 v_0 := v.Args[0] 7400 if v_0.Op != OpAMD64ADDQ { 7401 break 7402 } 7403 ptr := v_0.Args[0] 7404 idx := v_0.Args[1] 7405 mem := v.Args[1] 7406 if !(ptr.Op != OpSB) { 7407 break 7408 } 7409 v.reset(OpAMD64MOVSDloadidx1) 7410 v.AuxInt = off 7411 v.Aux = sym 7412 v.AddArg(ptr) 7413 v.AddArg(idx) 7414 v.AddArg(mem) 7415 return true 7416 } 7417 return false 7418 } 7419 func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool { 7420 b := v.Block 7421 _ = b 7422 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7423 // cond: 7424 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 7425 for { 7426 c := v.AuxInt 7427 sym := v.Aux 7428 ptr := v.Args[0] 7429 v_1 := v.Args[1] 7430 if v_1.Op != OpAMD64SHLQconst { 7431 break 7432 } 7433 if v_1.AuxInt != 3 { 7434 break 7435 } 7436 idx := v_1.Args[0] 7437 mem := v.Args[2] 7438 v.reset(OpAMD64MOVSDloadidx8) 7439 v.AuxInt = c 7440 v.Aux = sym 7441 v.AddArg(ptr) 7442 v.AddArg(idx) 7443 v.AddArg(mem) 7444 return true 7445 } 7446 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7447 // cond: 7448 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 7449 for { 7450 c := v.AuxInt 7451 sym := v.Aux 7452 v_0 := v.Args[0] 7453 if v_0.Op != OpAMD64ADDQconst { 7454 break 7455 } 7456 d := v_0.AuxInt 7457 ptr := v_0.Args[0] 7458 idx := v.Args[1] 7459 mem := v.Args[2] 7460 v.reset(OpAMD64MOVSDloadidx1) 7461 v.AuxInt = c + d 7462 v.Aux = sym 7463 v.AddArg(ptr) 7464 v.AddArg(idx) 7465 v.AddArg(mem) 7466 return true 7467 } 7468 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7469 // cond: 7470 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 7471 for { 7472 c := v.AuxInt 7473 sym := v.Aux 7474 ptr := v.Args[0] 7475 v_1 := v.Args[1] 7476 if v_1.Op != OpAMD64ADDQconst { 7477 break 7478 } 7479 d := v_1.AuxInt 7480 idx := v_1.Args[0] 7481 mem := v.Args[2] 7482 v.reset(OpAMD64MOVSDloadidx1) 7483 v.AuxInt = c + d 7484 v.Aux = sym 7485 v.AddArg(ptr) 7486 v.AddArg(idx) 7487 v.AddArg(mem) 7488 return true 7489 } 7490 return false 7491 } 7492 func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool { 7493 b := v.Block 7494 _ = b 7495 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7496 // cond: 7497 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 7498 for { 7499 c := v.AuxInt 7500 sym := v.Aux 7501 v_0 := v.Args[0] 7502 if v_0.Op != OpAMD64ADDQconst { 7503 break 7504 } 7505 d := v_0.AuxInt 7506 ptr := v_0.Args[0] 7507 idx := v.Args[1] 7508 mem := v.Args[2] 7509 v.reset(OpAMD64MOVSDloadidx8) 7510 v.AuxInt = c + d 7511 v.Aux = sym 7512 v.AddArg(ptr) 7513 v.AddArg(idx) 7514 v.AddArg(mem) 7515 return true 7516 } 7517 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 7518 // cond: 7519 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 7520 for { 7521 c := v.AuxInt 7522 sym := v.Aux 7523 ptr := v.Args[0] 7524 v_1 := v.Args[1] 7525 if v_1.Op != OpAMD64ADDQconst { 7526 break 7527 } 7528 d := v_1.AuxInt 7529 idx := v_1.Args[0] 7530 mem := v.Args[2] 7531 v.reset(OpAMD64MOVSDloadidx8) 7532 v.AuxInt = c + 8*d 7533 v.Aux = sym 7534 v.AddArg(ptr) 7535 v.AddArg(idx) 7536 v.AddArg(mem) 7537 return true 7538 } 7539 return false 7540 } 7541 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool { 7542 b := v.Block 7543 _ = b 7544 // 
match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7545 // cond: is32Bit(off1+off2) 7546 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 7547 for { 7548 off1 := v.AuxInt 7549 sym := v.Aux 7550 v_0 := v.Args[0] 7551 if v_0.Op != OpAMD64ADDQconst { 7552 break 7553 } 7554 off2 := v_0.AuxInt 7555 ptr := v_0.Args[0] 7556 val := v.Args[1] 7557 mem := v.Args[2] 7558 if !(is32Bit(off1 + off2)) { 7559 break 7560 } 7561 v.reset(OpAMD64MOVSDstore) 7562 v.AuxInt = off1 + off2 7563 v.Aux = sym 7564 v.AddArg(ptr) 7565 v.AddArg(val) 7566 v.AddArg(mem) 7567 return true 7568 } 7569 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7570 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7571 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7572 for { 7573 off1 := v.AuxInt 7574 sym1 := v.Aux 7575 v_0 := v.Args[0] 7576 if v_0.Op != OpAMD64LEAQ { 7577 break 7578 } 7579 off2 := v_0.AuxInt 7580 sym2 := v_0.Aux 7581 base := v_0.Args[0] 7582 val := v.Args[1] 7583 mem := v.Args[2] 7584 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7585 break 7586 } 7587 v.reset(OpAMD64MOVSDstore) 7588 v.AuxInt = off1 + off2 7589 v.Aux = mergeSym(sym1, sym2) 7590 v.AddArg(base) 7591 v.AddArg(val) 7592 v.AddArg(mem) 7593 return true 7594 } 7595 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7596 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7597 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7598 for { 7599 off1 := v.AuxInt 7600 sym1 := v.Aux 7601 v_0 := v.Args[0] 7602 if v_0.Op != OpAMD64LEAQ1 { 7603 break 7604 } 7605 off2 := v_0.AuxInt 7606 sym2 := v_0.Aux 7607 ptr := v_0.Args[0] 7608 idx := v_0.Args[1] 7609 val := v.Args[1] 7610 mem := v.Args[2] 7611 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7612 break 7613 } 7614 v.reset(OpAMD64MOVSDstoreidx1) 7615 v.AuxInt = off1 + off2 7616 v.Aux = mergeSym(sym1, sym2) 7617 v.AddArg(ptr) 7618 v.AddArg(idx) 7619 v.AddArg(val) 7620 v.AddArg(mem) 7621 return true 7622 } 7623 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 7624 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7625 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7626 for { 7627 off1 := v.AuxInt 7628 sym1 := v.Aux 7629 v_0 := v.Args[0] 7630 if v_0.Op != OpAMD64LEAQ8 { 7631 break 7632 } 7633 off2 := v_0.AuxInt 7634 sym2 := v_0.Aux 7635 ptr := v_0.Args[0] 7636 idx := v_0.Args[1] 7637 val := v.Args[1] 7638 mem := v.Args[2] 7639 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7640 break 7641 } 7642 v.reset(OpAMD64MOVSDstoreidx8) 7643 v.AuxInt = off1 + off2 7644 v.Aux = mergeSym(sym1, sym2) 7645 v.AddArg(ptr) 7646 v.AddArg(idx) 7647 v.AddArg(val) 7648 v.AddArg(mem) 7649 return true 7650 } 7651 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 7652 // cond: ptr.Op != OpSB 7653 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 7654 for { 7655 off := v.AuxInt 7656 sym := v.Aux 7657 v_0 := v.Args[0] 7658 if v_0.Op != OpAMD64ADDQ { 7659 break 7660 } 7661 ptr := v_0.Args[0] 7662 idx := v_0.Args[1] 7663 val := v.Args[1] 7664 mem := v.Args[2] 7665 if !(ptr.Op != OpSB) { 7666 break 7667 } 7668 v.reset(OpAMD64MOVSDstoreidx1) 7669 v.AuxInt = off 7670 v.Aux = sym 7671 v.AddArg(ptr) 7672 v.AddArg(idx) 7673 v.AddArg(val) 7674 v.AddArg(mem) 7675 return true 7676 } 7677 return false 7678 } 7679 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool { 7680 b := v.Block 7681 _ = b 7682 // match: 
(MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 7683 // cond: 7684 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 7685 for { 7686 c := v.AuxInt 7687 sym := v.Aux 7688 ptr := v.Args[0] 7689 v_1 := v.Args[1] 7690 if v_1.Op != OpAMD64SHLQconst { 7691 break 7692 } 7693 if v_1.AuxInt != 3 { 7694 break 7695 } 7696 idx := v_1.Args[0] 7697 val := v.Args[2] 7698 mem := v.Args[3] 7699 v.reset(OpAMD64MOVSDstoreidx8) 7700 v.AuxInt = c 7701 v.Aux = sym 7702 v.AddArg(ptr) 7703 v.AddArg(idx) 7704 v.AddArg(val) 7705 v.AddArg(mem) 7706 return true 7707 } 7708 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7709 // cond: 7710 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 7711 for { 7712 c := v.AuxInt 7713 sym := v.Aux 7714 v_0 := v.Args[0] 7715 if v_0.Op != OpAMD64ADDQconst { 7716 break 7717 } 7718 d := v_0.AuxInt 7719 ptr := v_0.Args[0] 7720 idx := v.Args[1] 7721 val := v.Args[2] 7722 mem := v.Args[3] 7723 v.reset(OpAMD64MOVSDstoreidx1) 7724 v.AuxInt = c + d 7725 v.Aux = sym 7726 v.AddArg(ptr) 7727 v.AddArg(idx) 7728 v.AddArg(val) 7729 v.AddArg(mem) 7730 return true 7731 } 7732 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7733 // cond: 7734 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 7735 for { 7736 c := v.AuxInt 7737 sym := v.Aux 7738 ptr := v.Args[0] 7739 v_1 := v.Args[1] 7740 if v_1.Op != OpAMD64ADDQconst { 7741 break 7742 } 7743 d := v_1.AuxInt 7744 idx := v_1.Args[0] 7745 val := v.Args[2] 7746 mem := v.Args[3] 7747 v.reset(OpAMD64MOVSDstoreidx1) 7748 v.AuxInt = c + d 7749 v.Aux = sym 7750 v.AddArg(ptr) 7751 v.AddArg(idx) 7752 v.AddArg(val) 7753 v.AddArg(mem) 7754 return true 7755 } 7756 return false 7757 } 7758 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool { 7759 b := v.Block 7760 _ = b 7761 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7762 // cond: 7763 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 7764 for { 7765 c := v.AuxInt 7766 sym := v.Aux 7767 v_0 := v.Args[0] 7768 if v_0.Op != OpAMD64ADDQconst { 7769 break 7770 } 7771 d := v_0.AuxInt 7772 ptr := v_0.Args[0] 7773 idx := v.Args[1] 7774 val := v.Args[2] 7775 mem := v.Args[3] 7776 v.reset(OpAMD64MOVSDstoreidx8) 7777 v.AuxInt = c + d 7778 v.Aux = sym 7779 v.AddArg(ptr) 7780 v.AddArg(idx) 7781 v.AddArg(val) 7782 v.AddArg(mem) 7783 return true 7784 } 7785 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7786 // cond: 7787 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 7788 for { 7789 c := v.AuxInt 7790 sym := v.Aux 7791 ptr := v.Args[0] 7792 v_1 := v.Args[1] 7793 if v_1.Op != OpAMD64ADDQconst { 7794 break 7795 } 7796 d := v_1.AuxInt 7797 idx := v_1.Args[0] 7798 val := v.Args[2] 7799 mem := v.Args[3] 7800 v.reset(OpAMD64MOVSDstoreidx8) 7801 v.AuxInt = c + 8*d 7802 v.Aux = sym 7803 v.AddArg(ptr) 7804 v.AddArg(idx) 7805 v.AddArg(val) 7806 v.AddArg(mem) 7807 return true 7808 } 7809 return false 7810 } 7811 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { 7812 b := v.Block 7813 _ = b 7814 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 7815 // cond: is32Bit(off1+off2) 7816 // result: (MOVSSload [off1+off2] {sym} ptr mem) 7817 for { 7818 off1 := v.AuxInt 7819 sym := v.Aux 7820 v_0 := v.Args[0] 7821 if v_0.Op != OpAMD64ADDQconst { 7822 break 7823 } 7824 off2 := v_0.AuxInt 7825 ptr := v_0.Args[0] 7826 mem := v.Args[1] 7827 if !(is32Bit(off1 + off2)) { 7828 break 7829 } 7830 v.reset(OpAMD64MOVSSload) 7831 v.AuxInt = off1 + off2 7832 v.Aux = sym 
7833 v.AddArg(ptr) 7834 v.AddArg(mem) 7835 return true 7836 } 7837 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7838 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7839 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7840 for { 7841 off1 := v.AuxInt 7842 sym1 := v.Aux 7843 v_0 := v.Args[0] 7844 if v_0.Op != OpAMD64LEAQ { 7845 break 7846 } 7847 off2 := v_0.AuxInt 7848 sym2 := v_0.Aux 7849 base := v_0.Args[0] 7850 mem := v.Args[1] 7851 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7852 break 7853 } 7854 v.reset(OpAMD64MOVSSload) 7855 v.AuxInt = off1 + off2 7856 v.Aux = mergeSym(sym1, sym2) 7857 v.AddArg(base) 7858 v.AddArg(mem) 7859 return true 7860 } 7861 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7862 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7863 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7864 for { 7865 off1 := v.AuxInt 7866 sym1 := v.Aux 7867 v_0 := v.Args[0] 7868 if v_0.Op != OpAMD64LEAQ1 { 7869 break 7870 } 7871 off2 := v_0.AuxInt 7872 sym2 := v_0.Aux 7873 ptr := v_0.Args[0] 7874 idx := v_0.Args[1] 7875 mem := v.Args[1] 7876 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7877 break 7878 } 7879 v.reset(OpAMD64MOVSSloadidx1) 7880 v.AuxInt = off1 + off2 7881 v.Aux = mergeSym(sym1, sym2) 7882 v.AddArg(ptr) 7883 v.AddArg(idx) 7884 v.AddArg(mem) 7885 return true 7886 } 7887 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7888 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7889 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7890 for { 7891 off1 := v.AuxInt 7892 sym1 := v.Aux 7893 v_0 := v.Args[0] 7894 if v_0.Op != OpAMD64LEAQ4 { 7895 break 7896 } 7897 off2 := v_0.AuxInt 7898 sym2 := v_0.Aux 7899 ptr := v_0.Args[0] 7900 idx := v_0.Args[1] 7901 mem := v.Args[1] 7902 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7903 break 7904 } 7905 v.reset(OpAMD64MOVSSloadidx4) 7906 v.AuxInt = off1 + off2 7907 v.Aux = mergeSym(sym1, sym2) 7908 v.AddArg(ptr) 7909 v.AddArg(idx) 7910 v.AddArg(mem) 7911 return true 7912 } 7913 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 7914 // cond: ptr.Op != OpSB 7915 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 7916 for { 7917 off := v.AuxInt 7918 sym := v.Aux 7919 v_0 := v.Args[0] 7920 if v_0.Op != OpAMD64ADDQ { 7921 break 7922 } 7923 ptr := v_0.Args[0] 7924 idx := v_0.Args[1] 7925 mem := v.Args[1] 7926 if !(ptr.Op != OpSB) { 7927 break 7928 } 7929 v.reset(OpAMD64MOVSSloadidx1) 7930 v.AuxInt = off 7931 v.Aux = sym 7932 v.AddArg(ptr) 7933 v.AddArg(idx) 7934 v.AddArg(mem) 7935 return true 7936 } 7937 return false 7938 } 7939 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool { 7940 b := v.Block 7941 _ = b 7942 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7943 // cond: 7944 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 7945 for { 7946 c := v.AuxInt 7947 sym := v.Aux 7948 ptr := v.Args[0] 7949 v_1 := v.Args[1] 7950 if v_1.Op != OpAMD64SHLQconst { 7951 break 7952 } 7953 if v_1.AuxInt != 2 { 7954 break 7955 } 7956 idx := v_1.Args[0] 7957 mem := v.Args[2] 7958 v.reset(OpAMD64MOVSSloadidx4) 7959 v.AuxInt = c 7960 v.Aux = sym 7961 v.AddArg(ptr) 7962 v.AddArg(idx) 7963 v.AddArg(mem) 7964 return true 7965 } 7966 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7967 // cond: 7968 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 7969 for { 7970 c := v.AuxInt 7971 sym := v.Aux 7972 v_0 := v.Args[0] 7973 if v_0.Op != 
OpAMD64ADDQconst { 7974 break 7975 } 7976 d := v_0.AuxInt 7977 ptr := v_0.Args[0] 7978 idx := v.Args[1] 7979 mem := v.Args[2] 7980 v.reset(OpAMD64MOVSSloadidx1) 7981 v.AuxInt = c + d 7982 v.Aux = sym 7983 v.AddArg(ptr) 7984 v.AddArg(idx) 7985 v.AddArg(mem) 7986 return true 7987 } 7988 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7989 // cond: 7990 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 7991 for { 7992 c := v.AuxInt 7993 sym := v.Aux 7994 ptr := v.Args[0] 7995 v_1 := v.Args[1] 7996 if v_1.Op != OpAMD64ADDQconst { 7997 break 7998 } 7999 d := v_1.AuxInt 8000 idx := v_1.Args[0] 8001 mem := v.Args[2] 8002 v.reset(OpAMD64MOVSSloadidx1) 8003 v.AuxInt = c + d 8004 v.Aux = sym 8005 v.AddArg(ptr) 8006 v.AddArg(idx) 8007 v.AddArg(mem) 8008 return true 8009 } 8010 return false 8011 } 8012 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { 8013 b := v.Block 8014 _ = b 8015 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 8016 // cond: 8017 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 8018 for { 8019 c := v.AuxInt 8020 sym := v.Aux 8021 v_0 := v.Args[0] 8022 if v_0.Op != OpAMD64ADDQconst { 8023 break 8024 } 8025 d := v_0.AuxInt 8026 ptr := v_0.Args[0] 8027 idx := v.Args[1] 8028 mem := v.Args[2] 8029 v.reset(OpAMD64MOVSSloadidx4) 8030 v.AuxInt = c + d 8031 v.Aux = sym 8032 v.AddArg(ptr) 8033 v.AddArg(idx) 8034 v.AddArg(mem) 8035 return true 8036 } 8037 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 8038 // cond: 8039 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 8040 for { 8041 c := v.AuxInt 8042 sym := v.Aux 8043 ptr := v.Args[0] 8044 v_1 := v.Args[1] 8045 if v_1.Op != OpAMD64ADDQconst { 8046 break 8047 } 8048 d := v_1.AuxInt 8049 idx := v_1.Args[0] 8050 mem := v.Args[2] 8051 v.reset(OpAMD64MOVSSloadidx4) 8052 v.AuxInt = c + 4*d 8053 v.Aux = sym 8054 v.AddArg(ptr) 8055 v.AddArg(idx) 8056 v.AddArg(mem) 8057 return true 8058 } 8059 return false 8060 } 8061 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { 8062 b := v.Block 8063 _ = b 8064 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8065 // cond: is32Bit(off1+off2) 8066 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 8067 for { 8068 off1 := v.AuxInt 8069 sym := v.Aux 8070 v_0 := v.Args[0] 8071 if v_0.Op != OpAMD64ADDQconst { 8072 break 8073 } 8074 off2 := v_0.AuxInt 8075 ptr := v_0.Args[0] 8076 val := v.Args[1] 8077 mem := v.Args[2] 8078 if !(is32Bit(off1 + off2)) { 8079 break 8080 } 8081 v.reset(OpAMD64MOVSSstore) 8082 v.AuxInt = off1 + off2 8083 v.Aux = sym 8084 v.AddArg(ptr) 8085 v.AddArg(val) 8086 v.AddArg(mem) 8087 return true 8088 } 8089 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8090 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8091 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8092 for { 8093 off1 := v.AuxInt 8094 sym1 := v.Aux 8095 v_0 := v.Args[0] 8096 if v_0.Op != OpAMD64LEAQ { 8097 break 8098 } 8099 off2 := v_0.AuxInt 8100 sym2 := v_0.Aux 8101 base := v_0.Args[0] 8102 val := v.Args[1] 8103 mem := v.Args[2] 8104 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8105 break 8106 } 8107 v.reset(OpAMD64MOVSSstore) 8108 v.AuxInt = off1 + off2 8109 v.Aux = mergeSym(sym1, sym2) 8110 v.AddArg(base) 8111 v.AddArg(val) 8112 v.AddArg(mem) 8113 return true 8114 } 8115 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8116 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8117 // result: (MOVSSstoreidx1 
[off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8118 for { 8119 off1 := v.AuxInt 8120 sym1 := v.Aux 8121 v_0 := v.Args[0] 8122 if v_0.Op != OpAMD64LEAQ1 { 8123 break 8124 } 8125 off2 := v_0.AuxInt 8126 sym2 := v_0.Aux 8127 ptr := v_0.Args[0] 8128 idx := v_0.Args[1] 8129 val := v.Args[1] 8130 mem := v.Args[2] 8131 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8132 break 8133 } 8134 v.reset(OpAMD64MOVSSstoreidx1) 8135 v.AuxInt = off1 + off2 8136 v.Aux = mergeSym(sym1, sym2) 8137 v.AddArg(ptr) 8138 v.AddArg(idx) 8139 v.AddArg(val) 8140 v.AddArg(mem) 8141 return true 8142 } 8143 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 8144 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8145 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8146 for { 8147 off1 := v.AuxInt 8148 sym1 := v.Aux 8149 v_0 := v.Args[0] 8150 if v_0.Op != OpAMD64LEAQ4 { 8151 break 8152 } 8153 off2 := v_0.AuxInt 8154 sym2 := v_0.Aux 8155 ptr := v_0.Args[0] 8156 idx := v_0.Args[1] 8157 val := v.Args[1] 8158 mem := v.Args[2] 8159 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8160 break 8161 } 8162 v.reset(OpAMD64MOVSSstoreidx4) 8163 v.AuxInt = off1 + off2 8164 v.Aux = mergeSym(sym1, sym2) 8165 v.AddArg(ptr) 8166 v.AddArg(idx) 8167 v.AddArg(val) 8168 v.AddArg(mem) 8169 return true 8170 } 8171 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 8172 // cond: ptr.Op != OpSB 8173 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 8174 for { 8175 off := v.AuxInt 8176 sym := v.Aux 8177 v_0 := v.Args[0] 8178 if v_0.Op != OpAMD64ADDQ { 8179 break 8180 } 8181 ptr := v_0.Args[0] 8182 idx := v_0.Args[1] 8183 val := v.Args[1] 8184 mem := v.Args[2] 8185 if !(ptr.Op != OpSB) { 8186 break 8187 } 8188 v.reset(OpAMD64MOVSSstoreidx1) 8189 v.AuxInt = off 8190 v.Aux = sym 8191 v.AddArg(ptr) 8192 v.AddArg(idx) 8193 v.AddArg(val) 8194 v.AddArg(mem) 8195 return true 8196 } 8197 return false 8198 } 8199 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool { 8200 b := v.Block 8201 _ = b 8202 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 8203 // cond: 8204 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 8205 for { 8206 c := v.AuxInt 8207 sym := v.Aux 8208 ptr := v.Args[0] 8209 v_1 := v.Args[1] 8210 if v_1.Op != OpAMD64SHLQconst { 8211 break 8212 } 8213 if v_1.AuxInt != 2 { 8214 break 8215 } 8216 idx := v_1.Args[0] 8217 val := v.Args[2] 8218 mem := v.Args[3] 8219 v.reset(OpAMD64MOVSSstoreidx4) 8220 v.AuxInt = c 8221 v.Aux = sym 8222 v.AddArg(ptr) 8223 v.AddArg(idx) 8224 v.AddArg(val) 8225 v.AddArg(mem) 8226 return true 8227 } 8228 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8229 // cond: 8230 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 8231 for { 8232 c := v.AuxInt 8233 sym := v.Aux 8234 v_0 := v.Args[0] 8235 if v_0.Op != OpAMD64ADDQconst { 8236 break 8237 } 8238 d := v_0.AuxInt 8239 ptr := v_0.Args[0] 8240 idx := v.Args[1] 8241 val := v.Args[2] 8242 mem := v.Args[3] 8243 v.reset(OpAMD64MOVSSstoreidx1) 8244 v.AuxInt = c + d 8245 v.Aux = sym 8246 v.AddArg(ptr) 8247 v.AddArg(idx) 8248 v.AddArg(val) 8249 v.AddArg(mem) 8250 return true 8251 } 8252 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8253 // cond: 8254 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 8255 for { 8256 c := v.AuxInt 8257 sym := v.Aux 8258 ptr := v.Args[0] 8259 v_1 := v.Args[1] 8260 if v_1.Op != OpAMD64ADDQconst { 8261 break 8262 } 8263 d := v_1.AuxInt 8264 idx := v_1.Args[0] 8265 
val := v.Args[2] 8266 mem := v.Args[3] 8267 v.reset(OpAMD64MOVSSstoreidx1) 8268 v.AuxInt = c + d 8269 v.Aux = sym 8270 v.AddArg(ptr) 8271 v.AddArg(idx) 8272 v.AddArg(val) 8273 v.AddArg(mem) 8274 return true 8275 } 8276 return false 8277 } 8278 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { 8279 b := v.Block 8280 _ = b 8281 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8282 // cond: 8283 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 8284 for { 8285 c := v.AuxInt 8286 sym := v.Aux 8287 v_0 := v.Args[0] 8288 if v_0.Op != OpAMD64ADDQconst { 8289 break 8290 } 8291 d := v_0.AuxInt 8292 ptr := v_0.Args[0] 8293 idx := v.Args[1] 8294 val := v.Args[2] 8295 mem := v.Args[3] 8296 v.reset(OpAMD64MOVSSstoreidx4) 8297 v.AuxInt = c + d 8298 v.Aux = sym 8299 v.AddArg(ptr) 8300 v.AddArg(idx) 8301 v.AddArg(val) 8302 v.AddArg(mem) 8303 return true 8304 } 8305 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8306 // cond: 8307 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 8308 for { 8309 c := v.AuxInt 8310 sym := v.Aux 8311 ptr := v.Args[0] 8312 v_1 := v.Args[1] 8313 if v_1.Op != OpAMD64ADDQconst { 8314 break 8315 } 8316 d := v_1.AuxInt 8317 idx := v_1.Args[0] 8318 val := v.Args[2] 8319 mem := v.Args[3] 8320 v.reset(OpAMD64MOVSSstoreidx4) 8321 v.AuxInt = c + 4*d 8322 v.Aux = sym 8323 v.AddArg(ptr) 8324 v.AddArg(idx) 8325 v.AddArg(val) 8326 v.AddArg(mem) 8327 return true 8328 } 8329 return false 8330 } 8331 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { 8332 b := v.Block 8333 _ = b 8334 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 8335 // cond: x.Uses == 1 && clobber(x) 8336 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8337 for { 8338 x := v.Args[0] 8339 if x.Op != OpAMD64MOVWload { 8340 break 8341 } 8342 off := x.AuxInt 8343 sym := x.Aux 8344 ptr := x.Args[0] 8345 mem := x.Args[1] 8346 if !(x.Uses == 1 && clobber(x)) { 8347 break 8348 } 8349 b = x.Block 8350 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 8351 v.reset(OpCopy) 8352 v.AddArg(v0) 8353 v0.AuxInt = off 8354 v0.Aux = sym 8355 v0.AddArg(ptr) 8356 v0.AddArg(mem) 8357 return true 8358 } 8359 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 8360 // cond: x.Uses == 1 && clobber(x) 8361 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8362 for { 8363 x := v.Args[0] 8364 if x.Op != OpAMD64MOVLload { 8365 break 8366 } 8367 off := x.AuxInt 8368 sym := x.Aux 8369 ptr := x.Args[0] 8370 mem := x.Args[1] 8371 if !(x.Uses == 1 && clobber(x)) { 8372 break 8373 } 8374 b = x.Block 8375 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 8376 v.reset(OpCopy) 8377 v.AddArg(v0) 8378 v0.AuxInt = off 8379 v0.Aux = sym 8380 v0.AddArg(ptr) 8381 v0.AddArg(mem) 8382 return true 8383 } 8384 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 8385 // cond: x.Uses == 1 && clobber(x) 8386 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8387 for { 8388 x := v.Args[0] 8389 if x.Op != OpAMD64MOVQload { 8390 break 8391 } 8392 off := x.AuxInt 8393 sym := x.Aux 8394 ptr := x.Args[0] 8395 mem := x.Args[1] 8396 if !(x.Uses == 1 && clobber(x)) { 8397 break 8398 } 8399 b = x.Block 8400 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 8401 v.reset(OpCopy) 8402 v.AddArg(v0) 8403 v0.AuxInt = off 8404 v0.Aux = sym 8405 v0.AddArg(ptr) 8406 v0.AddArg(mem) 8407 return true 8408 } 8409 // match: (MOVWQSX (ANDLconst [c] x)) 8410 // cond: c & 0x8000 == 0 8411 // result: (ANDLconst [c & 0x7fff] x) 8412 for { 
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
8557 v0.Aux = sym 8558 v0.AddArg(ptr) 8559 v0.AddArg(idx) 8560 v0.AddArg(mem) 8561 return true 8562 } 8563 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 8564 // cond: x.Uses == 1 && clobber(x) 8565 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 8566 for { 8567 x := v.Args[0] 8568 if x.Op != OpAMD64MOVWloadidx2 { 8569 break 8570 } 8571 off := x.AuxInt 8572 sym := x.Aux 8573 ptr := x.Args[0] 8574 idx := x.Args[1] 8575 mem := x.Args[2] 8576 if !(x.Uses == 1 && clobber(x)) { 8577 break 8578 } 8579 b = x.Block 8580 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type) 8581 v.reset(OpCopy) 8582 v.AddArg(v0) 8583 v0.AuxInt = off 8584 v0.Aux = sym 8585 v0.AddArg(ptr) 8586 v0.AddArg(idx) 8587 v0.AddArg(mem) 8588 return true 8589 } 8590 // match: (MOVWQZX (ANDLconst [c] x)) 8591 // cond: 8592 // result: (ANDLconst [c & 0xffff] x) 8593 for { 8594 v_0 := v.Args[0] 8595 if v_0.Op != OpAMD64ANDLconst { 8596 break 8597 } 8598 c := v_0.AuxInt 8599 x := v_0.Args[0] 8600 v.reset(OpAMD64ANDLconst) 8601 v.AuxInt = c & 0xffff 8602 v.AddArg(x) 8603 return true 8604 } 8605 return false 8606 } 8607 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { 8608 b := v.Block 8609 _ = b 8610 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 8611 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8612 // result: x 8613 for { 8614 off := v.AuxInt 8615 sym := v.Aux 8616 ptr := v.Args[0] 8617 v_1 := v.Args[1] 8618 if v_1.Op != OpAMD64MOVWstore { 8619 break 8620 } 8621 off2 := v_1.AuxInt 8622 sym2 := v_1.Aux 8623 ptr2 := v_1.Args[0] 8624 x := v_1.Args[1] 8625 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8626 break 8627 } 8628 v.reset(OpCopy) 8629 v.Type = x.Type 8630 v.AddArg(x) 8631 return true 8632 } 8633 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 8634 // cond: is32Bit(off1+off2) 8635 // result: (MOVWload [off1+off2] {sym} ptr mem) 8636 for { 8637 off1 := v.AuxInt 8638 sym := v.Aux 8639 v_0 := v.Args[0] 8640 if v_0.Op != OpAMD64ADDQconst { 8641 break 8642 } 8643 off2 := v_0.AuxInt 8644 ptr := v_0.Args[0] 8645 mem := v.Args[1] 8646 if !(is32Bit(off1 + off2)) { 8647 break 8648 } 8649 v.reset(OpAMD64MOVWload) 8650 v.AuxInt = off1 + off2 8651 v.Aux = sym 8652 v.AddArg(ptr) 8653 v.AddArg(mem) 8654 return true 8655 } 8656 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8657 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8658 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8659 for { 8660 off1 := v.AuxInt 8661 sym1 := v.Aux 8662 v_0 := v.Args[0] 8663 if v_0.Op != OpAMD64LEAQ { 8664 break 8665 } 8666 off2 := v_0.AuxInt 8667 sym2 := v_0.Aux 8668 base := v_0.Args[0] 8669 mem := v.Args[1] 8670 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8671 break 8672 } 8673 v.reset(OpAMD64MOVWload) 8674 v.AuxInt = off1 + off2 8675 v.Aux = mergeSym(sym1, sym2) 8676 v.AddArg(base) 8677 v.AddArg(mem) 8678 return true 8679 } 8680 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8681 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8682 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8683 for { 8684 off1 := v.AuxInt 8685 sym1 := v.Aux 8686 v_0 := v.Args[0] 8687 if v_0.Op != OpAMD64LEAQ1 { 8688 break 8689 } 8690 off2 := v_0.AuxInt 8691 sym2 := v_0.Aux 8692 ptr := v_0.Args[0] 8693 idx := v_0.Args[1] 8694 mem := v.Args[1] 8695 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8696 break 8697 } 8698 v.reset(OpAMD64MOVWloadidx1) 8699 
v.AuxInt = off1 + off2 8700 v.Aux = mergeSym(sym1, sym2) 8701 v.AddArg(ptr) 8702 v.AddArg(idx) 8703 v.AddArg(mem) 8704 return true 8705 } 8706 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 8707 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8708 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8709 for { 8710 off1 := v.AuxInt 8711 sym1 := v.Aux 8712 v_0 := v.Args[0] 8713 if v_0.Op != OpAMD64LEAQ2 { 8714 break 8715 } 8716 off2 := v_0.AuxInt 8717 sym2 := v_0.Aux 8718 ptr := v_0.Args[0] 8719 idx := v_0.Args[1] 8720 mem := v.Args[1] 8721 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8722 break 8723 } 8724 v.reset(OpAMD64MOVWloadidx2) 8725 v.AuxInt = off1 + off2 8726 v.Aux = mergeSym(sym1, sym2) 8727 v.AddArg(ptr) 8728 v.AddArg(idx) 8729 v.AddArg(mem) 8730 return true 8731 } 8732 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 8733 // cond: ptr.Op != OpSB 8734 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 8735 for { 8736 off := v.AuxInt 8737 sym := v.Aux 8738 v_0 := v.Args[0] 8739 if v_0.Op != OpAMD64ADDQ { 8740 break 8741 } 8742 ptr := v_0.Args[0] 8743 idx := v_0.Args[1] 8744 mem := v.Args[1] 8745 if !(ptr.Op != OpSB) { 8746 break 8747 } 8748 v.reset(OpAMD64MOVWloadidx1) 8749 v.AuxInt = off 8750 v.Aux = sym 8751 v.AddArg(ptr) 8752 v.AddArg(idx) 8753 v.AddArg(mem) 8754 return true 8755 } 8756 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 8757 // cond: canMergeSym(sym1, sym2) 8758 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8759 for { 8760 off1 := v.AuxInt 8761 sym1 := v.Aux 8762 v_0 := v.Args[0] 8763 if v_0.Op != OpAMD64LEAL { 8764 break 8765 } 8766 off2 := v_0.AuxInt 8767 sym2 := v_0.Aux 8768 base := v_0.Args[0] 8769 mem := v.Args[1] 8770 if !(canMergeSym(sym1, sym2)) { 8771 break 8772 } 8773 v.reset(OpAMD64MOVWload) 8774 v.AuxInt = off1 + off2 8775 v.Aux = mergeSym(sym1, sym2) 8776 v.AddArg(base) 8777 v.AddArg(mem) 8778 return true 8779 } 8780 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 8781 // cond: is32Bit(off1+off2) 8782 // result: (MOVWload [off1+off2] {sym} ptr mem) 8783 for { 8784 off1 := v.AuxInt 8785 sym := v.Aux 8786 v_0 := v.Args[0] 8787 if v_0.Op != OpAMD64ADDLconst { 8788 break 8789 } 8790 off2 := v_0.AuxInt 8791 ptr := v_0.Args[0] 8792 mem := v.Args[1] 8793 if !(is32Bit(off1 + off2)) { 8794 break 8795 } 8796 v.reset(OpAMD64MOVWload) 8797 v.AuxInt = off1 + off2 8798 v.Aux = sym 8799 v.AddArg(ptr) 8800 v.AddArg(mem) 8801 return true 8802 } 8803 return false 8804 } 8805 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool { 8806 b := v.Block 8807 _ = b 8808 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 8809 // cond: 8810 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 8811 for { 8812 c := v.AuxInt 8813 sym := v.Aux 8814 ptr := v.Args[0] 8815 v_1 := v.Args[1] 8816 if v_1.Op != OpAMD64SHLQconst { 8817 break 8818 } 8819 if v_1.AuxInt != 1 { 8820 break 8821 } 8822 idx := v_1.Args[0] 8823 mem := v.Args[2] 8824 v.reset(OpAMD64MOVWloadidx2) 8825 v.AuxInt = c 8826 v.Aux = sym 8827 v.AddArg(ptr) 8828 v.AddArg(idx) 8829 v.AddArg(mem) 8830 return true 8831 } 8832 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8833 // cond: 8834 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 8835 for { 8836 c := v.AuxInt 8837 sym := v.Aux 8838 v_0 := v.Args[0] 8839 if v_0.Op != OpAMD64ADDQconst { 8840 break 8841 } 8842 d := v_0.AuxInt 8843 ptr := v_0.Args[0] 8844 idx := v.Args[1] 8845 mem := v.Args[2] 8846 
v.reset(OpAMD64MOVWloadidx1) 8847 v.AuxInt = c + d 8848 v.Aux = sym 8849 v.AddArg(ptr) 8850 v.AddArg(idx) 8851 v.AddArg(mem) 8852 return true 8853 } 8854 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8855 // cond: 8856 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 8857 for { 8858 c := v.AuxInt 8859 sym := v.Aux 8860 ptr := v.Args[0] 8861 v_1 := v.Args[1] 8862 if v_1.Op != OpAMD64ADDQconst { 8863 break 8864 } 8865 d := v_1.AuxInt 8866 idx := v_1.Args[0] 8867 mem := v.Args[2] 8868 v.reset(OpAMD64MOVWloadidx1) 8869 v.AuxInt = c + d 8870 v.Aux = sym 8871 v.AddArg(ptr) 8872 v.AddArg(idx) 8873 v.AddArg(mem) 8874 return true 8875 } 8876 return false 8877 } 8878 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { 8879 b := v.Block 8880 _ = b 8881 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 8882 // cond: 8883 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 8884 for { 8885 c := v.AuxInt 8886 sym := v.Aux 8887 v_0 := v.Args[0] 8888 if v_0.Op != OpAMD64ADDQconst { 8889 break 8890 } 8891 d := v_0.AuxInt 8892 ptr := v_0.Args[0] 8893 idx := v.Args[1] 8894 mem := v.Args[2] 8895 v.reset(OpAMD64MOVWloadidx2) 8896 v.AuxInt = c + d 8897 v.Aux = sym 8898 v.AddArg(ptr) 8899 v.AddArg(idx) 8900 v.AddArg(mem) 8901 return true 8902 } 8903 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 8904 // cond: 8905 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 8906 for { 8907 c := v.AuxInt 8908 sym := v.Aux 8909 ptr := v.Args[0] 8910 v_1 := v.Args[1] 8911 if v_1.Op != OpAMD64ADDQconst { 8912 break 8913 } 8914 d := v_1.AuxInt 8915 idx := v_1.Args[0] 8916 mem := v.Args[2] 8917 v.reset(OpAMD64MOVWloadidx2) 8918 v.AuxInt = c + 2*d 8919 v.Aux = sym 8920 v.AddArg(ptr) 8921 v.AddArg(idx) 8922 v.AddArg(mem) 8923 return true 8924 } 8925 return false 8926 } 8927 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { 8928 b := v.Block 8929 _ = b 8930 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 8931 // cond: 8932 // result: (MOVWstore [off] {sym} ptr x mem) 8933 for { 8934 off := v.AuxInt 8935 sym := v.Aux 8936 ptr := v.Args[0] 8937 v_1 := v.Args[1] 8938 if v_1.Op != OpAMD64MOVWQSX { 8939 break 8940 } 8941 x := v_1.Args[0] 8942 mem := v.Args[2] 8943 v.reset(OpAMD64MOVWstore) 8944 v.AuxInt = off 8945 v.Aux = sym 8946 v.AddArg(ptr) 8947 v.AddArg(x) 8948 v.AddArg(mem) 8949 return true 8950 } 8951 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 8952 // cond: 8953 // result: (MOVWstore [off] {sym} ptr x mem) 8954 for { 8955 off := v.AuxInt 8956 sym := v.Aux 8957 ptr := v.Args[0] 8958 v_1 := v.Args[1] 8959 if v_1.Op != OpAMD64MOVWQZX { 8960 break 8961 } 8962 x := v_1.Args[0] 8963 mem := v.Args[2] 8964 v.reset(OpAMD64MOVWstore) 8965 v.AuxInt = off 8966 v.Aux = sym 8967 v.AddArg(ptr) 8968 v.AddArg(x) 8969 v.AddArg(mem) 8970 return true 8971 } 8972 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8973 // cond: is32Bit(off1+off2) 8974 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 8975 for { 8976 off1 := v.AuxInt 8977 sym := v.Aux 8978 v_0 := v.Args[0] 8979 if v_0.Op != OpAMD64ADDQconst { 8980 break 8981 } 8982 off2 := v_0.AuxInt 8983 ptr := v_0.Args[0] 8984 val := v.Args[1] 8985 mem := v.Args[2] 8986 if !(is32Bit(off1 + off2)) { 8987 break 8988 } 8989 v.reset(OpAMD64MOVWstore) 8990 v.AuxInt = off1 + off2 8991 v.Aux = sym 8992 v.AddArg(ptr) 8993 v.AddArg(val) 8994 v.AddArg(mem) 8995 return true 8996 } 8997 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 8998 // cond: validOff(off) 8999 // result: 
(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 9000 for { 9001 off := v.AuxInt 9002 sym := v.Aux 9003 ptr := v.Args[0] 9004 v_1 := v.Args[1] 9005 if v_1.Op != OpAMD64MOVLconst { 9006 break 9007 } 9008 c := v_1.AuxInt 9009 mem := v.Args[2] 9010 if !(validOff(off)) { 9011 break 9012 } 9013 v.reset(OpAMD64MOVWstoreconst) 9014 v.AuxInt = makeValAndOff(int64(int16(c)), off) 9015 v.Aux = sym 9016 v.AddArg(ptr) 9017 v.AddArg(mem) 9018 return true 9019 } 9020 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9021 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9022 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9023 for { 9024 off1 := v.AuxInt 9025 sym1 := v.Aux 9026 v_0 := v.Args[0] 9027 if v_0.Op != OpAMD64LEAQ { 9028 break 9029 } 9030 off2 := v_0.AuxInt 9031 sym2 := v_0.Aux 9032 base := v_0.Args[0] 9033 val := v.Args[1] 9034 mem := v.Args[2] 9035 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9036 break 9037 } 9038 v.reset(OpAMD64MOVWstore) 9039 v.AuxInt = off1 + off2 9040 v.Aux = mergeSym(sym1, sym2) 9041 v.AddArg(base) 9042 v.AddArg(val) 9043 v.AddArg(mem) 9044 return true 9045 } 9046 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9047 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9048 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9049 for { 9050 off1 := v.AuxInt 9051 sym1 := v.Aux 9052 v_0 := v.Args[0] 9053 if v_0.Op != OpAMD64LEAQ1 { 9054 break 9055 } 9056 off2 := v_0.AuxInt 9057 sym2 := v_0.Aux 9058 ptr := v_0.Args[0] 9059 idx := v_0.Args[1] 9060 val := v.Args[1] 9061 mem := v.Args[2] 9062 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9063 break 9064 } 9065 v.reset(OpAMD64MOVWstoreidx1) 9066 v.AuxInt = off1 + off2 9067 v.Aux = mergeSym(sym1, sym2) 9068 v.AddArg(ptr) 9069 v.AddArg(idx) 9070 v.AddArg(val) 9071 v.AddArg(mem) 9072 return true 9073 } 9074 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 9075 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9076 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9077 for { 9078 off1 := v.AuxInt 9079 sym1 := v.Aux 9080 v_0 := v.Args[0] 9081 if v_0.Op != OpAMD64LEAQ2 { 9082 break 9083 } 9084 off2 := v_0.AuxInt 9085 sym2 := v_0.Aux 9086 ptr := v_0.Args[0] 9087 idx := v_0.Args[1] 9088 val := v.Args[1] 9089 mem := v.Args[2] 9090 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9091 break 9092 } 9093 v.reset(OpAMD64MOVWstoreidx2) 9094 v.AuxInt = off1 + off2 9095 v.Aux = mergeSym(sym1, sym2) 9096 v.AddArg(ptr) 9097 v.AddArg(idx) 9098 v.AddArg(val) 9099 v.AddArg(mem) 9100 return true 9101 } 9102 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 9103 // cond: ptr.Op != OpSB 9104 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 9105 for { 9106 off := v.AuxInt 9107 sym := v.Aux 9108 v_0 := v.Args[0] 9109 if v_0.Op != OpAMD64ADDQ { 9110 break 9111 } 9112 ptr := v_0.Args[0] 9113 idx := v_0.Args[1] 9114 val := v.Args[1] 9115 mem := v.Args[2] 9116 if !(ptr.Op != OpSB) { 9117 break 9118 } 9119 v.reset(OpAMD64MOVWstoreidx1) 9120 v.AuxInt = off 9121 v.Aux = sym 9122 v.AddArg(ptr) 9123 v.AddArg(idx) 9124 v.AddArg(val) 9125 v.AddArg(mem) 9126 return true 9127 } 9128 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 9129 // cond: x.Uses == 1 && clobber(x) 9130 // result: (MOVLstore [i-2] {s} p w mem) 9131 for { 9132 i := v.AuxInt 9133 s := v.Aux 9134 p := v.Args[0] 9135 v_1 := v.Args[1] 9136 if v_1.Op != 
OpAMD64SHRQconst { 9137 break 9138 } 9139 if v_1.AuxInt != 16 { 9140 break 9141 } 9142 w := v_1.Args[0] 9143 x := v.Args[2] 9144 if x.Op != OpAMD64MOVWstore { 9145 break 9146 } 9147 if x.AuxInt != i-2 { 9148 break 9149 } 9150 if x.Aux != s { 9151 break 9152 } 9153 if p != x.Args[0] { 9154 break 9155 } 9156 if w != x.Args[1] { 9157 break 9158 } 9159 mem := x.Args[2] 9160 if !(x.Uses == 1 && clobber(x)) { 9161 break 9162 } 9163 v.reset(OpAMD64MOVLstore) 9164 v.AuxInt = i - 2 9165 v.Aux = s 9166 v.AddArg(p) 9167 v.AddArg(w) 9168 v.AddArg(mem) 9169 return true 9170 } 9171 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 9172 // cond: x.Uses == 1 && clobber(x) 9173 // result: (MOVLstore [i-2] {s} p w0 mem) 9174 for { 9175 i := v.AuxInt 9176 s := v.Aux 9177 p := v.Args[0] 9178 v_1 := v.Args[1] 9179 if v_1.Op != OpAMD64SHRQconst { 9180 break 9181 } 9182 j := v_1.AuxInt 9183 w := v_1.Args[0] 9184 x := v.Args[2] 9185 if x.Op != OpAMD64MOVWstore { 9186 break 9187 } 9188 if x.AuxInt != i-2 { 9189 break 9190 } 9191 if x.Aux != s { 9192 break 9193 } 9194 if p != x.Args[0] { 9195 break 9196 } 9197 w0 := x.Args[1] 9198 if w0.Op != OpAMD64SHRQconst { 9199 break 9200 } 9201 if w0.AuxInt != j-16 { 9202 break 9203 } 9204 if w != w0.Args[0] { 9205 break 9206 } 9207 mem := x.Args[2] 9208 if !(x.Uses == 1 && clobber(x)) { 9209 break 9210 } 9211 v.reset(OpAMD64MOVLstore) 9212 v.AuxInt = i - 2 9213 v.Aux = s 9214 v.AddArg(p) 9215 v.AddArg(w0) 9216 v.AddArg(mem) 9217 return true 9218 } 9219 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 9220 // cond: canMergeSym(sym1, sym2) 9221 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9222 for { 9223 off1 := v.AuxInt 9224 sym1 := v.Aux 9225 v_0 := v.Args[0] 9226 if v_0.Op != OpAMD64LEAL { 9227 break 9228 } 9229 off2 := v_0.AuxInt 9230 sym2 := v_0.Aux 9231 base := v_0.Args[0] 9232 val := v.Args[1] 9233 mem := v.Args[2] 9234 if !(canMergeSym(sym1, sym2)) { 9235 break 9236 } 9237 v.reset(OpAMD64MOVWstore) 9238 v.AuxInt = off1 + off2 9239 v.Aux = mergeSym(sym1, sym2) 9240 v.AddArg(base) 9241 v.AddArg(val) 9242 v.AddArg(mem) 9243 return true 9244 } 9245 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 9246 // cond: is32Bit(off1+off2) 9247 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 9248 for { 9249 off1 := v.AuxInt 9250 sym := v.Aux 9251 v_0 := v.Args[0] 9252 if v_0.Op != OpAMD64ADDLconst { 9253 break 9254 } 9255 off2 := v_0.AuxInt 9256 ptr := v_0.Args[0] 9257 val := v.Args[1] 9258 mem := v.Args[2] 9259 if !(is32Bit(off1 + off2)) { 9260 break 9261 } 9262 v.reset(OpAMD64MOVWstore) 9263 v.AuxInt = off1 + off2 9264 v.Aux = sym 9265 v.AddArg(ptr) 9266 v.AddArg(val) 9267 v.AddArg(mem) 9268 return true 9269 } 9270 return false 9271 } 9272 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { 9273 b := v.Block 9274 _ = b 9275 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 9276 // cond: ValAndOff(sc).canAdd(off) 9277 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9278 for { 9279 sc := v.AuxInt 9280 s := v.Aux 9281 v_0 := v.Args[0] 9282 if v_0.Op != OpAMD64ADDQconst { 9283 break 9284 } 9285 off := v_0.AuxInt 9286 ptr := v_0.Args[0] 9287 mem := v.Args[1] 9288 if !(ValAndOff(sc).canAdd(off)) { 9289 break 9290 } 9291 v.reset(OpAMD64MOVWstoreconst) 9292 v.AuxInt = ValAndOff(sc).add(off) 9293 v.Aux = s 9294 v.AddArg(ptr) 9295 v.AddArg(mem) 9296 return true 9297 } 9298 // match: (MOVWstoreconst [sc] {sym1} (LEAQ 
[off] {sym2} ptr) mem) 9299 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9300 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9301 for { 9302 sc := v.AuxInt 9303 sym1 := v.Aux 9304 v_0 := v.Args[0] 9305 if v_0.Op != OpAMD64LEAQ { 9306 break 9307 } 9308 off := v_0.AuxInt 9309 sym2 := v_0.Aux 9310 ptr := v_0.Args[0] 9311 mem := v.Args[1] 9312 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9313 break 9314 } 9315 v.reset(OpAMD64MOVWstoreconst) 9316 v.AuxInt = ValAndOff(sc).add(off) 9317 v.Aux = mergeSym(sym1, sym2) 9318 v.AddArg(ptr) 9319 v.AddArg(mem) 9320 return true 9321 } 9322 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 9323 // cond: canMergeSym(sym1, sym2) 9324 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9325 for { 9326 x := v.AuxInt 9327 sym1 := v.Aux 9328 v_0 := v.Args[0] 9329 if v_0.Op != OpAMD64LEAQ1 { 9330 break 9331 } 9332 off := v_0.AuxInt 9333 sym2 := v_0.Aux 9334 ptr := v_0.Args[0] 9335 idx := v_0.Args[1] 9336 mem := v.Args[1] 9337 if !(canMergeSym(sym1, sym2)) { 9338 break 9339 } 9340 v.reset(OpAMD64MOVWstoreconstidx1) 9341 v.AuxInt = ValAndOff(x).add(off) 9342 v.Aux = mergeSym(sym1, sym2) 9343 v.AddArg(ptr) 9344 v.AddArg(idx) 9345 v.AddArg(mem) 9346 return true 9347 } 9348 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 9349 // cond: canMergeSym(sym1, sym2) 9350 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9351 for { 9352 x := v.AuxInt 9353 sym1 := v.Aux 9354 v_0 := v.Args[0] 9355 if v_0.Op != OpAMD64LEAQ2 { 9356 break 9357 } 9358 off := v_0.AuxInt 9359 sym2 := v_0.Aux 9360 ptr := v_0.Args[0] 9361 idx := v_0.Args[1] 9362 mem := v.Args[1] 9363 if !(canMergeSym(sym1, sym2)) { 9364 break 9365 } 9366 v.reset(OpAMD64MOVWstoreconstidx2) 9367 v.AuxInt = ValAndOff(x).add(off) 9368 v.Aux = mergeSym(sym1, sym2) 9369 v.AddArg(ptr) 9370 v.AddArg(idx) 9371 v.AddArg(mem) 9372 return true 9373 } 9374 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 9375 // cond: 9376 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 9377 for { 9378 x := v.AuxInt 9379 sym := v.Aux 9380 v_0 := v.Args[0] 9381 if v_0.Op != OpAMD64ADDQ { 9382 break 9383 } 9384 ptr := v_0.Args[0] 9385 idx := v_0.Args[1] 9386 mem := v.Args[1] 9387 v.reset(OpAMD64MOVWstoreconstidx1) 9388 v.AuxInt = x 9389 v.Aux = sym 9390 v.AddArg(ptr) 9391 v.AddArg(idx) 9392 v.AddArg(mem) 9393 return true 9394 } 9395 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 9396 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9397 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 9398 for { 9399 c := v.AuxInt 9400 s := v.Aux 9401 p := v.Args[0] 9402 x := v.Args[1] 9403 if x.Op != OpAMD64MOVWstoreconst { 9404 break 9405 } 9406 a := x.AuxInt 9407 if x.Aux != s { 9408 break 9409 } 9410 if p != x.Args[0] { 9411 break 9412 } 9413 mem := x.Args[1] 9414 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9415 break 9416 } 9417 v.reset(OpAMD64MOVLstoreconst) 9418 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9419 v.Aux = s 9420 v.AddArg(p) 9421 v.AddArg(mem) 9422 return true 9423 } 9424 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 9425 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9426 // result: 
(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9427 for { 9428 sc := v.AuxInt 9429 sym1 := v.Aux 9430 v_0 := v.Args[0] 9431 if v_0.Op != OpAMD64LEAL { 9432 break 9433 } 9434 off := v_0.AuxInt 9435 sym2 := v_0.Aux 9436 ptr := v_0.Args[0] 9437 mem := v.Args[1] 9438 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9439 break 9440 } 9441 v.reset(OpAMD64MOVWstoreconst) 9442 v.AuxInt = ValAndOff(sc).add(off) 9443 v.Aux = mergeSym(sym1, sym2) 9444 v.AddArg(ptr) 9445 v.AddArg(mem) 9446 return true 9447 } 9448 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 9449 // cond: ValAndOff(sc).canAdd(off) 9450 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9451 for { 9452 sc := v.AuxInt 9453 s := v.Aux 9454 v_0 := v.Args[0] 9455 if v_0.Op != OpAMD64ADDLconst { 9456 break 9457 } 9458 off := v_0.AuxInt 9459 ptr := v_0.Args[0] 9460 mem := v.Args[1] 9461 if !(ValAndOff(sc).canAdd(off)) { 9462 break 9463 } 9464 v.reset(OpAMD64MOVWstoreconst) 9465 v.AuxInt = ValAndOff(sc).add(off) 9466 v.Aux = s 9467 v.AddArg(ptr) 9468 v.AddArg(mem) 9469 return true 9470 } 9471 return false 9472 } 9473 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { 9474 b := v.Block 9475 _ = b 9476 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 9477 // cond: 9478 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 9479 for { 9480 c := v.AuxInt 9481 sym := v.Aux 9482 ptr := v.Args[0] 9483 v_1 := v.Args[1] 9484 if v_1.Op != OpAMD64SHLQconst { 9485 break 9486 } 9487 if v_1.AuxInt != 1 { 9488 break 9489 } 9490 idx := v_1.Args[0] 9491 mem := v.Args[2] 9492 v.reset(OpAMD64MOVWstoreconstidx2) 9493 v.AuxInt = c 9494 v.Aux = sym 9495 v.AddArg(ptr) 9496 v.AddArg(idx) 9497 v.AddArg(mem) 9498 return true 9499 } 9500 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 9501 // cond: 9502 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9503 for { 9504 x := v.AuxInt 9505 sym := v.Aux 9506 v_0 := v.Args[0] 9507 if v_0.Op != OpAMD64ADDQconst { 9508 break 9509 } 9510 c := v_0.AuxInt 9511 ptr := v_0.Args[0] 9512 idx := v.Args[1] 9513 mem := v.Args[2] 9514 v.reset(OpAMD64MOVWstoreconstidx1) 9515 v.AuxInt = ValAndOff(x).add(c) 9516 v.Aux = sym 9517 v.AddArg(ptr) 9518 v.AddArg(idx) 9519 v.AddArg(mem) 9520 return true 9521 } 9522 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 9523 // cond: 9524 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9525 for { 9526 x := v.AuxInt 9527 sym := v.Aux 9528 ptr := v.Args[0] 9529 v_1 := v.Args[1] 9530 if v_1.Op != OpAMD64ADDQconst { 9531 break 9532 } 9533 c := v_1.AuxInt 9534 idx := v_1.Args[0] 9535 mem := v.Args[2] 9536 v.reset(OpAMD64MOVWstoreconstidx1) 9537 v.AuxInt = ValAndOff(x).add(c) 9538 v.Aux = sym 9539 v.AddArg(ptr) 9540 v.AddArg(idx) 9541 v.AddArg(mem) 9542 return true 9543 } 9544 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 9545 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9546 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 9547 for { 9548 c := v.AuxInt 9549 s := v.Aux 9550 p := v.Args[0] 9551 i := v.Args[1] 9552 x := v.Args[2] 9553 if x.Op != OpAMD64MOVWstoreconstidx1 { 9554 break 9555 } 9556 a := x.AuxInt 9557 if x.Aux != s { 9558 break 9559 } 9560 if p != x.Args[0] { 9561 break 9562 } 9563 if i != x.Args[1] { 9564 break 9565 } 9566 mem := x.Args[2] 
9567 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9568 break 9569 } 9570 v.reset(OpAMD64MOVLstoreconstidx1) 9571 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9572 v.Aux = s 9573 v.AddArg(p) 9574 v.AddArg(i) 9575 v.AddArg(mem) 9576 return true 9577 } 9578 return false 9579 } 9580 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { 9581 b := v.Block 9582 _ = b 9583 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 9584 // cond: 9585 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9586 for { 9587 x := v.AuxInt 9588 sym := v.Aux 9589 v_0 := v.Args[0] 9590 if v_0.Op != OpAMD64ADDQconst { 9591 break 9592 } 9593 c := v_0.AuxInt 9594 ptr := v_0.Args[0] 9595 idx := v.Args[1] 9596 mem := v.Args[2] 9597 v.reset(OpAMD64MOVWstoreconstidx2) 9598 v.AuxInt = ValAndOff(x).add(c) 9599 v.Aux = sym 9600 v.AddArg(ptr) 9601 v.AddArg(idx) 9602 v.AddArg(mem) 9603 return true 9604 } 9605 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 9606 // cond: 9607 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 9608 for { 9609 x := v.AuxInt 9610 sym := v.Aux 9611 ptr := v.Args[0] 9612 v_1 := v.Args[1] 9613 if v_1.Op != OpAMD64ADDQconst { 9614 break 9615 } 9616 c := v_1.AuxInt 9617 idx := v_1.Args[0] 9618 mem := v.Args[2] 9619 v.reset(OpAMD64MOVWstoreconstidx2) 9620 v.AuxInt = ValAndOff(x).add(2 * c) 9621 v.Aux = sym 9622 v.AddArg(ptr) 9623 v.AddArg(idx) 9624 v.AddArg(mem) 9625 return true 9626 } 9627 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 9628 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9629 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 9630 for { 9631 c := v.AuxInt 9632 s := v.Aux 9633 p := v.Args[0] 9634 i := v.Args[1] 9635 x := v.Args[2] 9636 if x.Op != OpAMD64MOVWstoreconstidx2 { 9637 break 9638 } 9639 a := x.AuxInt 9640 if x.Aux != s { 9641 break 9642 } 9643 if p != x.Args[0] { 9644 break 9645 } 9646 if i != x.Args[1] { 9647 break 9648 } 9649 mem := x.Args[2] 9650 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9651 break 9652 } 9653 v.reset(OpAMD64MOVLstoreconstidx1) 9654 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9655 v.Aux = s 9656 v.AddArg(p) 9657 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) 9658 v0.AuxInt = 1 9659 v0.AddArg(i) 9660 v.AddArg(v0) 9661 v.AddArg(mem) 9662 return true 9663 } 9664 return false 9665 } 9666 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { 9667 b := v.Block 9668 _ = b 9669 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 9670 // cond: 9671 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 9672 for { 9673 c := v.AuxInt 9674 sym := v.Aux 9675 ptr := v.Args[0] 9676 v_1 := v.Args[1] 9677 if v_1.Op != OpAMD64SHLQconst { 9678 break 9679 } 9680 if v_1.AuxInt != 1 { 9681 break 9682 } 9683 idx := v_1.Args[0] 9684 val := v.Args[2] 9685 mem := v.Args[3] 9686 v.reset(OpAMD64MOVWstoreidx2) 9687 v.AuxInt = c 9688 v.Aux = sym 9689 v.AddArg(ptr) 9690 v.AddArg(idx) 9691 v.AddArg(val) 9692 v.AddArg(mem) 9693 return true 9694 } 9695 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9696 // cond: 9697 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 9698 for 
{ 9699 c := v.AuxInt 9700 sym := v.Aux 9701 v_0 := v.Args[0] 9702 if v_0.Op != OpAMD64ADDQconst { 9703 break 9704 } 9705 d := v_0.AuxInt 9706 ptr := v_0.Args[0] 9707 idx := v.Args[1] 9708 val := v.Args[2] 9709 mem := v.Args[3] 9710 v.reset(OpAMD64MOVWstoreidx1) 9711 v.AuxInt = c + d 9712 v.Aux = sym 9713 v.AddArg(ptr) 9714 v.AddArg(idx) 9715 v.AddArg(val) 9716 v.AddArg(mem) 9717 return true 9718 } 9719 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9720 // cond: 9721 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 9722 for { 9723 c := v.AuxInt 9724 sym := v.Aux 9725 ptr := v.Args[0] 9726 v_1 := v.Args[1] 9727 if v_1.Op != OpAMD64ADDQconst { 9728 break 9729 } 9730 d := v_1.AuxInt 9731 idx := v_1.Args[0] 9732 val := v.Args[2] 9733 mem := v.Args[3] 9734 v.reset(OpAMD64MOVWstoreidx1) 9735 v.AuxInt = c + d 9736 v.Aux = sym 9737 v.AddArg(ptr) 9738 v.AddArg(idx) 9739 v.AddArg(val) 9740 v.AddArg(mem) 9741 return true 9742 } 9743 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 9744 // cond: x.Uses == 1 && clobber(x) 9745 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 9746 for { 9747 i := v.AuxInt 9748 s := v.Aux 9749 p := v.Args[0] 9750 idx := v.Args[1] 9751 v_2 := v.Args[2] 9752 if v_2.Op != OpAMD64SHRQconst { 9753 break 9754 } 9755 if v_2.AuxInt != 16 { 9756 break 9757 } 9758 w := v_2.Args[0] 9759 x := v.Args[3] 9760 if x.Op != OpAMD64MOVWstoreidx1 { 9761 break 9762 } 9763 if x.AuxInt != i-2 { 9764 break 9765 } 9766 if x.Aux != s { 9767 break 9768 } 9769 if p != x.Args[0] { 9770 break 9771 } 9772 if idx != x.Args[1] { 9773 break 9774 } 9775 if w != x.Args[2] { 9776 break 9777 } 9778 mem := x.Args[3] 9779 if !(x.Uses == 1 && clobber(x)) { 9780 break 9781 } 9782 v.reset(OpAMD64MOVLstoreidx1) 9783 v.AuxInt = i - 2 9784 v.Aux = s 9785 v.AddArg(p) 9786 v.AddArg(idx) 9787 v.AddArg(w) 9788 v.AddArg(mem) 9789 return true 9790 } 9791 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 9792 // cond: x.Uses == 1 && clobber(x) 9793 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 9794 for { 9795 i := v.AuxInt 9796 s := v.Aux 9797 p := v.Args[0] 9798 idx := v.Args[1] 9799 v_2 := v.Args[2] 9800 if v_2.Op != OpAMD64SHRQconst { 9801 break 9802 } 9803 j := v_2.AuxInt 9804 w := v_2.Args[0] 9805 x := v.Args[3] 9806 if x.Op != OpAMD64MOVWstoreidx1 { 9807 break 9808 } 9809 if x.AuxInt != i-2 { 9810 break 9811 } 9812 if x.Aux != s { 9813 break 9814 } 9815 if p != x.Args[0] { 9816 break 9817 } 9818 if idx != x.Args[1] { 9819 break 9820 } 9821 w0 := x.Args[2] 9822 if w0.Op != OpAMD64SHRQconst { 9823 break 9824 } 9825 if w0.AuxInt != j-16 { 9826 break 9827 } 9828 if w != w0.Args[0] { 9829 break 9830 } 9831 mem := x.Args[3] 9832 if !(x.Uses == 1 && clobber(x)) { 9833 break 9834 } 9835 v.reset(OpAMD64MOVLstoreidx1) 9836 v.AuxInt = i - 2 9837 v.Aux = s 9838 v.AddArg(p) 9839 v.AddArg(idx) 9840 v.AddArg(w0) 9841 v.AddArg(mem) 9842 return true 9843 } 9844 return false 9845 } 9846 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { 9847 b := v.Block 9848 _ = b 9849 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9850 // cond: 9851 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 9852 for { 9853 c := v.AuxInt 9854 sym := v.Aux 9855 v_0 := v.Args[0] 9856 if v_0.Op != OpAMD64ADDQconst { 9857 break 9858 } 9859 d := v_0.AuxInt 9860 ptr := v_0.Args[0] 9861 idx := v.Args[1] 9862 val := v.Args[2] 9863 mem := v.Args[3] 9864 
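		// All operands matched: fold the pointer's ADDQconst displacement into
		// the store's constant offset, so the rewritten store addresses
		// ptr + 2*idx + (c+d) directly and no longer depends on the separate add.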
v.reset(OpAMD64MOVWstoreidx2) 9865 v.AuxInt = c + d 9866 v.Aux = sym 9867 v.AddArg(ptr) 9868 v.AddArg(idx) 9869 v.AddArg(val) 9870 v.AddArg(mem) 9871 return true 9872 } 9873 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9874 // cond: 9875 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 9876 for { 9877 c := v.AuxInt 9878 sym := v.Aux 9879 ptr := v.Args[0] 9880 v_1 := v.Args[1] 9881 if v_1.Op != OpAMD64ADDQconst { 9882 break 9883 } 9884 d := v_1.AuxInt 9885 idx := v_1.Args[0] 9886 val := v.Args[2] 9887 mem := v.Args[3] 9888 v.reset(OpAMD64MOVWstoreidx2) 9889 v.AuxInt = c + 2*d 9890 v.Aux = sym 9891 v.AddArg(ptr) 9892 v.AddArg(idx) 9893 v.AddArg(val) 9894 v.AddArg(mem) 9895 return true 9896 } 9897 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 9898 // cond: x.Uses == 1 && clobber(x) 9899 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 9900 for { 9901 i := v.AuxInt 9902 s := v.Aux 9903 p := v.Args[0] 9904 idx := v.Args[1] 9905 v_2 := v.Args[2] 9906 if v_2.Op != OpAMD64SHRQconst { 9907 break 9908 } 9909 if v_2.AuxInt != 16 { 9910 break 9911 } 9912 w := v_2.Args[0] 9913 x := v.Args[3] 9914 if x.Op != OpAMD64MOVWstoreidx2 { 9915 break 9916 } 9917 if x.AuxInt != i-2 { 9918 break 9919 } 9920 if x.Aux != s { 9921 break 9922 } 9923 if p != x.Args[0] { 9924 break 9925 } 9926 if idx != x.Args[1] { 9927 break 9928 } 9929 if w != x.Args[2] { 9930 break 9931 } 9932 mem := x.Args[3] 9933 if !(x.Uses == 1 && clobber(x)) { 9934 break 9935 } 9936 v.reset(OpAMD64MOVLstoreidx1) 9937 v.AuxInt = i - 2 9938 v.Aux = s 9939 v.AddArg(p) 9940 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 9941 v0.AuxInt = 1 9942 v0.AddArg(idx) 9943 v.AddArg(v0) 9944 v.AddArg(w) 9945 v.AddArg(mem) 9946 return true 9947 } 9948 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 9949 // cond: x.Uses == 1 && clobber(x) 9950 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 9951 for { 9952 i := v.AuxInt 9953 s := v.Aux 9954 p := v.Args[0] 9955 idx := v.Args[1] 9956 v_2 := v.Args[2] 9957 if v_2.Op != OpAMD64SHRQconst { 9958 break 9959 } 9960 j := v_2.AuxInt 9961 w := v_2.Args[0] 9962 x := v.Args[3] 9963 if x.Op != OpAMD64MOVWstoreidx2 { 9964 break 9965 } 9966 if x.AuxInt != i-2 { 9967 break 9968 } 9969 if x.Aux != s { 9970 break 9971 } 9972 if p != x.Args[0] { 9973 break 9974 } 9975 if idx != x.Args[1] { 9976 break 9977 } 9978 w0 := x.Args[2] 9979 if w0.Op != OpAMD64SHRQconst { 9980 break 9981 } 9982 if w0.AuxInt != j-16 { 9983 break 9984 } 9985 if w != w0.Args[0] { 9986 break 9987 } 9988 mem := x.Args[3] 9989 if !(x.Uses == 1 && clobber(x)) { 9990 break 9991 } 9992 v.reset(OpAMD64MOVLstoreidx1) 9993 v.AuxInt = i - 2 9994 v.Aux = s 9995 v.AddArg(p) 9996 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 9997 v0.AuxInt = 1 9998 v0.AddArg(idx) 9999 v.AddArg(v0) 10000 v.AddArg(w0) 10001 v.AddArg(mem) 10002 return true 10003 } 10004 return false 10005 } 10006 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 10007 b := v.Block 10008 _ = b 10009 // match: (MULL x (MOVLconst [c])) 10010 // cond: 10011 // result: (MULLconst [c] x) 10012 for { 10013 x := v.Args[0] 10014 v_1 := v.Args[1] 10015 if v_1.Op != OpAMD64MOVLconst { 10016 break 10017 } 10018 c := v_1.AuxInt 10019 v.reset(OpAMD64MULLconst) 10020 v.AuxInt = c 10021 v.AddArg(x) 10022 return true 10023 } 10024 // match: (MULL (MOVLconst [c]) x) 10025 // cond: 10026 
// result: (MULLconst [c] x) 10027 for { 10028 v_0 := v.Args[0] 10029 if v_0.Op != OpAMD64MOVLconst { 10030 break 10031 } 10032 c := v_0.AuxInt 10033 x := v.Args[1] 10034 v.reset(OpAMD64MULLconst) 10035 v.AuxInt = c 10036 v.AddArg(x) 10037 return true 10038 } 10039 return false 10040 } 10041 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 10042 b := v.Block 10043 _ = b 10044 // match: (MULLconst [c] (MULLconst [d] x)) 10045 // cond: 10046 // result: (MULLconst [int64(int32(c * d))] x) 10047 for { 10048 c := v.AuxInt 10049 v_0 := v.Args[0] 10050 if v_0.Op != OpAMD64MULLconst { 10051 break 10052 } 10053 d := v_0.AuxInt 10054 x := v_0.Args[0] 10055 v.reset(OpAMD64MULLconst) 10056 v.AuxInt = int64(int32(c * d)) 10057 v.AddArg(x) 10058 return true 10059 } 10060 // match: (MULLconst [c] (MOVLconst [d])) 10061 // cond: 10062 // result: (MOVLconst [int64(int32(c*d))]) 10063 for { 10064 c := v.AuxInt 10065 v_0 := v.Args[0] 10066 if v_0.Op != OpAMD64MOVLconst { 10067 break 10068 } 10069 d := v_0.AuxInt 10070 v.reset(OpAMD64MOVLconst) 10071 v.AuxInt = int64(int32(c * d)) 10072 return true 10073 } 10074 return false 10075 } 10076 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 10077 b := v.Block 10078 _ = b 10079 // match: (MULQ x (MOVQconst [c])) 10080 // cond: is32Bit(c) 10081 // result: (MULQconst [c] x) 10082 for { 10083 x := v.Args[0] 10084 v_1 := v.Args[1] 10085 if v_1.Op != OpAMD64MOVQconst { 10086 break 10087 } 10088 c := v_1.AuxInt 10089 if !(is32Bit(c)) { 10090 break 10091 } 10092 v.reset(OpAMD64MULQconst) 10093 v.AuxInt = c 10094 v.AddArg(x) 10095 return true 10096 } 10097 // match: (MULQ (MOVQconst [c]) x) 10098 // cond: is32Bit(c) 10099 // result: (MULQconst [c] x) 10100 for { 10101 v_0 := v.Args[0] 10102 if v_0.Op != OpAMD64MOVQconst { 10103 break 10104 } 10105 c := v_0.AuxInt 10106 x := v.Args[1] 10107 if !(is32Bit(c)) { 10108 break 10109 } 10110 v.reset(OpAMD64MULQconst) 10111 v.AuxInt = c 10112 v.AddArg(x) 10113 return true 10114 } 10115 return false 10116 } 10117 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 10118 b := v.Block 10119 _ = b 10120 // match: (MULQconst [c] (MULQconst [d] x)) 10121 // cond: is32Bit(c*d) 10122 // result: (MULQconst [c * d] x) 10123 for { 10124 c := v.AuxInt 10125 v_0 := v.Args[0] 10126 if v_0.Op != OpAMD64MULQconst { 10127 break 10128 } 10129 d := v_0.AuxInt 10130 x := v_0.Args[0] 10131 if !(is32Bit(c * d)) { 10132 break 10133 } 10134 v.reset(OpAMD64MULQconst) 10135 v.AuxInt = c * d 10136 v.AddArg(x) 10137 return true 10138 } 10139 // match: (MULQconst [-1] x) 10140 // cond: 10141 // result: (NEGQ x) 10142 for { 10143 if v.AuxInt != -1 { 10144 break 10145 } 10146 x := v.Args[0] 10147 v.reset(OpAMD64NEGQ) 10148 v.AddArg(x) 10149 return true 10150 } 10151 // match: (MULQconst [0] _) 10152 // cond: 10153 // result: (MOVQconst [0]) 10154 for { 10155 if v.AuxInt != 0 { 10156 break 10157 } 10158 v.reset(OpAMD64MOVQconst) 10159 v.AuxInt = 0 10160 return true 10161 } 10162 // match: (MULQconst [1] x) 10163 // cond: 10164 // result: x 10165 for { 10166 if v.AuxInt != 1 { 10167 break 10168 } 10169 x := v.Args[0] 10170 v.reset(OpCopy) 10171 v.Type = x.Type 10172 v.AddArg(x) 10173 return true 10174 } 10175 // match: (MULQconst [3] x) 10176 // cond: 10177 // result: (LEAQ2 x x) 10178 for { 10179 if v.AuxInt != 3 { 10180 break 10181 } 10182 x := v.Args[0] 10183 v.reset(OpAMD64LEAQ2) 10184 v.AddArg(x) 10185 v.AddArg(x) 10186 return true 10187 } 10188 // match: (MULQconst [5] x) 10189 // cond: 10190 
// result: (LEAQ4 x x) 10191 for { 10192 if v.AuxInt != 5 { 10193 break 10194 } 10195 x := v.Args[0] 10196 v.reset(OpAMD64LEAQ4) 10197 v.AddArg(x) 10198 v.AddArg(x) 10199 return true 10200 } 10201 // match: (MULQconst [7] x) 10202 // cond: 10203 // result: (LEAQ8 (NEGQ <v.Type> x) x) 10204 for { 10205 if v.AuxInt != 7 { 10206 break 10207 } 10208 x := v.Args[0] 10209 v.reset(OpAMD64LEAQ8) 10210 v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type) 10211 v0.AddArg(x) 10212 v.AddArg(v0) 10213 v.AddArg(x) 10214 return true 10215 } 10216 // match: (MULQconst [9] x) 10217 // cond: 10218 // result: (LEAQ8 x x) 10219 for { 10220 if v.AuxInt != 9 { 10221 break 10222 } 10223 x := v.Args[0] 10224 v.reset(OpAMD64LEAQ8) 10225 v.AddArg(x) 10226 v.AddArg(x) 10227 return true 10228 } 10229 // match: (MULQconst [11] x) 10230 // cond: 10231 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 10232 for { 10233 if v.AuxInt != 11 { 10234 break 10235 } 10236 x := v.Args[0] 10237 v.reset(OpAMD64LEAQ2) 10238 v.AddArg(x) 10239 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10240 v0.AddArg(x) 10241 v0.AddArg(x) 10242 v.AddArg(v0) 10243 return true 10244 } 10245 // match: (MULQconst [13] x) 10246 // cond: 10247 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 10248 for { 10249 if v.AuxInt != 13 { 10250 break 10251 } 10252 x := v.Args[0] 10253 v.reset(OpAMD64LEAQ4) 10254 v.AddArg(x) 10255 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10256 v0.AddArg(x) 10257 v0.AddArg(x) 10258 v.AddArg(v0) 10259 return true 10260 } 10261 // match: (MULQconst [21] x) 10262 // cond: 10263 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 10264 for { 10265 if v.AuxInt != 21 { 10266 break 10267 } 10268 x := v.Args[0] 10269 v.reset(OpAMD64LEAQ4) 10270 v.AddArg(x) 10271 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10272 v0.AddArg(x) 10273 v0.AddArg(x) 10274 v.AddArg(v0) 10275 return true 10276 } 10277 // match: (MULQconst [25] x) 10278 // cond: 10279 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 10280 for { 10281 if v.AuxInt != 25 { 10282 break 10283 } 10284 x := v.Args[0] 10285 v.reset(OpAMD64LEAQ8) 10286 v.AddArg(x) 10287 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10288 v0.AddArg(x) 10289 v0.AddArg(x) 10290 v.AddArg(v0) 10291 return true 10292 } 10293 // match: (MULQconst [37] x) 10294 // cond: 10295 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 10296 for { 10297 if v.AuxInt != 37 { 10298 break 10299 } 10300 x := v.Args[0] 10301 v.reset(OpAMD64LEAQ4) 10302 v.AddArg(x) 10303 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 10304 v0.AddArg(x) 10305 v0.AddArg(x) 10306 v.AddArg(v0) 10307 return true 10308 } 10309 // match: (MULQconst [41] x) 10310 // cond: 10311 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 10312 for { 10313 if v.AuxInt != 41 { 10314 break 10315 } 10316 x := v.Args[0] 10317 v.reset(OpAMD64LEAQ8) 10318 v.AddArg(x) 10319 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10320 v0.AddArg(x) 10321 v0.AddArg(x) 10322 v.AddArg(v0) 10323 return true 10324 } 10325 // match: (MULQconst [73] x) 10326 // cond: 10327 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 10328 for { 10329 if v.AuxInt != 73 { 10330 break 10331 } 10332 x := v.Args[0] 10333 v.reset(OpAMD64LEAQ8) 10334 v.AddArg(x) 10335 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 10336 v0.AddArg(x) 10337 v0.AddArg(x) 10338 v.AddArg(v0) 10339 return true 10340 } 10341 // match: (MULQconst [c] x) 10342 // cond: isPowerOfTwo(c) 10343 // result: (SHLQconst [log2(c)] x) 10344 for { 10345 c := v.AuxInt 10346 x := v.Args[0] 10347 if !(isPowerOfTwo(c)) { 10348 break 10349 } 10350 v.reset(OpAMD64SHLQconst) 10351 v.AuxInt = 
log2(c) 10352 v.AddArg(x) 10353 return true 10354 } 10355 // match: (MULQconst [c] x) 10356 // cond: isPowerOfTwo(c+1) && c >= 15 10357 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 10358 for { 10359 c := v.AuxInt 10360 x := v.Args[0] 10361 if !(isPowerOfTwo(c+1) && c >= 15) { 10362 break 10363 } 10364 v.reset(OpAMD64SUBQ) 10365 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10366 v0.AuxInt = log2(c + 1) 10367 v0.AddArg(x) 10368 v.AddArg(v0) 10369 v.AddArg(x) 10370 return true 10371 } 10372 // match: (MULQconst [c] x) 10373 // cond: isPowerOfTwo(c-1) && c >= 17 10374 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 10375 for { 10376 c := v.AuxInt 10377 x := v.Args[0] 10378 if !(isPowerOfTwo(c-1) && c >= 17) { 10379 break 10380 } 10381 v.reset(OpAMD64LEAQ1) 10382 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10383 v0.AuxInt = log2(c - 1) 10384 v0.AddArg(x) 10385 v.AddArg(v0) 10386 v.AddArg(x) 10387 return true 10388 } 10389 // match: (MULQconst [c] x) 10390 // cond: isPowerOfTwo(c-2) && c >= 34 10391 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 10392 for { 10393 c := v.AuxInt 10394 x := v.Args[0] 10395 if !(isPowerOfTwo(c-2) && c >= 34) { 10396 break 10397 } 10398 v.reset(OpAMD64LEAQ2) 10399 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10400 v0.AuxInt = log2(c - 2) 10401 v0.AddArg(x) 10402 v.AddArg(v0) 10403 v.AddArg(x) 10404 return true 10405 } 10406 // match: (MULQconst [c] x) 10407 // cond: isPowerOfTwo(c-4) && c >= 68 10408 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 10409 for { 10410 c := v.AuxInt 10411 x := v.Args[0] 10412 if !(isPowerOfTwo(c-4) && c >= 68) { 10413 break 10414 } 10415 v.reset(OpAMD64LEAQ4) 10416 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10417 v0.AuxInt = log2(c - 4) 10418 v0.AddArg(x) 10419 v.AddArg(v0) 10420 v.AddArg(x) 10421 return true 10422 } 10423 // match: (MULQconst [c] x) 10424 // cond: isPowerOfTwo(c-8) && c >= 136 10425 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 10426 for { 10427 c := v.AuxInt 10428 x := v.Args[0] 10429 if !(isPowerOfTwo(c-8) && c >= 136) { 10430 break 10431 } 10432 v.reset(OpAMD64LEAQ8) 10433 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10434 v0.AuxInt = log2(c - 8) 10435 v0.AddArg(x) 10436 v.AddArg(v0) 10437 v.AddArg(x) 10438 return true 10439 } 10440 // match: (MULQconst [c] x) 10441 // cond: c%3 == 0 && isPowerOfTwo(c/3) 10442 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 10443 for { 10444 c := v.AuxInt 10445 x := v.Args[0] 10446 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 10447 break 10448 } 10449 v.reset(OpAMD64SHLQconst) 10450 v.AuxInt = log2(c / 3) 10451 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10452 v0.AddArg(x) 10453 v0.AddArg(x) 10454 v.AddArg(v0) 10455 return true 10456 } 10457 // match: (MULQconst [c] x) 10458 // cond: c%5 == 0 && isPowerOfTwo(c/5) 10459 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 10460 for { 10461 c := v.AuxInt 10462 x := v.Args[0] 10463 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 10464 break 10465 } 10466 v.reset(OpAMD64SHLQconst) 10467 v.AuxInt = log2(c / 5) 10468 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10469 v0.AddArg(x) 10470 v0.AddArg(x) 10471 v.AddArg(v0) 10472 return true 10473 } 10474 // match: (MULQconst [c] x) 10475 // cond: c%9 == 0 && isPowerOfTwo(c/9) 10476 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 10477 for { 10478 c := v.AuxInt 10479 x := v.Args[0] 10480 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 10481 break 10482 } 10483 v.reset(OpAMD64SHLQconst) 10484 v.AuxInt = log2(c / 9) 
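		// Strength reduction for c = 9*2^k: LEAQ8 x x computes x+8*x = 9*x in one
		// instruction, and the SHLQconst by log2(c/9) supplies the remaining factor,
		// e.g. c=72 becomes (LEAQ8 x x) shifted left by 3.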
		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
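		// This rule and the ones that follow recognize little-endian byte loads
		// that are shifted and ORed together and replace them with a single wider
		// load: here b[i] | b[i+1]<<8 becomes one MOVWload, provided every partial
		// load and shift has exactly one use and can be clobbered.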
10635 p := x0.Args[0] 10636 mem := x0.Args[1] 10637 s0 := v.Args[1] 10638 if s0.Op != OpAMD64SHLLconst { 10639 break 10640 } 10641 if s0.AuxInt != 8 { 10642 break 10643 } 10644 x1 := s0.Args[0] 10645 if x1.Op != OpAMD64MOVBload { 10646 break 10647 } 10648 if x1.AuxInt != i+1 { 10649 break 10650 } 10651 if x1.Aux != s { 10652 break 10653 } 10654 if p != x1.Args[0] { 10655 break 10656 } 10657 if mem != x1.Args[1] { 10658 break 10659 } 10660 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 10661 break 10662 } 10663 b = mergePoint(b, x0, x1) 10664 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 10665 v.reset(OpCopy) 10666 v.AddArg(v0) 10667 v0.AuxInt = i 10668 v0.Aux = s 10669 v0.AddArg(p) 10670 v0.AddArg(mem) 10671 return true 10672 } 10673 // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) 10674 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 10675 // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) 10676 for { 10677 o0 := v.Args[0] 10678 if o0.Op != OpAMD64ORL { 10679 break 10680 } 10681 x0 := o0.Args[0] 10682 if x0.Op != OpAMD64MOVWload { 10683 break 10684 } 10685 i := x0.AuxInt 10686 s := x0.Aux 10687 p := x0.Args[0] 10688 mem := x0.Args[1] 10689 s0 := o0.Args[1] 10690 if s0.Op != OpAMD64SHLLconst { 10691 break 10692 } 10693 if s0.AuxInt != 16 { 10694 break 10695 } 10696 x1 := s0.Args[0] 10697 if x1.Op != OpAMD64MOVBload { 10698 break 10699 } 10700 if x1.AuxInt != i+2 { 10701 break 10702 } 10703 if x1.Aux != s { 10704 break 10705 } 10706 if p != x1.Args[0] { 10707 break 10708 } 10709 if mem != x1.Args[1] { 10710 break 10711 } 10712 s1 := v.Args[1] 10713 if s1.Op != OpAMD64SHLLconst { 10714 break 10715 } 10716 if s1.AuxInt != 24 { 10717 break 10718 } 10719 x2 := s1.Args[0] 10720 if x2.Op != OpAMD64MOVBload { 10721 break 10722 } 10723 if x2.AuxInt != i+3 { 10724 break 10725 } 10726 if x2.Aux != s { 10727 break 10728 } 10729 if p != x2.Args[0] { 10730 break 10731 } 10732 if mem != x2.Args[1] { 10733 break 10734 } 10735 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 10736 break 10737 } 10738 b = mergePoint(b, x0, x1, x2) 10739 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 10740 v.reset(OpCopy) 10741 v.AddArg(v0) 10742 v0.AuxInt = i 10743 v0.Aux = s 10744 v0.AddArg(p) 10745 v0.AddArg(mem) 10746 return true 10747 } 10748 // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) 10749 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 10750 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem) 10751 for { 10752 x0 := v.Args[0] 10753 if x0.Op != OpAMD64MOVBloadidx1 { 10754 break 10755 } 10756 i := x0.AuxInt 10757 s := x0.Aux 10758 p := x0.Args[0] 10759 idx := x0.Args[1] 10760 mem := x0.Args[2] 10761 s0 := v.Args[1] 10762 if s0.Op != OpAMD64SHLLconst { 10763 break 10764 } 10765 if s0.AuxInt != 8 { 10766 break 10767 } 10768 x1 := s0.Args[0] 10769 if x1.Op != 
OpAMD64MOVBloadidx1 { 10770 break 10771 } 10772 if x1.AuxInt != i+1 { 10773 break 10774 } 10775 if x1.Aux != s { 10776 break 10777 } 10778 if p != x1.Args[0] { 10779 break 10780 } 10781 if idx != x1.Args[1] { 10782 break 10783 } 10784 if mem != x1.Args[2] { 10785 break 10786 } 10787 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 10788 break 10789 } 10790 b = mergePoint(b, x0, x1) 10791 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 10792 v.reset(OpCopy) 10793 v.AddArg(v0) 10794 v0.AuxInt = i 10795 v0.Aux = s 10796 v0.AddArg(p) 10797 v0.AddArg(idx) 10798 v0.AddArg(mem) 10799 return true 10800 } 10801 // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) 10802 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 10803 // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem) 10804 for { 10805 o0 := v.Args[0] 10806 if o0.Op != OpAMD64ORL { 10807 break 10808 } 10809 x0 := o0.Args[0] 10810 if x0.Op != OpAMD64MOVWloadidx1 { 10811 break 10812 } 10813 i := x0.AuxInt 10814 s := x0.Aux 10815 p := x0.Args[0] 10816 idx := x0.Args[1] 10817 mem := x0.Args[2] 10818 s0 := o0.Args[1] 10819 if s0.Op != OpAMD64SHLLconst { 10820 break 10821 } 10822 if s0.AuxInt != 16 { 10823 break 10824 } 10825 x1 := s0.Args[0] 10826 if x1.Op != OpAMD64MOVBloadidx1 { 10827 break 10828 } 10829 if x1.AuxInt != i+2 { 10830 break 10831 } 10832 if x1.Aux != s { 10833 break 10834 } 10835 if p != x1.Args[0] { 10836 break 10837 } 10838 if idx != x1.Args[1] { 10839 break 10840 } 10841 if mem != x1.Args[2] { 10842 break 10843 } 10844 s1 := v.Args[1] 10845 if s1.Op != OpAMD64SHLLconst { 10846 break 10847 } 10848 if s1.AuxInt != 24 { 10849 break 10850 } 10851 x2 := s1.Args[0] 10852 if x2.Op != OpAMD64MOVBloadidx1 { 10853 break 10854 } 10855 if x2.AuxInt != i+3 { 10856 break 10857 } 10858 if x2.Aux != s { 10859 break 10860 } 10861 if p != x2.Args[0] { 10862 break 10863 } 10864 if idx != x2.Args[1] { 10865 break 10866 } 10867 if mem != x2.Args[2] { 10868 break 10869 } 10870 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 10871 break 10872 } 10873 b = mergePoint(b, x0, x1, x2) 10874 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 10875 v.reset(OpCopy) 10876 v.AddArg(v0) 10877 v0.AuxInt = i 10878 v0.Aux = s 10879 v0.AddArg(p) 10880 v0.AddArg(idx) 10881 v0.AddArg(mem) 10882 return true 10883 } 10884 return false 10885 } 10886 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { 10887 b := v.Block 10888 _ = b 10889 // match: (ORLconst [c] x) 10890 // cond: int32(c)==0 10891 // result: x 10892 for { 10893 c := v.AuxInt 10894 x := v.Args[0] 10895 if !(int32(c) == 0) { 10896 break 10897 } 10898 v.reset(OpCopy) 10899 v.Type = x.Type 10900 v.AddArg(x) 10901 return true 10902 } 10903 // match: (ORLconst [c] _) 10904 // cond: int32(c)==-1 10905 // result: (MOVLconst [-1]) 10906 for { 10907 c := v.AuxInt 10908 if !(int32(c) == -1) { 10909 break 10910 } 10911 v.reset(OpAMD64MOVLconst) 10912 v.AuxInt = -1 10913 return true 10914 } 10915 // match: 
(ORLconst [c] (MOVLconst [d])) 10916 // cond: 10917 // result: (MOVLconst [c|d]) 10918 for { 10919 c := v.AuxInt 10920 v_0 := v.Args[0] 10921 if v_0.Op != OpAMD64MOVLconst { 10922 break 10923 } 10924 d := v_0.AuxInt 10925 v.reset(OpAMD64MOVLconst) 10926 v.AuxInt = c | d 10927 return true 10928 } 10929 return false 10930 } 10931 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { 10932 b := v.Block 10933 _ = b 10934 // match: (ORQ x (MOVQconst [c])) 10935 // cond: is32Bit(c) 10936 // result: (ORQconst [c] x) 10937 for { 10938 x := v.Args[0] 10939 v_1 := v.Args[1] 10940 if v_1.Op != OpAMD64MOVQconst { 10941 break 10942 } 10943 c := v_1.AuxInt 10944 if !(is32Bit(c)) { 10945 break 10946 } 10947 v.reset(OpAMD64ORQconst) 10948 v.AuxInt = c 10949 v.AddArg(x) 10950 return true 10951 } 10952 // match: (ORQ (MOVQconst [c]) x) 10953 // cond: is32Bit(c) 10954 // result: (ORQconst [c] x) 10955 for { 10956 v_0 := v.Args[0] 10957 if v_0.Op != OpAMD64MOVQconst { 10958 break 10959 } 10960 c := v_0.AuxInt 10961 x := v.Args[1] 10962 if !(is32Bit(c)) { 10963 break 10964 } 10965 v.reset(OpAMD64ORQconst) 10966 v.AuxInt = c 10967 v.AddArg(x) 10968 return true 10969 } 10970 // match: (ORQ x x) 10971 // cond: 10972 // result: x 10973 for { 10974 x := v.Args[0] 10975 if x != v.Args[1] { 10976 break 10977 } 10978 v.reset(OpCopy) 10979 v.Type = x.Type 10980 v.AddArg(x) 10981 return true 10982 } 10983 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) 10984 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 10985 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) 10986 for { 10987 o0 := v.Args[0] 10988 if o0.Op != OpAMD64ORQ { 10989 break 10990 } 10991 o1 := o0.Args[0] 10992 if o1.Op != OpAMD64ORQ { 10993 break 10994 } 10995 o2 := o1.Args[0] 10996 if o2.Op != OpAMD64ORQ { 10997 break 10998 } 10999 o3 := o2.Args[0] 11000 if o3.Op != OpAMD64ORQ { 11001 break 11002 } 11003 o4 := o3.Args[0] 11004 if o4.Op != OpAMD64ORQ { 11005 break 11006 } 11007 o5 := o4.Args[0] 11008 if o5.Op != OpAMD64ORQ { 11009 break 11010 } 11011 x0 := o5.Args[0] 11012 if x0.Op != OpAMD64MOVBload { 11013 break 11014 } 11015 i := x0.AuxInt 11016 s := x0.Aux 11017 p := x0.Args[0] 11018 mem := x0.Args[1] 11019 s0 := o5.Args[1] 11020 if s0.Op != OpAMD64SHLQconst { 11021 break 11022 } 11023 if s0.AuxInt != 8 { 11024 break 11025 } 11026 x1 := s0.Args[0] 11027 if x1.Op != OpAMD64MOVBload { 11028 break 11029 } 11030 if x1.AuxInt != i+1 { 11031 break 11032 } 11033 if x1.Aux != s { 11034 
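			// The byte at i+1 must be loaded from the same symbol as the byte at i;
			// otherwise the eight single-byte loads cannot be fused into one MOVQload.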
break 11035 } 11036 if p != x1.Args[0] { 11037 break 11038 } 11039 if mem != x1.Args[1] { 11040 break 11041 } 11042 s1 := o4.Args[1] 11043 if s1.Op != OpAMD64SHLQconst { 11044 break 11045 } 11046 if s1.AuxInt != 16 { 11047 break 11048 } 11049 x2 := s1.Args[0] 11050 if x2.Op != OpAMD64MOVBload { 11051 break 11052 } 11053 if x2.AuxInt != i+2 { 11054 break 11055 } 11056 if x2.Aux != s { 11057 break 11058 } 11059 if p != x2.Args[0] { 11060 break 11061 } 11062 if mem != x2.Args[1] { 11063 break 11064 } 11065 s2 := o3.Args[1] 11066 if s2.Op != OpAMD64SHLQconst { 11067 break 11068 } 11069 if s2.AuxInt != 24 { 11070 break 11071 } 11072 x3 := s2.Args[0] 11073 if x3.Op != OpAMD64MOVBload { 11074 break 11075 } 11076 if x3.AuxInt != i+3 { 11077 break 11078 } 11079 if x3.Aux != s { 11080 break 11081 } 11082 if p != x3.Args[0] { 11083 break 11084 } 11085 if mem != x3.Args[1] { 11086 break 11087 } 11088 s3 := o2.Args[1] 11089 if s3.Op != OpAMD64SHLQconst { 11090 break 11091 } 11092 if s3.AuxInt != 32 { 11093 break 11094 } 11095 x4 := s3.Args[0] 11096 if x4.Op != OpAMD64MOVBload { 11097 break 11098 } 11099 if x4.AuxInt != i+4 { 11100 break 11101 } 11102 if x4.Aux != s { 11103 break 11104 } 11105 if p != x4.Args[0] { 11106 break 11107 } 11108 if mem != x4.Args[1] { 11109 break 11110 } 11111 s4 := o1.Args[1] 11112 if s4.Op != OpAMD64SHLQconst { 11113 break 11114 } 11115 if s4.AuxInt != 40 { 11116 break 11117 } 11118 x5 := s4.Args[0] 11119 if x5.Op != OpAMD64MOVBload { 11120 break 11121 } 11122 if x5.AuxInt != i+5 { 11123 break 11124 } 11125 if x5.Aux != s { 11126 break 11127 } 11128 if p != x5.Args[0] { 11129 break 11130 } 11131 if mem != x5.Args[1] { 11132 break 11133 } 11134 s5 := o0.Args[1] 11135 if s5.Op != OpAMD64SHLQconst { 11136 break 11137 } 11138 if s5.AuxInt != 48 { 11139 break 11140 } 11141 x6 := s5.Args[0] 11142 if x6.Op != OpAMD64MOVBload { 11143 break 11144 } 11145 if x6.AuxInt != i+6 { 11146 break 11147 } 11148 if x6.Aux != s { 11149 break 11150 } 11151 if p != x6.Args[0] { 11152 break 11153 } 11154 if mem != x6.Args[1] { 11155 break 11156 } 11157 s6 := v.Args[1] 11158 if s6.Op != OpAMD64SHLQconst { 11159 break 11160 } 11161 if s6.AuxInt != 56 { 11162 break 11163 } 11164 x7 := s6.Args[0] 11165 if x7.Op != OpAMD64MOVBload { 11166 break 11167 } 11168 if x7.AuxInt != i+7 { 11169 break 11170 } 11171 if x7.Aux != s { 11172 break 11173 } 11174 if p != x7.Args[0] { 11175 break 11176 } 11177 if mem != x7.Args[1] { 11178 break 11179 } 11180 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 11181 break 11182 } 11183 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 11184 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 11185 v.reset(OpCopy) 11186 v.AddArg(v0) 11187 v0.AuxInt = i 11188 v0.Aux = s 11189 v0.AddArg(p) 11190 v0.AddArg(mem) 11191 return true 11192 } 11193 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ 
x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) 11194 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 11195 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem) 11196 for { 11197 o0 := v.Args[0] 11198 if o0.Op != OpAMD64ORQ { 11199 break 11200 } 11201 o1 := o0.Args[0] 11202 if o1.Op != OpAMD64ORQ { 11203 break 11204 } 11205 o2 := o1.Args[0] 11206 if o2.Op != OpAMD64ORQ { 11207 break 11208 } 11209 o3 := o2.Args[0] 11210 if o3.Op != OpAMD64ORQ { 11211 break 11212 } 11213 o4 := o3.Args[0] 11214 if o4.Op != OpAMD64ORQ { 11215 break 11216 } 11217 o5 := o4.Args[0] 11218 if o5.Op != OpAMD64ORQ { 11219 break 11220 } 11221 x0 := o5.Args[0] 11222 if x0.Op != OpAMD64MOVBloadidx1 { 11223 break 11224 } 11225 i := x0.AuxInt 11226 s := x0.Aux 11227 p := x0.Args[0] 11228 idx := x0.Args[1] 11229 mem := x0.Args[2] 11230 s0 := o5.Args[1] 11231 if s0.Op != OpAMD64SHLQconst { 11232 break 11233 } 11234 if s0.AuxInt != 8 { 11235 break 11236 } 11237 x1 := s0.Args[0] 11238 if x1.Op != OpAMD64MOVBloadidx1 { 11239 break 11240 } 11241 if x1.AuxInt != i+1 { 11242 break 11243 } 11244 if x1.Aux != s { 11245 break 11246 } 11247 if p != x1.Args[0] { 11248 break 11249 } 11250 if idx != x1.Args[1] { 11251 break 11252 } 11253 if mem != x1.Args[2] { 11254 break 11255 } 11256 s1 := o4.Args[1] 11257 if s1.Op != OpAMD64SHLQconst { 11258 break 11259 } 11260 if s1.AuxInt != 16 { 11261 break 11262 } 11263 x2 := s1.Args[0] 11264 if x2.Op != OpAMD64MOVBloadidx1 { 11265 break 11266 } 11267 if x2.AuxInt != i+2 { 11268 break 11269 } 11270 if x2.Aux != s { 11271 break 11272 } 11273 if p != x2.Args[0] { 11274 break 11275 } 11276 if idx != x2.Args[1] { 11277 break 11278 } 11279 if mem != x2.Args[2] { 11280 break 11281 } 11282 s2 := o3.Args[1] 11283 if s2.Op != OpAMD64SHLQconst { 11284 break 11285 } 11286 if s2.AuxInt != 24 { 11287 break 11288 } 11289 x3 := s2.Args[0] 11290 if x3.Op != OpAMD64MOVBloadidx1 { 11291 break 11292 } 11293 if x3.AuxInt != i+3 { 11294 break 11295 } 11296 if x3.Aux != s { 11297 break 11298 } 11299 if p != x3.Args[0] { 11300 break 11301 } 11302 if idx != x3.Args[1] { 11303 break 11304 } 11305 if mem != x3.Args[2] { 11306 break 11307 } 11308 s3 := o2.Args[1] 11309 if s3.Op != OpAMD64SHLQconst { 11310 break 11311 } 11312 if s3.AuxInt != 32 { 11313 break 11314 } 11315 x4 := s3.Args[0] 11316 if x4.Op != OpAMD64MOVBloadidx1 { 11317 break 11318 } 11319 if x4.AuxInt != i+4 { 11320 break 11321 } 11322 if x4.Aux != s { 11323 break 11324 } 11325 if p 
!= x4.Args[0] { 11326 break 11327 } 11328 if idx != x4.Args[1] { 11329 break 11330 } 11331 if mem != x4.Args[2] { 11332 break 11333 } 11334 s4 := o1.Args[1] 11335 if s4.Op != OpAMD64SHLQconst { 11336 break 11337 } 11338 if s4.AuxInt != 40 { 11339 break 11340 } 11341 x5 := s4.Args[0] 11342 if x5.Op != OpAMD64MOVBloadidx1 { 11343 break 11344 } 11345 if x5.AuxInt != i+5 { 11346 break 11347 } 11348 if x5.Aux != s { 11349 break 11350 } 11351 if p != x5.Args[0] { 11352 break 11353 } 11354 if idx != x5.Args[1] { 11355 break 11356 } 11357 if mem != x5.Args[2] { 11358 break 11359 } 11360 s5 := o0.Args[1] 11361 if s5.Op != OpAMD64SHLQconst { 11362 break 11363 } 11364 if s5.AuxInt != 48 { 11365 break 11366 } 11367 x6 := s5.Args[0] 11368 if x6.Op != OpAMD64MOVBloadidx1 { 11369 break 11370 } 11371 if x6.AuxInt != i+6 { 11372 break 11373 } 11374 if x6.Aux != s { 11375 break 11376 } 11377 if p != x6.Args[0] { 11378 break 11379 } 11380 if idx != x6.Args[1] { 11381 break 11382 } 11383 if mem != x6.Args[2] { 11384 break 11385 } 11386 s6 := v.Args[1] 11387 if s6.Op != OpAMD64SHLQconst { 11388 break 11389 } 11390 if s6.AuxInt != 56 { 11391 break 11392 } 11393 x7 := s6.Args[0] 11394 if x7.Op != OpAMD64MOVBloadidx1 { 11395 break 11396 } 11397 if x7.AuxInt != i+7 { 11398 break 11399 } 11400 if x7.Aux != s { 11401 break 11402 } 11403 if p != x7.Args[0] { 11404 break 11405 } 11406 if idx != x7.Args[1] { 11407 break 11408 } 11409 if mem != x7.Args[2] { 11410 break 11411 } 11412 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 11413 break 11414 } 11415 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 11416 v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type) 11417 v.reset(OpCopy) 11418 v.AddArg(v0) 11419 v0.AuxInt = i 11420 v0.Aux = s 11421 v0.AddArg(p) 11422 v0.AddArg(idx) 11423 v0.AddArg(mem) 11424 return true 11425 } 11426 return false 11427 } 11428 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { 11429 b := v.Block 11430 _ = b 11431 // match: (ORQconst [0] x) 11432 // cond: 11433 // result: x 11434 for { 11435 if v.AuxInt != 0 { 11436 break 11437 } 11438 x := v.Args[0] 11439 v.reset(OpCopy) 11440 v.Type = x.Type 11441 v.AddArg(x) 11442 return true 11443 } 11444 // match: (ORQconst [-1] _) 11445 // cond: 11446 // result: (MOVQconst [-1]) 11447 for { 11448 if v.AuxInt != -1 { 11449 break 11450 } 11451 v.reset(OpAMD64MOVQconst) 11452 v.AuxInt = -1 11453 return true 11454 } 11455 // match: (ORQconst [c] (MOVQconst [d])) 11456 // cond: 11457 // result: (MOVQconst [c|d]) 11458 for { 11459 c := v.AuxInt 11460 v_0 := v.Args[0] 11461 if v_0.Op != OpAMD64MOVQconst { 11462 break 11463 } 11464 d := v_0.AuxInt 11465 v.reset(OpAMD64MOVQconst) 11466 v.AuxInt = c | d 11467 return true 11468 } 11469 return false 11470 } 11471 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool { 11472 b := v.Block 11473 _ = b 11474 // match: 
(ROLBconst [c] (ROLBconst [d] x)) 11475 // cond: 11476 // result: (ROLBconst [(c+d)& 7] x) 11477 for { 11478 c := v.AuxInt 11479 v_0 := v.Args[0] 11480 if v_0.Op != OpAMD64ROLBconst { 11481 break 11482 } 11483 d := v_0.AuxInt 11484 x := v_0.Args[0] 11485 v.reset(OpAMD64ROLBconst) 11486 v.AuxInt = (c + d) & 7 11487 v.AddArg(x) 11488 return true 11489 } 11490 // match: (ROLBconst [0] x) 11491 // cond: 11492 // result: x 11493 for { 11494 if v.AuxInt != 0 { 11495 break 11496 } 11497 x := v.Args[0] 11498 v.reset(OpCopy) 11499 v.Type = x.Type 11500 v.AddArg(x) 11501 return true 11502 } 11503 return false 11504 } 11505 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool { 11506 b := v.Block 11507 _ = b 11508 // match: (ROLLconst [c] (ROLLconst [d] x)) 11509 // cond: 11510 // result: (ROLLconst [(c+d)&31] x) 11511 for { 11512 c := v.AuxInt 11513 v_0 := v.Args[0] 11514 if v_0.Op != OpAMD64ROLLconst { 11515 break 11516 } 11517 d := v_0.AuxInt 11518 x := v_0.Args[0] 11519 v.reset(OpAMD64ROLLconst) 11520 v.AuxInt = (c + d) & 31 11521 v.AddArg(x) 11522 return true 11523 } 11524 // match: (ROLLconst [0] x) 11525 // cond: 11526 // result: x 11527 for { 11528 if v.AuxInt != 0 { 11529 break 11530 } 11531 x := v.Args[0] 11532 v.reset(OpCopy) 11533 v.Type = x.Type 11534 v.AddArg(x) 11535 return true 11536 } 11537 return false 11538 } 11539 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool { 11540 b := v.Block 11541 _ = b 11542 // match: (ROLQconst [c] (ROLQconst [d] x)) 11543 // cond: 11544 // result: (ROLQconst [(c+d)&63] x) 11545 for { 11546 c := v.AuxInt 11547 v_0 := v.Args[0] 11548 if v_0.Op != OpAMD64ROLQconst { 11549 break 11550 } 11551 d := v_0.AuxInt 11552 x := v_0.Args[0] 11553 v.reset(OpAMD64ROLQconst) 11554 v.AuxInt = (c + d) & 63 11555 v.AddArg(x) 11556 return true 11557 } 11558 // match: (ROLQconst [0] x) 11559 // cond: 11560 // result: x 11561 for { 11562 if v.AuxInt != 0 { 11563 break 11564 } 11565 x := v.Args[0] 11566 v.reset(OpCopy) 11567 v.Type = x.Type 11568 v.AddArg(x) 11569 return true 11570 } 11571 return false 11572 } 11573 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool { 11574 b := v.Block 11575 _ = b 11576 // match: (ROLWconst [c] (ROLWconst [d] x)) 11577 // cond: 11578 // result: (ROLWconst [(c+d)&15] x) 11579 for { 11580 c := v.AuxInt 11581 v_0 := v.Args[0] 11582 if v_0.Op != OpAMD64ROLWconst { 11583 break 11584 } 11585 d := v_0.AuxInt 11586 x := v_0.Args[0] 11587 v.reset(OpAMD64ROLWconst) 11588 v.AuxInt = (c + d) & 15 11589 v.AddArg(x) 11590 return true 11591 } 11592 // match: (ROLWconst [0] x) 11593 // cond: 11594 // result: x 11595 for { 11596 if v.AuxInt != 0 { 11597 break 11598 } 11599 x := v.Args[0] 11600 v.reset(OpCopy) 11601 v.Type = x.Type 11602 v.AddArg(x) 11603 return true 11604 } 11605 return false 11606 } 11607 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { 11608 b := v.Block 11609 _ = b 11610 // match: (SARB x (MOVQconst [c])) 11611 // cond: 11612 // result: (SARBconst [c&31] x) 11613 for { 11614 x := v.Args[0] 11615 v_1 := v.Args[1] 11616 if v_1.Op != OpAMD64MOVQconst { 11617 break 11618 } 11619 c := v_1.AuxInt 11620 v.reset(OpAMD64SARBconst) 11621 v.AuxInt = c & 31 11622 v.AddArg(x) 11623 return true 11624 } 11625 // match: (SARB x (MOVLconst [c])) 11626 // cond: 11627 // result: (SARBconst [c&31] x) 11628 for { 11629 x := v.Args[0] 11630 v_1 := v.Args[1] 11631 if v_1.Op != OpAMD64MOVLconst { 11632 break 11633 } 11634 c := v_1.AuxInt 11635 v.reset(OpAMD64SARBconst) 11636 v.AuxInt = 
c & 31 11637 v.AddArg(x) 11638 return true 11639 } 11640 return false 11641 } 11642 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { 11643 b := v.Block 11644 _ = b 11645 // match: (SARBconst [c] (MOVQconst [d])) 11646 // cond: 11647 // result: (MOVQconst [d>>uint64(c)]) 11648 for { 11649 c := v.AuxInt 11650 v_0 := v.Args[0] 11651 if v_0.Op != OpAMD64MOVQconst { 11652 break 11653 } 11654 d := v_0.AuxInt 11655 v.reset(OpAMD64MOVQconst) 11656 v.AuxInt = d >> uint64(c) 11657 return true 11658 } 11659 return false 11660 } 11661 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { 11662 b := v.Block 11663 _ = b 11664 // match: (SARL x (MOVQconst [c])) 11665 // cond: 11666 // result: (SARLconst [c&31] x) 11667 for { 11668 x := v.Args[0] 11669 v_1 := v.Args[1] 11670 if v_1.Op != OpAMD64MOVQconst { 11671 break 11672 } 11673 c := v_1.AuxInt 11674 v.reset(OpAMD64SARLconst) 11675 v.AuxInt = c & 31 11676 v.AddArg(x) 11677 return true 11678 } 11679 // match: (SARL x (MOVLconst [c])) 11680 // cond: 11681 // result: (SARLconst [c&31] x) 11682 for { 11683 x := v.Args[0] 11684 v_1 := v.Args[1] 11685 if v_1.Op != OpAMD64MOVLconst { 11686 break 11687 } 11688 c := v_1.AuxInt 11689 v.reset(OpAMD64SARLconst) 11690 v.AuxInt = c & 31 11691 v.AddArg(x) 11692 return true 11693 } 11694 // match: (SARL x (ANDLconst [31] y)) 11695 // cond: 11696 // result: (SARL x y) 11697 for { 11698 x := v.Args[0] 11699 v_1 := v.Args[1] 11700 if v_1.Op != OpAMD64ANDLconst { 11701 break 11702 } 11703 if v_1.AuxInt != 31 { 11704 break 11705 } 11706 y := v_1.Args[0] 11707 v.reset(OpAMD64SARL) 11708 v.AddArg(x) 11709 v.AddArg(y) 11710 return true 11711 } 11712 return false 11713 } 11714 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { 11715 b := v.Block 11716 _ = b 11717 // match: (SARLconst [c] (MOVQconst [d])) 11718 // cond: 11719 // result: (MOVQconst [d>>uint64(c)]) 11720 for { 11721 c := v.AuxInt 11722 v_0 := v.Args[0] 11723 if v_0.Op != OpAMD64MOVQconst { 11724 break 11725 } 11726 d := v_0.AuxInt 11727 v.reset(OpAMD64MOVQconst) 11728 v.AuxInt = d >> uint64(c) 11729 return true 11730 } 11731 return false 11732 } 11733 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { 11734 b := v.Block 11735 _ = b 11736 // match: (SARQ x (MOVQconst [c])) 11737 // cond: 11738 // result: (SARQconst [c&63] x) 11739 for { 11740 x := v.Args[0] 11741 v_1 := v.Args[1] 11742 if v_1.Op != OpAMD64MOVQconst { 11743 break 11744 } 11745 c := v_1.AuxInt 11746 v.reset(OpAMD64SARQconst) 11747 v.AuxInt = c & 63 11748 v.AddArg(x) 11749 return true 11750 } 11751 // match: (SARQ x (MOVLconst [c])) 11752 // cond: 11753 // result: (SARQconst [c&63] x) 11754 for { 11755 x := v.Args[0] 11756 v_1 := v.Args[1] 11757 if v_1.Op != OpAMD64MOVLconst { 11758 break 11759 } 11760 c := v_1.AuxInt 11761 v.reset(OpAMD64SARQconst) 11762 v.AuxInt = c & 63 11763 v.AddArg(x) 11764 return true 11765 } 11766 // match: (SARQ x (ANDQconst [63] y)) 11767 // cond: 11768 // result: (SARQ x y) 11769 for { 11770 x := v.Args[0] 11771 v_1 := v.Args[1] 11772 if v_1.Op != OpAMD64ANDQconst { 11773 break 11774 } 11775 if v_1.AuxInt != 63 { 11776 break 11777 } 11778 y := v_1.Args[0] 11779 v.reset(OpAMD64SARQ) 11780 v.AddArg(x) 11781 v.AddArg(y) 11782 return true 11783 } 11784 return false 11785 } 11786 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { 11787 b := v.Block 11788 _ = b 11789 // match: (SARQconst [c] (MOVQconst [d])) 11790 // cond: 11791 // result: (MOVQconst [d>>uint64(c)]) 11792 for { 
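		// Constant-fold the arithmetic shift: AuxInt stores d as a signed int64,
		// so d >> uint64(c) preserves the sign bit, e.g. -8 >> 1 yields -4.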
11793 c := v.AuxInt 11794 v_0 := v.Args[0] 11795 if v_0.Op != OpAMD64MOVQconst { 11796 break 11797 } 11798 d := v_0.AuxInt 11799 v.reset(OpAMD64MOVQconst) 11800 v.AuxInt = d >> uint64(c) 11801 return true 11802 } 11803 return false 11804 } 11805 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { 11806 b := v.Block 11807 _ = b 11808 // match: (SARW x (MOVQconst [c])) 11809 // cond: 11810 // result: (SARWconst [c&31] x) 11811 for { 11812 x := v.Args[0] 11813 v_1 := v.Args[1] 11814 if v_1.Op != OpAMD64MOVQconst { 11815 break 11816 } 11817 c := v_1.AuxInt 11818 v.reset(OpAMD64SARWconst) 11819 v.AuxInt = c & 31 11820 v.AddArg(x) 11821 return true 11822 } 11823 // match: (SARW x (MOVLconst [c])) 11824 // cond: 11825 // result: (SARWconst [c&31] x) 11826 for { 11827 x := v.Args[0] 11828 v_1 := v.Args[1] 11829 if v_1.Op != OpAMD64MOVLconst { 11830 break 11831 } 11832 c := v_1.AuxInt 11833 v.reset(OpAMD64SARWconst) 11834 v.AuxInt = c & 31 11835 v.AddArg(x) 11836 return true 11837 } 11838 return false 11839 } 11840 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { 11841 b := v.Block 11842 _ = b 11843 // match: (SARWconst [c] (MOVQconst [d])) 11844 // cond: 11845 // result: (MOVQconst [d>>uint64(c)]) 11846 for { 11847 c := v.AuxInt 11848 v_0 := v.Args[0] 11849 if v_0.Op != OpAMD64MOVQconst { 11850 break 11851 } 11852 d := v_0.AuxInt 11853 v.reset(OpAMD64MOVQconst) 11854 v.AuxInt = d >> uint64(c) 11855 return true 11856 } 11857 return false 11858 } 11859 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { 11860 b := v.Block 11861 _ = b 11862 // match: (SBBLcarrymask (FlagEQ)) 11863 // cond: 11864 // result: (MOVLconst [0]) 11865 for { 11866 v_0 := v.Args[0] 11867 if v_0.Op != OpAMD64FlagEQ { 11868 break 11869 } 11870 v.reset(OpAMD64MOVLconst) 11871 v.AuxInt = 0 11872 return true 11873 } 11874 // match: (SBBLcarrymask (FlagLT_ULT)) 11875 // cond: 11876 // result: (MOVLconst [-1]) 11877 for { 11878 v_0 := v.Args[0] 11879 if v_0.Op != OpAMD64FlagLT_ULT { 11880 break 11881 } 11882 v.reset(OpAMD64MOVLconst) 11883 v.AuxInt = -1 11884 return true 11885 } 11886 // match: (SBBLcarrymask (FlagLT_UGT)) 11887 // cond: 11888 // result: (MOVLconst [0]) 11889 for { 11890 v_0 := v.Args[0] 11891 if v_0.Op != OpAMD64FlagLT_UGT { 11892 break 11893 } 11894 v.reset(OpAMD64MOVLconst) 11895 v.AuxInt = 0 11896 return true 11897 } 11898 // match: (SBBLcarrymask (FlagGT_ULT)) 11899 // cond: 11900 // result: (MOVLconst [-1]) 11901 for { 11902 v_0 := v.Args[0] 11903 if v_0.Op != OpAMD64FlagGT_ULT { 11904 break 11905 } 11906 v.reset(OpAMD64MOVLconst) 11907 v.AuxInt = -1 11908 return true 11909 } 11910 // match: (SBBLcarrymask (FlagGT_UGT)) 11911 // cond: 11912 // result: (MOVLconst [0]) 11913 for { 11914 v_0 := v.Args[0] 11915 if v_0.Op != OpAMD64FlagGT_UGT { 11916 break 11917 } 11918 v.reset(OpAMD64MOVLconst) 11919 v.AuxInt = 0 11920 return true 11921 } 11922 return false 11923 } 11924 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { 11925 b := v.Block 11926 _ = b 11927 // match: (SBBQcarrymask (FlagEQ)) 11928 // cond: 11929 // result: (MOVQconst [0]) 11930 for { 11931 v_0 := v.Args[0] 11932 if v_0.Op != OpAMD64FlagEQ { 11933 break 11934 } 11935 v.reset(OpAMD64MOVQconst) 11936 v.AuxInt = 0 11937 return true 11938 } 11939 // match: (SBBQcarrymask (FlagLT_ULT)) 11940 // cond: 11941 // result: (MOVQconst [-1]) 11942 for { 11943 v_0 := v.Args[0] 11944 if v_0.Op != OpAMD64FlagLT_ULT { 11945 break 11946 } 11947 v.reset(OpAMD64MOVQconst) 11948 
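		// SBBQcarrymask turns the carry flag into a 0 or all-ones mask; FlagLT_ULT
		// records an unsigned "below" result, so the carry is known set and the
		// mask folds to the constant -1.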
v.AuxInt = -1 11949 return true 11950 } 11951 // match: (SBBQcarrymask (FlagLT_UGT)) 11952 // cond: 11953 // result: (MOVQconst [0]) 11954 for { 11955 v_0 := v.Args[0] 11956 if v_0.Op != OpAMD64FlagLT_UGT { 11957 break 11958 } 11959 v.reset(OpAMD64MOVQconst) 11960 v.AuxInt = 0 11961 return true 11962 } 11963 // match: (SBBQcarrymask (FlagGT_ULT)) 11964 // cond: 11965 // result: (MOVQconst [-1]) 11966 for { 11967 v_0 := v.Args[0] 11968 if v_0.Op != OpAMD64FlagGT_ULT { 11969 break 11970 } 11971 v.reset(OpAMD64MOVQconst) 11972 v.AuxInt = -1 11973 return true 11974 } 11975 // match: (SBBQcarrymask (FlagGT_UGT)) 11976 // cond: 11977 // result: (MOVQconst [0]) 11978 for { 11979 v_0 := v.Args[0] 11980 if v_0.Op != OpAMD64FlagGT_UGT { 11981 break 11982 } 11983 v.reset(OpAMD64MOVQconst) 11984 v.AuxInt = 0 11985 return true 11986 } 11987 return false 11988 } 11989 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { 11990 b := v.Block 11991 _ = b 11992 // match: (SETA (InvertFlags x)) 11993 // cond: 11994 // result: (SETB x) 11995 for { 11996 v_0 := v.Args[0] 11997 if v_0.Op != OpAMD64InvertFlags { 11998 break 11999 } 12000 x := v_0.Args[0] 12001 v.reset(OpAMD64SETB) 12002 v.AddArg(x) 12003 return true 12004 } 12005 // match: (SETA (FlagEQ)) 12006 // cond: 12007 // result: (MOVLconst [0]) 12008 for { 12009 v_0 := v.Args[0] 12010 if v_0.Op != OpAMD64FlagEQ { 12011 break 12012 } 12013 v.reset(OpAMD64MOVLconst) 12014 v.AuxInt = 0 12015 return true 12016 } 12017 // match: (SETA (FlagLT_ULT)) 12018 // cond: 12019 // result: (MOVLconst [0]) 12020 for { 12021 v_0 := v.Args[0] 12022 if v_0.Op != OpAMD64FlagLT_ULT { 12023 break 12024 } 12025 v.reset(OpAMD64MOVLconst) 12026 v.AuxInt = 0 12027 return true 12028 } 12029 // match: (SETA (FlagLT_UGT)) 12030 // cond: 12031 // result: (MOVLconst [1]) 12032 for { 12033 v_0 := v.Args[0] 12034 if v_0.Op != OpAMD64FlagLT_UGT { 12035 break 12036 } 12037 v.reset(OpAMD64MOVLconst) 12038 v.AuxInt = 1 12039 return true 12040 } 12041 // match: (SETA (FlagGT_ULT)) 12042 // cond: 12043 // result: (MOVLconst [0]) 12044 for { 12045 v_0 := v.Args[0] 12046 if v_0.Op != OpAMD64FlagGT_ULT { 12047 break 12048 } 12049 v.reset(OpAMD64MOVLconst) 12050 v.AuxInt = 0 12051 return true 12052 } 12053 // match: (SETA (FlagGT_UGT)) 12054 // cond: 12055 // result: (MOVLconst [1]) 12056 for { 12057 v_0 := v.Args[0] 12058 if v_0.Op != OpAMD64FlagGT_UGT { 12059 break 12060 } 12061 v.reset(OpAMD64MOVLconst) 12062 v.AuxInt = 1 12063 return true 12064 } 12065 return false 12066 } 12067 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { 12068 b := v.Block 12069 _ = b 12070 // match: (SETAE (InvertFlags x)) 12071 // cond: 12072 // result: (SETBE x) 12073 for { 12074 v_0 := v.Args[0] 12075 if v_0.Op != OpAMD64InvertFlags { 12076 break 12077 } 12078 x := v_0.Args[0] 12079 v.reset(OpAMD64SETBE) 12080 v.AddArg(x) 12081 return true 12082 } 12083 // match: (SETAE (FlagEQ)) 12084 // cond: 12085 // result: (MOVLconst [1]) 12086 for { 12087 v_0 := v.Args[0] 12088 if v_0.Op != OpAMD64FlagEQ { 12089 break 12090 } 12091 v.reset(OpAMD64MOVLconst) 12092 v.AuxInt = 1 12093 return true 12094 } 12095 // match: (SETAE (FlagLT_ULT)) 12096 // cond: 12097 // result: (MOVLconst [0]) 12098 for { 12099 v_0 := v.Args[0] 12100 if v_0.Op != OpAMD64FlagLT_ULT { 12101 break 12102 } 12103 v.reset(OpAMD64MOVLconst) 12104 v.AuxInt = 0 12105 return true 12106 } 12107 // match: (SETAE (FlagLT_UGT)) 12108 // cond: 12109 // result: (MOVLconst [1]) 12110 for { 12111 v_0 := v.Args[0] 12112 if v_0.Op != 
OpAMD64FlagLT_UGT { 12113 break 12114 } 12115 v.reset(OpAMD64MOVLconst) 12116 v.AuxInt = 1 12117 return true 12118 } 12119 // match: (SETAE (FlagGT_ULT)) 12120 // cond: 12121 // result: (MOVLconst [0]) 12122 for { 12123 v_0 := v.Args[0] 12124 if v_0.Op != OpAMD64FlagGT_ULT { 12125 break 12126 } 12127 v.reset(OpAMD64MOVLconst) 12128 v.AuxInt = 0 12129 return true 12130 } 12131 // match: (SETAE (FlagGT_UGT)) 12132 // cond: 12133 // result: (MOVLconst [1]) 12134 for { 12135 v_0 := v.Args[0] 12136 if v_0.Op != OpAMD64FlagGT_UGT { 12137 break 12138 } 12139 v.reset(OpAMD64MOVLconst) 12140 v.AuxInt = 1 12141 return true 12142 } 12143 return false 12144 } 12145 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { 12146 b := v.Block 12147 _ = b 12148 // match: (SETB (InvertFlags x)) 12149 // cond: 12150 // result: (SETA x) 12151 for { 12152 v_0 := v.Args[0] 12153 if v_0.Op != OpAMD64InvertFlags { 12154 break 12155 } 12156 x := v_0.Args[0] 12157 v.reset(OpAMD64SETA) 12158 v.AddArg(x) 12159 return true 12160 } 12161 // match: (SETB (FlagEQ)) 12162 // cond: 12163 // result: (MOVLconst [0]) 12164 for { 12165 v_0 := v.Args[0] 12166 if v_0.Op != OpAMD64FlagEQ { 12167 break 12168 } 12169 v.reset(OpAMD64MOVLconst) 12170 v.AuxInt = 0 12171 return true 12172 } 12173 // match: (SETB (FlagLT_ULT)) 12174 // cond: 12175 // result: (MOVLconst [1]) 12176 for { 12177 v_0 := v.Args[0] 12178 if v_0.Op != OpAMD64FlagLT_ULT { 12179 break 12180 } 12181 v.reset(OpAMD64MOVLconst) 12182 v.AuxInt = 1 12183 return true 12184 } 12185 // match: (SETB (FlagLT_UGT)) 12186 // cond: 12187 // result: (MOVLconst [0]) 12188 for { 12189 v_0 := v.Args[0] 12190 if v_0.Op != OpAMD64FlagLT_UGT { 12191 break 12192 } 12193 v.reset(OpAMD64MOVLconst) 12194 v.AuxInt = 0 12195 return true 12196 } 12197 // match: (SETB (FlagGT_ULT)) 12198 // cond: 12199 // result: (MOVLconst [1]) 12200 for { 12201 v_0 := v.Args[0] 12202 if v_0.Op != OpAMD64FlagGT_ULT { 12203 break 12204 } 12205 v.reset(OpAMD64MOVLconst) 12206 v.AuxInt = 1 12207 return true 12208 } 12209 // match: (SETB (FlagGT_UGT)) 12210 // cond: 12211 // result: (MOVLconst [0]) 12212 for { 12213 v_0 := v.Args[0] 12214 if v_0.Op != OpAMD64FlagGT_UGT { 12215 break 12216 } 12217 v.reset(OpAMD64MOVLconst) 12218 v.AuxInt = 0 12219 return true 12220 } 12221 return false 12222 } 12223 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { 12224 b := v.Block 12225 _ = b 12226 // match: (SETBE (InvertFlags x)) 12227 // cond: 12228 // result: (SETAE x) 12229 for { 12230 v_0 := v.Args[0] 12231 if v_0.Op != OpAMD64InvertFlags { 12232 break 12233 } 12234 x := v_0.Args[0] 12235 v.reset(OpAMD64SETAE) 12236 v.AddArg(x) 12237 return true 12238 } 12239 // match: (SETBE (FlagEQ)) 12240 // cond: 12241 // result: (MOVLconst [1]) 12242 for { 12243 v_0 := v.Args[0] 12244 if v_0.Op != OpAMD64FlagEQ { 12245 break 12246 } 12247 v.reset(OpAMD64MOVLconst) 12248 v.AuxInt = 1 12249 return true 12250 } 12251 // match: (SETBE (FlagLT_ULT)) 12252 // cond: 12253 // result: (MOVLconst [1]) 12254 for { 12255 v_0 := v.Args[0] 12256 if v_0.Op != OpAMD64FlagLT_ULT { 12257 break 12258 } 12259 v.reset(OpAMD64MOVLconst) 12260 v.AuxInt = 1 12261 return true 12262 } 12263 // match: (SETBE (FlagLT_UGT)) 12264 // cond: 12265 // result: (MOVLconst [0]) 12266 for { 12267 v_0 := v.Args[0] 12268 if v_0.Op != OpAMD64FlagLT_UGT { 12269 break 12270 } 12271 v.reset(OpAMD64MOVLconst) 12272 v.AuxInt = 0 12273 return true 12274 } 12275 // match: (SETBE (FlagGT_ULT)) 12276 // cond: 12277 // result: (MOVLconst [1]) 12278 
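	// (FlagGT_ULT means the comparison was signed greater-than but unsigned
	// below, so the unsigned below-or-equal test evaluates to 1.)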
for { 12279 v_0 := v.Args[0] 12280 if v_0.Op != OpAMD64FlagGT_ULT { 12281 break 12282 } 12283 v.reset(OpAMD64MOVLconst) 12284 v.AuxInt = 1 12285 return true 12286 } 12287 // match: (SETBE (FlagGT_UGT)) 12288 // cond: 12289 // result: (MOVLconst [0]) 12290 for { 12291 v_0 := v.Args[0] 12292 if v_0.Op != OpAMD64FlagGT_UGT { 12293 break 12294 } 12295 v.reset(OpAMD64MOVLconst) 12296 v.AuxInt = 0 12297 return true 12298 } 12299 return false 12300 } 12301 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { 12302 b := v.Block 12303 _ = b 12304 // match: (SETEQ (InvertFlags x)) 12305 // cond: 12306 // result: (SETEQ x) 12307 for { 12308 v_0 := v.Args[0] 12309 if v_0.Op != OpAMD64InvertFlags { 12310 break 12311 } 12312 x := v_0.Args[0] 12313 v.reset(OpAMD64SETEQ) 12314 v.AddArg(x) 12315 return true 12316 } 12317 // match: (SETEQ (FlagEQ)) 12318 // cond: 12319 // result: (MOVLconst [1]) 12320 for { 12321 v_0 := v.Args[0] 12322 if v_0.Op != OpAMD64FlagEQ { 12323 break 12324 } 12325 v.reset(OpAMD64MOVLconst) 12326 v.AuxInt = 1 12327 return true 12328 } 12329 // match: (SETEQ (FlagLT_ULT)) 12330 // cond: 12331 // result: (MOVLconst [0]) 12332 for { 12333 v_0 := v.Args[0] 12334 if v_0.Op != OpAMD64FlagLT_ULT { 12335 break 12336 } 12337 v.reset(OpAMD64MOVLconst) 12338 v.AuxInt = 0 12339 return true 12340 } 12341 // match: (SETEQ (FlagLT_UGT)) 12342 // cond: 12343 // result: (MOVLconst [0]) 12344 for { 12345 v_0 := v.Args[0] 12346 if v_0.Op != OpAMD64FlagLT_UGT { 12347 break 12348 } 12349 v.reset(OpAMD64MOVLconst) 12350 v.AuxInt = 0 12351 return true 12352 } 12353 // match: (SETEQ (FlagGT_ULT)) 12354 // cond: 12355 // result: (MOVLconst [0]) 12356 for { 12357 v_0 := v.Args[0] 12358 if v_0.Op != OpAMD64FlagGT_ULT { 12359 break 12360 } 12361 v.reset(OpAMD64MOVLconst) 12362 v.AuxInt = 0 12363 return true 12364 } 12365 // match: (SETEQ (FlagGT_UGT)) 12366 // cond: 12367 // result: (MOVLconst [0]) 12368 for { 12369 v_0 := v.Args[0] 12370 if v_0.Op != OpAMD64FlagGT_UGT { 12371 break 12372 } 12373 v.reset(OpAMD64MOVLconst) 12374 v.AuxInt = 0 12375 return true 12376 } 12377 return false 12378 } 12379 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { 12380 b := v.Block 12381 _ = b 12382 // match: (SETG (InvertFlags x)) 12383 // cond: 12384 // result: (SETL x) 12385 for { 12386 v_0 := v.Args[0] 12387 if v_0.Op != OpAMD64InvertFlags { 12388 break 12389 } 12390 x := v_0.Args[0] 12391 v.reset(OpAMD64SETL) 12392 v.AddArg(x) 12393 return true 12394 } 12395 // match: (SETG (FlagEQ)) 12396 // cond: 12397 // result: (MOVLconst [0]) 12398 for { 12399 v_0 := v.Args[0] 12400 if v_0.Op != OpAMD64FlagEQ { 12401 break 12402 } 12403 v.reset(OpAMD64MOVLconst) 12404 v.AuxInt = 0 12405 return true 12406 } 12407 // match: (SETG (FlagLT_ULT)) 12408 // cond: 12409 // result: (MOVLconst [0]) 12410 for { 12411 v_0 := v.Args[0] 12412 if v_0.Op != OpAMD64FlagLT_ULT { 12413 break 12414 } 12415 v.reset(OpAMD64MOVLconst) 12416 v.AuxInt = 0 12417 return true 12418 } 12419 // match: (SETG (FlagLT_UGT)) 12420 // cond: 12421 // result: (MOVLconst [0]) 12422 for { 12423 v_0 := v.Args[0] 12424 if v_0.Op != OpAMD64FlagLT_UGT { 12425 break 12426 } 12427 v.reset(OpAMD64MOVLconst) 12428 v.AuxInt = 0 12429 return true 12430 } 12431 // match: (SETG (FlagGT_ULT)) 12432 // cond: 12433 // result: (MOVLconst [1]) 12434 for { 12435 v_0 := v.Args[0] 12436 if v_0.Op != OpAMD64FlagGT_ULT { 12437 break 12438 } 12439 v.reset(OpAMD64MOVLconst) 12440 v.AuxInt = 1 12441 return true 12442 } 12443 // match: (SETG (FlagGT_UGT)) 12444 
// cond: 12445 // result: (MOVLconst [1]) 12446 for { 12447 v_0 := v.Args[0] 12448 if v_0.Op != OpAMD64FlagGT_UGT { 12449 break 12450 } 12451 v.reset(OpAMD64MOVLconst) 12452 v.AuxInt = 1 12453 return true 12454 } 12455 return false 12456 } 12457 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { 12458 b := v.Block 12459 _ = b 12460 // match: (SETGE (InvertFlags x)) 12461 // cond: 12462 // result: (SETLE x) 12463 for { 12464 v_0 := v.Args[0] 12465 if v_0.Op != OpAMD64InvertFlags { 12466 break 12467 } 12468 x := v_0.Args[0] 12469 v.reset(OpAMD64SETLE) 12470 v.AddArg(x) 12471 return true 12472 } 12473 // match: (SETGE (FlagEQ)) 12474 // cond: 12475 // result: (MOVLconst [1]) 12476 for { 12477 v_0 := v.Args[0] 12478 if v_0.Op != OpAMD64FlagEQ { 12479 break 12480 } 12481 v.reset(OpAMD64MOVLconst) 12482 v.AuxInt = 1 12483 return true 12484 } 12485 // match: (SETGE (FlagLT_ULT)) 12486 // cond: 12487 // result: (MOVLconst [0]) 12488 for { 12489 v_0 := v.Args[0] 12490 if v_0.Op != OpAMD64FlagLT_ULT { 12491 break 12492 } 12493 v.reset(OpAMD64MOVLconst) 12494 v.AuxInt = 0 12495 return true 12496 } 12497 // match: (SETGE (FlagLT_UGT)) 12498 // cond: 12499 // result: (MOVLconst [0]) 12500 for { 12501 v_0 := v.Args[0] 12502 if v_0.Op != OpAMD64FlagLT_UGT { 12503 break 12504 } 12505 v.reset(OpAMD64MOVLconst) 12506 v.AuxInt = 0 12507 return true 12508 } 12509 // match: (SETGE (FlagGT_ULT)) 12510 // cond: 12511 // result: (MOVLconst [1]) 12512 for { 12513 v_0 := v.Args[0] 12514 if v_0.Op != OpAMD64FlagGT_ULT { 12515 break 12516 } 12517 v.reset(OpAMD64MOVLconst) 12518 v.AuxInt = 1 12519 return true 12520 } 12521 // match: (SETGE (FlagGT_UGT)) 12522 // cond: 12523 // result: (MOVLconst [1]) 12524 for { 12525 v_0 := v.Args[0] 12526 if v_0.Op != OpAMD64FlagGT_UGT { 12527 break 12528 } 12529 v.reset(OpAMD64MOVLconst) 12530 v.AuxInt = 1 12531 return true 12532 } 12533 return false 12534 } 12535 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { 12536 b := v.Block 12537 _ = b 12538 // match: (SETL (InvertFlags x)) 12539 // cond: 12540 // result: (SETG x) 12541 for { 12542 v_0 := v.Args[0] 12543 if v_0.Op != OpAMD64InvertFlags { 12544 break 12545 } 12546 x := v_0.Args[0] 12547 v.reset(OpAMD64SETG) 12548 v.AddArg(x) 12549 return true 12550 } 12551 // match: (SETL (FlagEQ)) 12552 // cond: 12553 // result: (MOVLconst [0]) 12554 for { 12555 v_0 := v.Args[0] 12556 if v_0.Op != OpAMD64FlagEQ { 12557 break 12558 } 12559 v.reset(OpAMD64MOVLconst) 12560 v.AuxInt = 0 12561 return true 12562 } 12563 // match: (SETL (FlagLT_ULT)) 12564 // cond: 12565 // result: (MOVLconst [1]) 12566 for { 12567 v_0 := v.Args[0] 12568 if v_0.Op != OpAMD64FlagLT_ULT { 12569 break 12570 } 12571 v.reset(OpAMD64MOVLconst) 12572 v.AuxInt = 1 12573 return true 12574 } 12575 // match: (SETL (FlagLT_UGT)) 12576 // cond: 12577 // result: (MOVLconst [1]) 12578 for { 12579 v_0 := v.Args[0] 12580 if v_0.Op != OpAMD64FlagLT_UGT { 12581 break 12582 } 12583 v.reset(OpAMD64MOVLconst) 12584 v.AuxInt = 1 12585 return true 12586 } 12587 // match: (SETL (FlagGT_ULT)) 12588 // cond: 12589 // result: (MOVLconst [0]) 12590 for { 12591 v_0 := v.Args[0] 12592 if v_0.Op != OpAMD64FlagGT_ULT { 12593 break 12594 } 12595 v.reset(OpAMD64MOVLconst) 12596 v.AuxInt = 0 12597 return true 12598 } 12599 // match: (SETL (FlagGT_UGT)) 12600 // cond: 12601 // result: (MOVLconst [0]) 12602 for { 12603 v_0 := v.Args[0] 12604 if v_0.Op != OpAMD64FlagGT_UGT { 12605 break 12606 } 12607 v.reset(OpAMD64MOVLconst) 12608 v.AuxInt = 0 12609 return true 
12610 } 12611 return false 12612 } 12613 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { 12614 b := v.Block 12615 _ = b 12616 // match: (SETLE (InvertFlags x)) 12617 // cond: 12618 // result: (SETGE x) 12619 for { 12620 v_0 := v.Args[0] 12621 if v_0.Op != OpAMD64InvertFlags { 12622 break 12623 } 12624 x := v_0.Args[0] 12625 v.reset(OpAMD64SETGE) 12626 v.AddArg(x) 12627 return true 12628 } 12629 // match: (SETLE (FlagEQ)) 12630 // cond: 12631 // result: (MOVLconst [1]) 12632 for { 12633 v_0 := v.Args[0] 12634 if v_0.Op != OpAMD64FlagEQ { 12635 break 12636 } 12637 v.reset(OpAMD64MOVLconst) 12638 v.AuxInt = 1 12639 return true 12640 } 12641 // match: (SETLE (FlagLT_ULT)) 12642 // cond: 12643 // result: (MOVLconst [1]) 12644 for { 12645 v_0 := v.Args[0] 12646 if v_0.Op != OpAMD64FlagLT_ULT { 12647 break 12648 } 12649 v.reset(OpAMD64MOVLconst) 12650 v.AuxInt = 1 12651 return true 12652 } 12653 // match: (SETLE (FlagLT_UGT)) 12654 // cond: 12655 // result: (MOVLconst [1]) 12656 for { 12657 v_0 := v.Args[0] 12658 if v_0.Op != OpAMD64FlagLT_UGT { 12659 break 12660 } 12661 v.reset(OpAMD64MOVLconst) 12662 v.AuxInt = 1 12663 return true 12664 } 12665 // match: (SETLE (FlagGT_ULT)) 12666 // cond: 12667 // result: (MOVLconst [0]) 12668 for { 12669 v_0 := v.Args[0] 12670 if v_0.Op != OpAMD64FlagGT_ULT { 12671 break 12672 } 12673 v.reset(OpAMD64MOVLconst) 12674 v.AuxInt = 0 12675 return true 12676 } 12677 // match: (SETLE (FlagGT_UGT)) 12678 // cond: 12679 // result: (MOVLconst [0]) 12680 for { 12681 v_0 := v.Args[0] 12682 if v_0.Op != OpAMD64FlagGT_UGT { 12683 break 12684 } 12685 v.reset(OpAMD64MOVLconst) 12686 v.AuxInt = 0 12687 return true 12688 } 12689 return false 12690 } 12691 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { 12692 b := v.Block 12693 _ = b 12694 // match: (SETNE (InvertFlags x)) 12695 // cond: 12696 // result: (SETNE x) 12697 for { 12698 v_0 := v.Args[0] 12699 if v_0.Op != OpAMD64InvertFlags { 12700 break 12701 } 12702 x := v_0.Args[0] 12703 v.reset(OpAMD64SETNE) 12704 v.AddArg(x) 12705 return true 12706 } 12707 // match: (SETNE (FlagEQ)) 12708 // cond: 12709 // result: (MOVLconst [0]) 12710 for { 12711 v_0 := v.Args[0] 12712 if v_0.Op != OpAMD64FlagEQ { 12713 break 12714 } 12715 v.reset(OpAMD64MOVLconst) 12716 v.AuxInt = 0 12717 return true 12718 } 12719 // match: (SETNE (FlagLT_ULT)) 12720 // cond: 12721 // result: (MOVLconst [1]) 12722 for { 12723 v_0 := v.Args[0] 12724 if v_0.Op != OpAMD64FlagLT_ULT { 12725 break 12726 } 12727 v.reset(OpAMD64MOVLconst) 12728 v.AuxInt = 1 12729 return true 12730 } 12731 // match: (SETNE (FlagLT_UGT)) 12732 // cond: 12733 // result: (MOVLconst [1]) 12734 for { 12735 v_0 := v.Args[0] 12736 if v_0.Op != OpAMD64FlagLT_UGT { 12737 break 12738 } 12739 v.reset(OpAMD64MOVLconst) 12740 v.AuxInt = 1 12741 return true 12742 } 12743 // match: (SETNE (FlagGT_ULT)) 12744 // cond: 12745 // result: (MOVLconst [1]) 12746 for { 12747 v_0 := v.Args[0] 12748 if v_0.Op != OpAMD64FlagGT_ULT { 12749 break 12750 } 12751 v.reset(OpAMD64MOVLconst) 12752 v.AuxInt = 1 12753 return true 12754 } 12755 // match: (SETNE (FlagGT_UGT)) 12756 // cond: 12757 // result: (MOVLconst [1]) 12758 for { 12759 v_0 := v.Args[0] 12760 if v_0.Op != OpAMD64FlagGT_UGT { 12761 break 12762 } 12763 v.reset(OpAMD64MOVLconst) 12764 v.AuxInt = 1 12765 return true 12766 } 12767 return false 12768 } 12769 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { 12770 b := v.Block 12771 _ = b 12772 // match: (SHLL x (MOVQconst [c])) 12773 // cond: 
12774 // result: (SHLLconst [c&31] x) 12775 for { 12776 x := v.Args[0] 12777 v_1 := v.Args[1] 12778 if v_1.Op != OpAMD64MOVQconst { 12779 break 12780 } 12781 c := v_1.AuxInt 12782 v.reset(OpAMD64SHLLconst) 12783 v.AuxInt = c & 31 12784 v.AddArg(x) 12785 return true 12786 } 12787 // match: (SHLL x (MOVLconst [c])) 12788 // cond: 12789 // result: (SHLLconst [c&31] x) 12790 for { 12791 x := v.Args[0] 12792 v_1 := v.Args[1] 12793 if v_1.Op != OpAMD64MOVLconst { 12794 break 12795 } 12796 c := v_1.AuxInt 12797 v.reset(OpAMD64SHLLconst) 12798 v.AuxInt = c & 31 12799 v.AddArg(x) 12800 return true 12801 } 12802 // match: (SHLL x (ANDLconst [31] y)) 12803 // cond: 12804 // result: (SHLL x y) 12805 for { 12806 x := v.Args[0] 12807 v_1 := v.Args[1] 12808 if v_1.Op != OpAMD64ANDLconst { 12809 break 12810 } 12811 if v_1.AuxInt != 31 { 12812 break 12813 } 12814 y := v_1.Args[0] 12815 v.reset(OpAMD64SHLL) 12816 v.AddArg(x) 12817 v.AddArg(y) 12818 return true 12819 } 12820 return false 12821 } 12822 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { 12823 b := v.Block 12824 _ = b 12825 // match: (SHLQ x (MOVQconst [c])) 12826 // cond: 12827 // result: (SHLQconst [c&63] x) 12828 for { 12829 x := v.Args[0] 12830 v_1 := v.Args[1] 12831 if v_1.Op != OpAMD64MOVQconst { 12832 break 12833 } 12834 c := v_1.AuxInt 12835 v.reset(OpAMD64SHLQconst) 12836 v.AuxInt = c & 63 12837 v.AddArg(x) 12838 return true 12839 } 12840 // match: (SHLQ x (MOVLconst [c])) 12841 // cond: 12842 // result: (SHLQconst [c&63] x) 12843 for { 12844 x := v.Args[0] 12845 v_1 := v.Args[1] 12846 if v_1.Op != OpAMD64MOVLconst { 12847 break 12848 } 12849 c := v_1.AuxInt 12850 v.reset(OpAMD64SHLQconst) 12851 v.AuxInt = c & 63 12852 v.AddArg(x) 12853 return true 12854 } 12855 // match: (SHLQ x (ANDQconst [63] y)) 12856 // cond: 12857 // result: (SHLQ x y) 12858 for { 12859 x := v.Args[0] 12860 v_1 := v.Args[1] 12861 if v_1.Op != OpAMD64ANDQconst { 12862 break 12863 } 12864 if v_1.AuxInt != 63 { 12865 break 12866 } 12867 y := v_1.Args[0] 12868 v.reset(OpAMD64SHLQ) 12869 v.AddArg(x) 12870 v.AddArg(y) 12871 return true 12872 } 12873 return false 12874 } 12875 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 12876 b := v.Block 12877 _ = b 12878 // match: (SHRB x (MOVQconst [c])) 12879 // cond: 12880 // result: (SHRBconst [c&31] x) 12881 for { 12882 x := v.Args[0] 12883 v_1 := v.Args[1] 12884 if v_1.Op != OpAMD64MOVQconst { 12885 break 12886 } 12887 c := v_1.AuxInt 12888 v.reset(OpAMD64SHRBconst) 12889 v.AuxInt = c & 31 12890 v.AddArg(x) 12891 return true 12892 } 12893 // match: (SHRB x (MOVLconst [c])) 12894 // cond: 12895 // result: (SHRBconst [c&31] x) 12896 for { 12897 x := v.Args[0] 12898 v_1 := v.Args[1] 12899 if v_1.Op != OpAMD64MOVLconst { 12900 break 12901 } 12902 c := v_1.AuxInt 12903 v.reset(OpAMD64SHRBconst) 12904 v.AuxInt = c & 31 12905 v.AddArg(x) 12906 return true 12907 } 12908 return false 12909 } 12910 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 12911 b := v.Block 12912 _ = b 12913 // match: (SHRL x (MOVQconst [c])) 12914 // cond: 12915 // result: (SHRLconst [c&31] x) 12916 for { 12917 x := v.Args[0] 12918 v_1 := v.Args[1] 12919 if v_1.Op != OpAMD64MOVQconst { 12920 break 12921 } 12922 c := v_1.AuxInt 12923 v.reset(OpAMD64SHRLconst) 12924 v.AuxInt = c & 31 12925 v.AddArg(x) 12926 return true 12927 } 12928 // match: (SHRL x (MOVLconst [c])) 12929 // cond: 12930 // result: (SHRLconst [c&31] x) 12931 for { 12932 x := v.Args[0] 12933 v_1 := v.Args[1] 12934 if v_1.Op != 
OpAMD64MOVLconst { 12935 break 12936 } 12937 c := v_1.AuxInt 12938 v.reset(OpAMD64SHRLconst) 12939 v.AuxInt = c & 31 12940 v.AddArg(x) 12941 return true 12942 } 12943 // match: (SHRL x (ANDLconst [31] y)) 12944 // cond: 12945 // result: (SHRL x y) 12946 for { 12947 x := v.Args[0] 12948 v_1 := v.Args[1] 12949 if v_1.Op != OpAMD64ANDLconst { 12950 break 12951 } 12952 if v_1.AuxInt != 31 { 12953 break 12954 } 12955 y := v_1.Args[0] 12956 v.reset(OpAMD64SHRL) 12957 v.AddArg(x) 12958 v.AddArg(y) 12959 return true 12960 } 12961 return false 12962 } 12963 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 12964 b := v.Block 12965 _ = b 12966 // match: (SHRQ x (MOVQconst [c])) 12967 // cond: 12968 // result: (SHRQconst [c&63] x) 12969 for { 12970 x := v.Args[0] 12971 v_1 := v.Args[1] 12972 if v_1.Op != OpAMD64MOVQconst { 12973 break 12974 } 12975 c := v_1.AuxInt 12976 v.reset(OpAMD64SHRQconst) 12977 v.AuxInt = c & 63 12978 v.AddArg(x) 12979 return true 12980 } 12981 // match: (SHRQ x (MOVLconst [c])) 12982 // cond: 12983 // result: (SHRQconst [c&63] x) 12984 for { 12985 x := v.Args[0] 12986 v_1 := v.Args[1] 12987 if v_1.Op != OpAMD64MOVLconst { 12988 break 12989 } 12990 c := v_1.AuxInt 12991 v.reset(OpAMD64SHRQconst) 12992 v.AuxInt = c & 63 12993 v.AddArg(x) 12994 return true 12995 } 12996 // match: (SHRQ x (ANDQconst [63] y)) 12997 // cond: 12998 // result: (SHRQ x y) 12999 for { 13000 x := v.Args[0] 13001 v_1 := v.Args[1] 13002 if v_1.Op != OpAMD64ANDQconst { 13003 break 13004 } 13005 if v_1.AuxInt != 63 { 13006 break 13007 } 13008 y := v_1.Args[0] 13009 v.reset(OpAMD64SHRQ) 13010 v.AddArg(x) 13011 v.AddArg(y) 13012 return true 13013 } 13014 return false 13015 } 13016 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { 13017 b := v.Block 13018 _ = b 13019 // match: (SHRW x (MOVQconst [c])) 13020 // cond: 13021 // result: (SHRWconst [c&31] x) 13022 for { 13023 x := v.Args[0] 13024 v_1 := v.Args[1] 13025 if v_1.Op != OpAMD64MOVQconst { 13026 break 13027 } 13028 c := v_1.AuxInt 13029 v.reset(OpAMD64SHRWconst) 13030 v.AuxInt = c & 31 13031 v.AddArg(x) 13032 return true 13033 } 13034 // match: (SHRW x (MOVLconst [c])) 13035 // cond: 13036 // result: (SHRWconst [c&31] x) 13037 for { 13038 x := v.Args[0] 13039 v_1 := v.Args[1] 13040 if v_1.Op != OpAMD64MOVLconst { 13041 break 13042 } 13043 c := v_1.AuxInt 13044 v.reset(OpAMD64SHRWconst) 13045 v.AuxInt = c & 31 13046 v.AddArg(x) 13047 return true 13048 } 13049 return false 13050 } 13051 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { 13052 b := v.Block 13053 _ = b 13054 // match: (SUBL x (MOVLconst [c])) 13055 // cond: 13056 // result: (SUBLconst x [c]) 13057 for { 13058 x := v.Args[0] 13059 v_1 := v.Args[1] 13060 if v_1.Op != OpAMD64MOVLconst { 13061 break 13062 } 13063 c := v_1.AuxInt 13064 v.reset(OpAMD64SUBLconst) 13065 v.AuxInt = c 13066 v.AddArg(x) 13067 return true 13068 } 13069 // match: (SUBL (MOVLconst [c]) x) 13070 // cond: 13071 // result: (NEGL (SUBLconst <v.Type> x [c])) 13072 for { 13073 v_0 := v.Args[0] 13074 if v_0.Op != OpAMD64MOVLconst { 13075 break 13076 } 13077 c := v_0.AuxInt 13078 x := v.Args[1] 13079 v.reset(OpAMD64NEGL) 13080 v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type) 13081 v0.AuxInt = c 13082 v0.AddArg(x) 13083 v.AddArg(v0) 13084 return true 13085 } 13086 // match: (SUBL x x) 13087 // cond: 13088 // result: (MOVLconst [0]) 13089 for { 13090 x := v.Args[0] 13091 if x != v.Args[1] { 13092 break 13093 } 13094 v.reset(OpAMD64MOVLconst) 13095 v.AuxInt = 0 13096 return 
true 13097 } 13098 return false 13099 } 13100 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { 13101 b := v.Block 13102 _ = b 13103 // match: (SUBLconst [c] x) 13104 // cond: int32(c) == 0 13105 // result: x 13106 for { 13107 c := v.AuxInt 13108 x := v.Args[0] 13109 if !(int32(c) == 0) { 13110 break 13111 } 13112 v.reset(OpCopy) 13113 v.Type = x.Type 13114 v.AddArg(x) 13115 return true 13116 } 13117 // match: (SUBLconst [c] x) 13118 // cond: 13119 // result: (ADDLconst [int64(int32(-c))] x) 13120 for { 13121 c := v.AuxInt 13122 x := v.Args[0] 13123 v.reset(OpAMD64ADDLconst) 13124 v.AuxInt = int64(int32(-c)) 13125 v.AddArg(x) 13126 return true 13127 } 13128 } 13129 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { 13130 b := v.Block 13131 _ = b 13132 // match: (SUBQ x (MOVQconst [c])) 13133 // cond: is32Bit(c) 13134 // result: (SUBQconst x [c]) 13135 for { 13136 x := v.Args[0] 13137 v_1 := v.Args[1] 13138 if v_1.Op != OpAMD64MOVQconst { 13139 break 13140 } 13141 c := v_1.AuxInt 13142 if !(is32Bit(c)) { 13143 break 13144 } 13145 v.reset(OpAMD64SUBQconst) 13146 v.AuxInt = c 13147 v.AddArg(x) 13148 return true 13149 } 13150 // match: (SUBQ (MOVQconst [c]) x) 13151 // cond: is32Bit(c) 13152 // result: (NEGQ (SUBQconst <v.Type> x [c])) 13153 for { 13154 v_0 := v.Args[0] 13155 if v_0.Op != OpAMD64MOVQconst { 13156 break 13157 } 13158 c := v_0.AuxInt 13159 x := v.Args[1] 13160 if !(is32Bit(c)) { 13161 break 13162 } 13163 v.reset(OpAMD64NEGQ) 13164 v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type) 13165 v0.AuxInt = c 13166 v0.AddArg(x) 13167 v.AddArg(v0) 13168 return true 13169 } 13170 // match: (SUBQ x x) 13171 // cond: 13172 // result: (MOVQconst [0]) 13173 for { 13174 x := v.Args[0] 13175 if x != v.Args[1] { 13176 break 13177 } 13178 v.reset(OpAMD64MOVQconst) 13179 v.AuxInt = 0 13180 return true 13181 } 13182 return false 13183 } 13184 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { 13185 b := v.Block 13186 _ = b 13187 // match: (SUBQconst [0] x) 13188 // cond: 13189 // result: x 13190 for { 13191 if v.AuxInt != 0 { 13192 break 13193 } 13194 x := v.Args[0] 13195 v.reset(OpCopy) 13196 v.Type = x.Type 13197 v.AddArg(x) 13198 return true 13199 } 13200 // match: (SUBQconst [c] x) 13201 // cond: c != -(1<<31) 13202 // result: (ADDQconst [-c] x) 13203 for { 13204 c := v.AuxInt 13205 x := v.Args[0] 13206 if !(c != -(1 << 31)) { 13207 break 13208 } 13209 v.reset(OpAMD64ADDQconst) 13210 v.AuxInt = -c 13211 v.AddArg(x) 13212 return true 13213 } 13214 // match: (SUBQconst (MOVQconst [d]) [c]) 13215 // cond: 13216 // result: (MOVQconst [d-c]) 13217 for { 13218 c := v.AuxInt 13219 v_0 := v.Args[0] 13220 if v_0.Op != OpAMD64MOVQconst { 13221 break 13222 } 13223 d := v_0.AuxInt 13224 v.reset(OpAMD64MOVQconst) 13225 v.AuxInt = d - c 13226 return true 13227 } 13228 // match: (SUBQconst (SUBQconst x [d]) [c]) 13229 // cond: is32Bit(-c-d) 13230 // result: (ADDQconst [-c-d] x) 13231 for { 13232 c := v.AuxInt 13233 v_0 := v.Args[0] 13234 if v_0.Op != OpAMD64SUBQconst { 13235 break 13236 } 13237 d := v_0.AuxInt 13238 x := v_0.Args[0] 13239 if !(is32Bit(-c - d)) { 13240 break 13241 } 13242 v.reset(OpAMD64ADDQconst) 13243 v.AuxInt = -c - d 13244 v.AddArg(x) 13245 return true 13246 } 13247 return false 13248 } 13249 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool { 13250 b := v.Block 13251 _ = b 13252 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 13253 // cond: is32Bit(off1+off2) 13254 // result: (XADDLlock 
[off1+off2] {sym} val ptr mem) 13255 for { 13256 off1 := v.AuxInt 13257 sym := v.Aux 13258 val := v.Args[0] 13259 v_1 := v.Args[1] 13260 if v_1.Op != OpAMD64ADDQconst { 13261 break 13262 } 13263 off2 := v_1.AuxInt 13264 ptr := v_1.Args[0] 13265 mem := v.Args[2] 13266 if !(is32Bit(off1 + off2)) { 13267 break 13268 } 13269 v.reset(OpAMD64XADDLlock) 13270 v.AuxInt = off1 + off2 13271 v.Aux = sym 13272 v.AddArg(val) 13273 v.AddArg(ptr) 13274 v.AddArg(mem) 13275 return true 13276 } 13277 return false 13278 } 13279 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool { 13280 b := v.Block 13281 _ = b 13282 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 13283 // cond: is32Bit(off1+off2) 13284 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 13285 for { 13286 off1 := v.AuxInt 13287 sym := v.Aux 13288 val := v.Args[0] 13289 v_1 := v.Args[1] 13290 if v_1.Op != OpAMD64ADDQconst { 13291 break 13292 } 13293 off2 := v_1.AuxInt 13294 ptr := v_1.Args[0] 13295 mem := v.Args[2] 13296 if !(is32Bit(off1 + off2)) { 13297 break 13298 } 13299 v.reset(OpAMD64XADDQlock) 13300 v.AuxInt = off1 + off2 13301 v.Aux = sym 13302 v.AddArg(val) 13303 v.AddArg(ptr) 13304 v.AddArg(mem) 13305 return true 13306 } 13307 return false 13308 } 13309 func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool { 13310 b := v.Block 13311 _ = b 13312 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 13313 // cond: is32Bit(off1+off2) 13314 // result: (XCHGL [off1+off2] {sym} val ptr mem) 13315 for { 13316 off1 := v.AuxInt 13317 sym := v.Aux 13318 val := v.Args[0] 13319 v_1 := v.Args[1] 13320 if v_1.Op != OpAMD64ADDQconst { 13321 break 13322 } 13323 off2 := v_1.AuxInt 13324 ptr := v_1.Args[0] 13325 mem := v.Args[2] 13326 if !(is32Bit(off1 + off2)) { 13327 break 13328 } 13329 v.reset(OpAMD64XCHGL) 13330 v.AuxInt = off1 + off2 13331 v.Aux = sym 13332 v.AddArg(val) 13333 v.AddArg(ptr) 13334 v.AddArg(mem) 13335 return true 13336 } 13337 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 13338 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 13339 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 13340 for { 13341 off1 := v.AuxInt 13342 sym1 := v.Aux 13343 val := v.Args[0] 13344 v_1 := v.Args[1] 13345 if v_1.Op != OpAMD64LEAQ { 13346 break 13347 } 13348 off2 := v_1.AuxInt 13349 sym2 := v_1.Aux 13350 ptr := v_1.Args[0] 13351 mem := v.Args[2] 13352 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 13353 break 13354 } 13355 v.reset(OpAMD64XCHGL) 13356 v.AuxInt = off1 + off2 13357 v.Aux = mergeSym(sym1, sym2) 13358 v.AddArg(val) 13359 v.AddArg(ptr) 13360 v.AddArg(mem) 13361 return true 13362 } 13363 return false 13364 } 13365 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool { 13366 b := v.Block 13367 _ = b 13368 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 13369 // cond: is32Bit(off1+off2) 13370 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 13371 for { 13372 off1 := v.AuxInt 13373 sym := v.Aux 13374 val := v.Args[0] 13375 v_1 := v.Args[1] 13376 if v_1.Op != OpAMD64ADDQconst { 13377 break 13378 } 13379 off2 := v_1.AuxInt 13380 ptr := v_1.Args[0] 13381 mem := v.Args[2] 13382 if !(is32Bit(off1 + off2)) { 13383 break 13384 } 13385 v.reset(OpAMD64XCHGQ) 13386 v.AuxInt = off1 + off2 13387 v.Aux = sym 13388 v.AddArg(val) 13389 v.AddArg(ptr) 13390 v.AddArg(mem) 13391 return true 13392 } 13393 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 13394 // cond: 
is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 13395 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 13396 for { 13397 off1 := v.AuxInt 13398 sym1 := v.Aux 13399 val := v.Args[0] 13400 v_1 := v.Args[1] 13401 if v_1.Op != OpAMD64LEAQ { 13402 break 13403 } 13404 off2 := v_1.AuxInt 13405 sym2 := v_1.Aux 13406 ptr := v_1.Args[0] 13407 mem := v.Args[2] 13408 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 13409 break 13410 } 13411 v.reset(OpAMD64XCHGQ) 13412 v.AuxInt = off1 + off2 13413 v.Aux = mergeSym(sym1, sym2) 13414 v.AddArg(val) 13415 v.AddArg(ptr) 13416 v.AddArg(mem) 13417 return true 13418 } 13419 return false 13420 } 13421 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { 13422 b := v.Block 13423 _ = b 13424 // match: (XORL x (MOVLconst [c])) 13425 // cond: 13426 // result: (XORLconst [c] x) 13427 for { 13428 x := v.Args[0] 13429 v_1 := v.Args[1] 13430 if v_1.Op != OpAMD64MOVLconst { 13431 break 13432 } 13433 c := v_1.AuxInt 13434 v.reset(OpAMD64XORLconst) 13435 v.AuxInt = c 13436 v.AddArg(x) 13437 return true 13438 } 13439 // match: (XORL (MOVLconst [c]) x) 13440 // cond: 13441 // result: (XORLconst [c] x) 13442 for { 13443 v_0 := v.Args[0] 13444 if v_0.Op != OpAMD64MOVLconst { 13445 break 13446 } 13447 c := v_0.AuxInt 13448 x := v.Args[1] 13449 v.reset(OpAMD64XORLconst) 13450 v.AuxInt = c 13451 v.AddArg(x) 13452 return true 13453 } 13454 // match: (XORL x x) 13455 // cond: 13456 // result: (MOVLconst [0]) 13457 for { 13458 x := v.Args[0] 13459 if x != v.Args[1] { 13460 break 13461 } 13462 v.reset(OpAMD64MOVLconst) 13463 v.AuxInt = 0 13464 return true 13465 } 13466 return false 13467 } 13468 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { 13469 b := v.Block 13470 _ = b 13471 // match: (XORLconst [c] (XORLconst [d] x)) 13472 // cond: 13473 // result: (XORLconst [c ^ d] x) 13474 for { 13475 c := v.AuxInt 13476 v_0 := v.Args[0] 13477 if v_0.Op != OpAMD64XORLconst { 13478 break 13479 } 13480 d := v_0.AuxInt 13481 x := v_0.Args[0] 13482 v.reset(OpAMD64XORLconst) 13483 v.AuxInt = c ^ d 13484 v.AddArg(x) 13485 return true 13486 } 13487 // match: (XORLconst [c] x) 13488 // cond: int32(c)==0 13489 // result: x 13490 for { 13491 c := v.AuxInt 13492 x := v.Args[0] 13493 if !(int32(c) == 0) { 13494 break 13495 } 13496 v.reset(OpCopy) 13497 v.Type = x.Type 13498 v.AddArg(x) 13499 return true 13500 } 13501 // match: (XORLconst [c] (MOVLconst [d])) 13502 // cond: 13503 // result: (MOVLconst [c^d]) 13504 for { 13505 c := v.AuxInt 13506 v_0 := v.Args[0] 13507 if v_0.Op != OpAMD64MOVLconst { 13508 break 13509 } 13510 d := v_0.AuxInt 13511 v.reset(OpAMD64MOVLconst) 13512 v.AuxInt = c ^ d 13513 return true 13514 } 13515 return false 13516 } 13517 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { 13518 b := v.Block 13519 _ = b 13520 // match: (XORQ x (MOVQconst [c])) 13521 // cond: is32Bit(c) 13522 // result: (XORQconst [c] x) 13523 for { 13524 x := v.Args[0] 13525 v_1 := v.Args[1] 13526 if v_1.Op != OpAMD64MOVQconst { 13527 break 13528 } 13529 c := v_1.AuxInt 13530 if !(is32Bit(c)) { 13531 break 13532 } 13533 v.reset(OpAMD64XORQconst) 13534 v.AuxInt = c 13535 v.AddArg(x) 13536 return true 13537 } 13538 // match: (XORQ (MOVQconst [c]) x) 13539 // cond: is32Bit(c) 13540 // result: (XORQconst [c] x) 13541 for { 13542 v_0 := v.Args[0] 13543 if v_0.Op != OpAMD64MOVQconst { 13544 break 13545 } 13546 c := v_0.AuxInt 13547 x := v.Args[1] 13548 if !(is32Bit(c)) { 13549 break 13550 } 13551 
v.reset(OpAMD64XORQconst) 13552 v.AuxInt = c 13553 v.AddArg(x) 13554 return true 13555 } 13556 // match: (XORQ x x) 13557 // cond: 13558 // result: (MOVQconst [0]) 13559 for { 13560 x := v.Args[0] 13561 if x != v.Args[1] { 13562 break 13563 } 13564 v.reset(OpAMD64MOVQconst) 13565 v.AuxInt = 0 13566 return true 13567 } 13568 return false 13569 } 13570 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { 13571 b := v.Block 13572 _ = b 13573 // match: (XORQconst [c] (XORQconst [d] x)) 13574 // cond: 13575 // result: (XORQconst [c ^ d] x) 13576 for { 13577 c := v.AuxInt 13578 v_0 := v.Args[0] 13579 if v_0.Op != OpAMD64XORQconst { 13580 break 13581 } 13582 d := v_0.AuxInt 13583 x := v_0.Args[0] 13584 v.reset(OpAMD64XORQconst) 13585 v.AuxInt = c ^ d 13586 v.AddArg(x) 13587 return true 13588 } 13589 // match: (XORQconst [0] x) 13590 // cond: 13591 // result: x 13592 for { 13593 if v.AuxInt != 0 { 13594 break 13595 } 13596 x := v.Args[0] 13597 v.reset(OpCopy) 13598 v.Type = x.Type 13599 v.AddArg(x) 13600 return true 13601 } 13602 // match: (XORQconst [c] (MOVQconst [d])) 13603 // cond: 13604 // result: (MOVQconst [c^d]) 13605 for { 13606 c := v.AuxInt 13607 v_0 := v.Args[0] 13608 if v_0.Op != OpAMD64MOVQconst { 13609 break 13610 } 13611 d := v_0.AuxInt 13612 v.reset(OpAMD64MOVQconst) 13613 v.AuxInt = c ^ d 13614 return true 13615 } 13616 return false 13617 } 13618 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { 13619 b := v.Block 13620 _ = b 13621 // match: (Add16 x y) 13622 // cond: 13623 // result: (ADDL x y) 13624 for { 13625 x := v.Args[0] 13626 y := v.Args[1] 13627 v.reset(OpAMD64ADDL) 13628 v.AddArg(x) 13629 v.AddArg(y) 13630 return true 13631 } 13632 } 13633 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool { 13634 b := v.Block 13635 _ = b 13636 // match: (Add32 x y) 13637 // cond: 13638 // result: (ADDL x y) 13639 for { 13640 x := v.Args[0] 13641 y := v.Args[1] 13642 v.reset(OpAMD64ADDL) 13643 v.AddArg(x) 13644 v.AddArg(y) 13645 return true 13646 } 13647 } 13648 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool { 13649 b := v.Block 13650 _ = b 13651 // match: (Add32F x y) 13652 // cond: 13653 // result: (ADDSS x y) 13654 for { 13655 x := v.Args[0] 13656 y := v.Args[1] 13657 v.reset(OpAMD64ADDSS) 13658 v.AddArg(x) 13659 v.AddArg(y) 13660 return true 13661 } 13662 } 13663 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool { 13664 b := v.Block 13665 _ = b 13666 // match: (Add64 x y) 13667 // cond: 13668 // result: (ADDQ x y) 13669 for { 13670 x := v.Args[0] 13671 y := v.Args[1] 13672 v.reset(OpAMD64ADDQ) 13673 v.AddArg(x) 13674 v.AddArg(y) 13675 return true 13676 } 13677 } 13678 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool { 13679 b := v.Block 13680 _ = b 13681 // match: (Add64F x y) 13682 // cond: 13683 // result: (ADDSD x y) 13684 for { 13685 x := v.Args[0] 13686 y := v.Args[1] 13687 v.reset(OpAMD64ADDSD) 13688 v.AddArg(x) 13689 v.AddArg(y) 13690 return true 13691 } 13692 } 13693 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { 13694 b := v.Block 13695 _ = b 13696 // match: (Add8 x y) 13697 // cond: 13698 // result: (ADDL x y) 13699 for { 13700 x := v.Args[0] 13701 y := v.Args[1] 13702 v.reset(OpAMD64ADDL) 13703 v.AddArg(x) 13704 v.AddArg(y) 13705 return true 13706 } 13707 } 13708 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool { 13709 b := v.Block 13710 _ = b 13711 // match: (AddPtr x y) 13712 // cond: config.PtrSize == 8 13713 // result: (ADDQ x y) 13714 for { 13715 x := 
v.Args[0] 13716 y := v.Args[1] 13717 if !(config.PtrSize == 8) { 13718 break 13719 } 13720 v.reset(OpAMD64ADDQ) 13721 v.AddArg(x) 13722 v.AddArg(y) 13723 return true 13724 } 13725 // match: (AddPtr x y) 13726 // cond: config.PtrSize == 4 13727 // result: (ADDL x y) 13728 for { 13729 x := v.Args[0] 13730 y := v.Args[1] 13731 if !(config.PtrSize == 4) { 13732 break 13733 } 13734 v.reset(OpAMD64ADDL) 13735 v.AddArg(x) 13736 v.AddArg(y) 13737 return true 13738 } 13739 return false 13740 } 13741 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool { 13742 b := v.Block 13743 _ = b 13744 // match: (Addr {sym} base) 13745 // cond: config.PtrSize == 8 13746 // result: (LEAQ {sym} base) 13747 for { 13748 sym := v.Aux 13749 base := v.Args[0] 13750 if !(config.PtrSize == 8) { 13751 break 13752 } 13753 v.reset(OpAMD64LEAQ) 13754 v.Aux = sym 13755 v.AddArg(base) 13756 return true 13757 } 13758 // match: (Addr {sym} base) 13759 // cond: config.PtrSize == 4 13760 // result: (LEAL {sym} base) 13761 for { 13762 sym := v.Aux 13763 base := v.Args[0] 13764 if !(config.PtrSize == 4) { 13765 break 13766 } 13767 v.reset(OpAMD64LEAL) 13768 v.Aux = sym 13769 v.AddArg(base) 13770 return true 13771 } 13772 return false 13773 } 13774 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { 13775 b := v.Block 13776 _ = b 13777 // match: (And16 x y) 13778 // cond: 13779 // result: (ANDL x y) 13780 for { 13781 x := v.Args[0] 13782 y := v.Args[1] 13783 v.reset(OpAMD64ANDL) 13784 v.AddArg(x) 13785 v.AddArg(y) 13786 return true 13787 } 13788 } 13789 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool { 13790 b := v.Block 13791 _ = b 13792 // match: (And32 x y) 13793 // cond: 13794 // result: (ANDL x y) 13795 for { 13796 x := v.Args[0] 13797 y := v.Args[1] 13798 v.reset(OpAMD64ANDL) 13799 v.AddArg(x) 13800 v.AddArg(y) 13801 return true 13802 } 13803 } 13804 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool { 13805 b := v.Block 13806 _ = b 13807 // match: (And64 x y) 13808 // cond: 13809 // result: (ANDQ x y) 13810 for { 13811 x := v.Args[0] 13812 y := v.Args[1] 13813 v.reset(OpAMD64ANDQ) 13814 v.AddArg(x) 13815 v.AddArg(y) 13816 return true 13817 } 13818 } 13819 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { 13820 b := v.Block 13821 _ = b 13822 // match: (And8 x y) 13823 // cond: 13824 // result: (ANDL x y) 13825 for { 13826 x := v.Args[0] 13827 y := v.Args[1] 13828 v.reset(OpAMD64ANDL) 13829 v.AddArg(x) 13830 v.AddArg(y) 13831 return true 13832 } 13833 } 13834 func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool { 13835 b := v.Block 13836 _ = b 13837 // match: (AndB x y) 13838 // cond: 13839 // result: (ANDL x y) 13840 for { 13841 x := v.Args[0] 13842 y := v.Args[1] 13843 v.reset(OpAMD64ANDL) 13844 v.AddArg(x) 13845 v.AddArg(y) 13846 return true 13847 } 13848 } 13849 func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool { 13850 b := v.Block 13851 _ = b 13852 // match: (AtomicAdd32 ptr val mem) 13853 // cond: 13854 // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) 13855 for { 13856 ptr := v.Args[0] 13857 val := v.Args[1] 13858 mem := v.Args[2] 13859 v.reset(OpAMD64AddTupleFirst32) 13860 v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem)) 13861 v0.AddArg(val) 13862 v0.AddArg(ptr) 13863 v0.AddArg(mem) 13864 v.AddArg(v0) 13865 v.AddArg(val) 13866 return true 13867 } 13868 } 13869 func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool { 13870 b := v.Block 13871 _ = b 13872 // match: (AtomicAdd64 ptr 
val mem) 13873 // cond: 13874 // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) 13875 for { 13876 ptr := v.Args[0] 13877 val := v.Args[1] 13878 mem := v.Args[2] 13879 v.reset(OpAMD64AddTupleFirst64) 13880 v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem)) 13881 v0.AddArg(val) 13882 v0.AddArg(ptr) 13883 v0.AddArg(mem) 13884 v.AddArg(v0) 13885 v.AddArg(val) 13886 return true 13887 } 13888 } 13889 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool { 13890 b := v.Block 13891 _ = b 13892 // match: (AtomicAnd8 ptr val mem) 13893 // cond: 13894 // result: (ANDBlock ptr val mem) 13895 for { 13896 ptr := v.Args[0] 13897 val := v.Args[1] 13898 mem := v.Args[2] 13899 v.reset(OpAMD64ANDBlock) 13900 v.AddArg(ptr) 13901 v.AddArg(val) 13902 v.AddArg(mem) 13903 return true 13904 } 13905 } 13906 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool { 13907 b := v.Block 13908 _ = b 13909 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 13910 // cond: 13911 // result: (CMPXCHGLlock ptr old new_ mem) 13912 for { 13913 ptr := v.Args[0] 13914 old := v.Args[1] 13915 new_ := v.Args[2] 13916 mem := v.Args[3] 13917 v.reset(OpAMD64CMPXCHGLlock) 13918 v.AddArg(ptr) 13919 v.AddArg(old) 13920 v.AddArg(new_) 13921 v.AddArg(mem) 13922 return true 13923 } 13924 } 13925 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool { 13926 b := v.Block 13927 _ = b 13928 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 13929 // cond: 13930 // result: (CMPXCHGQlock ptr old new_ mem) 13931 for { 13932 ptr := v.Args[0] 13933 old := v.Args[1] 13934 new_ := v.Args[2] 13935 mem := v.Args[3] 13936 v.reset(OpAMD64CMPXCHGQlock) 13937 v.AddArg(ptr) 13938 v.AddArg(old) 13939 v.AddArg(new_) 13940 v.AddArg(mem) 13941 return true 13942 } 13943 } 13944 func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool { 13945 b := v.Block 13946 _ = b 13947 // match: (AtomicExchange32 ptr val mem) 13948 // cond: 13949 // result: (XCHGL val ptr mem) 13950 for { 13951 ptr := v.Args[0] 13952 val := v.Args[1] 13953 mem := v.Args[2] 13954 v.reset(OpAMD64XCHGL) 13955 v.AddArg(val) 13956 v.AddArg(ptr) 13957 v.AddArg(mem) 13958 return true 13959 } 13960 } 13961 func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool { 13962 b := v.Block 13963 _ = b 13964 // match: (AtomicExchange64 ptr val mem) 13965 // cond: 13966 // result: (XCHGQ val ptr mem) 13967 for { 13968 ptr := v.Args[0] 13969 val := v.Args[1] 13970 mem := v.Args[2] 13971 v.reset(OpAMD64XCHGQ) 13972 v.AddArg(val) 13973 v.AddArg(ptr) 13974 v.AddArg(mem) 13975 return true 13976 } 13977 } 13978 func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool { 13979 b := v.Block 13980 _ = b 13981 // match: (AtomicLoad32 ptr mem) 13982 // cond: 13983 // result: (MOVLatomicload ptr mem) 13984 for { 13985 ptr := v.Args[0] 13986 mem := v.Args[1] 13987 v.reset(OpAMD64MOVLatomicload) 13988 v.AddArg(ptr) 13989 v.AddArg(mem) 13990 return true 13991 } 13992 } 13993 func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool { 13994 b := v.Block 13995 _ = b 13996 // match: (AtomicLoad64 ptr mem) 13997 // cond: 13998 // result: (MOVQatomicload ptr mem) 13999 for { 14000 ptr := v.Args[0] 14001 mem := v.Args[1] 14002 v.reset(OpAMD64MOVQatomicload) 14003 v.AddArg(ptr) 14004 v.AddArg(mem) 14005 return true 14006 } 14007 } 14008 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool { 14009 b := v.Block 14010 _ = b 14011 // match: (AtomicLoadPtr ptr 
mem) 14012 // cond: config.PtrSize == 8 14013 // result: (MOVQatomicload ptr mem) 14014 for { 14015 ptr := v.Args[0] 14016 mem := v.Args[1] 14017 if !(config.PtrSize == 8) { 14018 break 14019 } 14020 v.reset(OpAMD64MOVQatomicload) 14021 v.AddArg(ptr) 14022 v.AddArg(mem) 14023 return true 14024 } 14025 // match: (AtomicLoadPtr ptr mem) 14026 // cond: config.PtrSize == 4 14027 // result: (MOVLatomicload ptr mem) 14028 for { 14029 ptr := v.Args[0] 14030 mem := v.Args[1] 14031 if !(config.PtrSize == 4) { 14032 break 14033 } 14034 v.reset(OpAMD64MOVLatomicload) 14035 v.AddArg(ptr) 14036 v.AddArg(mem) 14037 return true 14038 } 14039 return false 14040 } 14041 func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool { 14042 b := v.Block 14043 _ = b 14044 // match: (AtomicOr8 ptr val mem) 14045 // cond: 14046 // result: (ORBlock ptr val mem) 14047 for { 14048 ptr := v.Args[0] 14049 val := v.Args[1] 14050 mem := v.Args[2] 14051 v.reset(OpAMD64ORBlock) 14052 v.AddArg(ptr) 14053 v.AddArg(val) 14054 v.AddArg(mem) 14055 return true 14056 } 14057 } 14058 func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool { 14059 b := v.Block 14060 _ = b 14061 // match: (AtomicStore32 ptr val mem) 14062 // cond: 14063 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem)) 14064 for { 14065 ptr := v.Args[0] 14066 val := v.Args[1] 14067 mem := v.Args[2] 14068 v.reset(OpSelect1) 14069 v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem)) 14070 v0.AddArg(val) 14071 v0.AddArg(ptr) 14072 v0.AddArg(mem) 14073 v.AddArg(v0) 14074 return true 14075 } 14076 } 14077 func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool { 14078 b := v.Block 14079 _ = b 14080 // match: (AtomicStore64 ptr val mem) 14081 // cond: 14082 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem)) 14083 for { 14084 ptr := v.Args[0] 14085 val := v.Args[1] 14086 mem := v.Args[2] 14087 v.reset(OpSelect1) 14088 v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem)) 14089 v0.AddArg(val) 14090 v0.AddArg(ptr) 14091 v0.AddArg(mem) 14092 v.AddArg(v0) 14093 return true 14094 } 14095 } 14096 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool { 14097 b := v.Block 14098 _ = b 14099 // match: (AtomicStorePtrNoWB ptr val mem) 14100 // cond: config.PtrSize == 8 14101 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 14102 for { 14103 ptr := v.Args[0] 14104 val := v.Args[1] 14105 mem := v.Args[2] 14106 if !(config.PtrSize == 8) { 14107 break 14108 } 14109 v.reset(OpSelect1) 14110 v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 14111 v0.AddArg(val) 14112 v0.AddArg(ptr) 14113 v0.AddArg(mem) 14114 v.AddArg(v0) 14115 return true 14116 } 14117 // match: (AtomicStorePtrNoWB ptr val mem) 14118 // cond: config.PtrSize == 4 14119 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 14120 for { 14121 ptr := v.Args[0] 14122 val := v.Args[1] 14123 mem := v.Args[2] 14124 if !(config.PtrSize == 4) { 14125 break 14126 } 14127 v.reset(OpSelect1) 14128 v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 14129 v0.AddArg(val) 14130 v0.AddArg(ptr) 14131 v0.AddArg(mem) 14132 v.AddArg(v0) 14133 return true 14134 } 14135 return false 14136 } 14137 func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { 14138 
b := v.Block 14139 _ = b 14140 // match: (Avg64u x y) 14141 // cond: 14142 // result: (AVGQU x y) 14143 for { 14144 x := v.Args[0] 14145 y := v.Args[1] 14146 v.reset(OpAMD64AVGQU) 14147 v.AddArg(x) 14148 v.AddArg(y) 14149 return true 14150 } 14151 } 14152 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool { 14153 b := v.Block 14154 _ = b 14155 // match: (Bswap32 x) 14156 // cond: 14157 // result: (BSWAPL x) 14158 for { 14159 x := v.Args[0] 14160 v.reset(OpAMD64BSWAPL) 14161 v.AddArg(x) 14162 return true 14163 } 14164 } 14165 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool { 14166 b := v.Block 14167 _ = b 14168 // match: (Bswap64 x) 14169 // cond: 14170 // result: (BSWAPQ x) 14171 for { 14172 x := v.Args[0] 14173 v.reset(OpAMD64BSWAPQ) 14174 v.AddArg(x) 14175 return true 14176 } 14177 } 14178 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool { 14179 b := v.Block 14180 _ = b 14181 // match: (ClosureCall [argwid] entry closure mem) 14182 // cond: 14183 // result: (CALLclosure [argwid] entry closure mem) 14184 for { 14185 argwid := v.AuxInt 14186 entry := v.Args[0] 14187 closure := v.Args[1] 14188 mem := v.Args[2] 14189 v.reset(OpAMD64CALLclosure) 14190 v.AuxInt = argwid 14191 v.AddArg(entry) 14192 v.AddArg(closure) 14193 v.AddArg(mem) 14194 return true 14195 } 14196 } 14197 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { 14198 b := v.Block 14199 _ = b 14200 // match: (Com16 x) 14201 // cond: 14202 // result: (NOTL x) 14203 for { 14204 x := v.Args[0] 14205 v.reset(OpAMD64NOTL) 14206 v.AddArg(x) 14207 return true 14208 } 14209 } 14210 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool { 14211 b := v.Block 14212 _ = b 14213 // match: (Com32 x) 14214 // cond: 14215 // result: (NOTL x) 14216 for { 14217 x := v.Args[0] 14218 v.reset(OpAMD64NOTL) 14219 v.AddArg(x) 14220 return true 14221 } 14222 } 14223 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool { 14224 b := v.Block 14225 _ = b 14226 // match: (Com64 x) 14227 // cond: 14228 // result: (NOTQ x) 14229 for { 14230 x := v.Args[0] 14231 v.reset(OpAMD64NOTQ) 14232 v.AddArg(x) 14233 return true 14234 } 14235 } 14236 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { 14237 b := v.Block 14238 _ = b 14239 // match: (Com8 x) 14240 // cond: 14241 // result: (NOTL x) 14242 for { 14243 x := v.Args[0] 14244 v.reset(OpAMD64NOTL) 14245 v.AddArg(x) 14246 return true 14247 } 14248 } 14249 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { 14250 b := v.Block 14251 _ = b 14252 // match: (Const16 [val]) 14253 // cond: 14254 // result: (MOVLconst [val]) 14255 for { 14256 val := v.AuxInt 14257 v.reset(OpAMD64MOVLconst) 14258 v.AuxInt = val 14259 return true 14260 } 14261 } 14262 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool { 14263 b := v.Block 14264 _ = b 14265 // match: (Const32 [val]) 14266 // cond: 14267 // result: (MOVLconst [val]) 14268 for { 14269 val := v.AuxInt 14270 v.reset(OpAMD64MOVLconst) 14271 v.AuxInt = val 14272 return true 14273 } 14274 } 14275 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool { 14276 b := v.Block 14277 _ = b 14278 // match: (Const32F [val]) 14279 // cond: 14280 // result: (MOVSSconst [val]) 14281 for { 14282 val := v.AuxInt 14283 v.reset(OpAMD64MOVSSconst) 14284 v.AuxInt = val 14285 return true 14286 } 14287 } 14288 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool { 14289 b := v.Block 14290 _ = b 14291 // match: (Const64 [val]) 14292 // cond: 14293 // result: (MOVQconst 
[val]) 14294 for { 14295 val := v.AuxInt 14296 v.reset(OpAMD64MOVQconst) 14297 v.AuxInt = val 14298 return true 14299 } 14300 } 14301 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool { 14302 b := v.Block 14303 _ = b 14304 // match: (Const64F [val]) 14305 // cond: 14306 // result: (MOVSDconst [val]) 14307 for { 14308 val := v.AuxInt 14309 v.reset(OpAMD64MOVSDconst) 14310 v.AuxInt = val 14311 return true 14312 } 14313 } 14314 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { 14315 b := v.Block 14316 _ = b 14317 // match: (Const8 [val]) 14318 // cond: 14319 // result: (MOVLconst [val]) 14320 for { 14321 val := v.AuxInt 14322 v.reset(OpAMD64MOVLconst) 14323 v.AuxInt = val 14324 return true 14325 } 14326 } 14327 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { 14328 b := v.Block 14329 _ = b 14330 // match: (ConstBool [b]) 14331 // cond: 14332 // result: (MOVLconst [b]) 14333 for { 14334 b := v.AuxInt 14335 v.reset(OpAMD64MOVLconst) 14336 v.AuxInt = b 14337 return true 14338 } 14339 } 14340 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool { 14341 b := v.Block 14342 _ = b 14343 // match: (ConstNil) 14344 // cond: config.PtrSize == 8 14345 // result: (MOVQconst [0]) 14346 for { 14347 if !(config.PtrSize == 8) { 14348 break 14349 } 14350 v.reset(OpAMD64MOVQconst) 14351 v.AuxInt = 0 14352 return true 14353 } 14354 // match: (ConstNil) 14355 // cond: config.PtrSize == 4 14356 // result: (MOVLconst [0]) 14357 for { 14358 if !(config.PtrSize == 4) { 14359 break 14360 } 14361 v.reset(OpAMD64MOVLconst) 14362 v.AuxInt = 0 14363 return true 14364 } 14365 return false 14366 } 14367 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool { 14368 b := v.Block 14369 _ = b 14370 // match: (Convert <t> x mem) 14371 // cond: config.PtrSize == 8 14372 // result: (MOVQconvert <t> x mem) 14373 for { 14374 t := v.Type 14375 x := v.Args[0] 14376 mem := v.Args[1] 14377 if !(config.PtrSize == 8) { 14378 break 14379 } 14380 v.reset(OpAMD64MOVQconvert) 14381 v.Type = t 14382 v.AddArg(x) 14383 v.AddArg(mem) 14384 return true 14385 } 14386 // match: (Convert <t> x mem) 14387 // cond: config.PtrSize == 4 14388 // result: (MOVLconvert <t> x mem) 14389 for { 14390 t := v.Type 14391 x := v.Args[0] 14392 mem := v.Args[1] 14393 if !(config.PtrSize == 4) { 14394 break 14395 } 14396 v.reset(OpAMD64MOVLconvert) 14397 v.Type = t 14398 v.AddArg(x) 14399 v.AddArg(mem) 14400 return true 14401 } 14402 return false 14403 } 14404 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool { 14405 b := v.Block 14406 _ = b 14407 // match: (Ctz32 <t> x) 14408 // cond: 14409 // result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x))) 14410 for { 14411 t := v.Type 14412 x := v.Args[0] 14413 v.reset(OpAMD64CMOVLEQ) 14414 v0 := b.NewValue0(v.Line, OpSelect0, t) 14415 v1 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 14416 v1.AddArg(x) 14417 v0.AddArg(v1) 14418 v.AddArg(v0) 14419 v2 := b.NewValue0(v.Line, OpAMD64MOVLconst, t) 14420 v2.AuxInt = 32 14421 v.AddArg(v2) 14422 v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags) 14423 v4 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 14424 v4.AddArg(x) 14425 v3.AddArg(v4) 14426 v.AddArg(v3) 14427 return true 14428 } 14429 } 14430 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool { 14431 b := v.Block 14432 _ = b 14433 // match: (Ctz64 <t> x) 14434 // cond: 14435 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> 
[64]) (Select1 <TypeFlags> (BSFQ x))) 14436 for { 14437 t := v.Type 14438 x := v.Args[0] 14439 v.reset(OpAMD64CMOVQEQ) 14440 v0 := b.NewValue0(v.Line, OpSelect0, t) 14441 v1 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 14442 v1.AddArg(x) 14443 v0.AddArg(v1) 14444 v.AddArg(v0) 14445 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, t) 14446 v2.AuxInt = 64 14447 v.AddArg(v2) 14448 v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags) 14449 v4 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 14450 v4.AddArg(x) 14451 v3.AddArg(v4) 14452 v.AddArg(v3) 14453 return true 14454 } 14455 } 14456 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool { 14457 b := v.Block 14458 _ = b 14459 // match: (Cvt32Fto32 x) 14460 // cond: 14461 // result: (CVTTSS2SL x) 14462 for { 14463 x := v.Args[0] 14464 v.reset(OpAMD64CVTTSS2SL) 14465 v.AddArg(x) 14466 return true 14467 } 14468 } 14469 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool { 14470 b := v.Block 14471 _ = b 14472 // match: (Cvt32Fto64 x) 14473 // cond: 14474 // result: (CVTTSS2SQ x) 14475 for { 14476 x := v.Args[0] 14477 v.reset(OpAMD64CVTTSS2SQ) 14478 v.AddArg(x) 14479 return true 14480 } 14481 } 14482 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool { 14483 b := v.Block 14484 _ = b 14485 // match: (Cvt32Fto64F x) 14486 // cond: 14487 // result: (CVTSS2SD x) 14488 for { 14489 x := v.Args[0] 14490 v.reset(OpAMD64CVTSS2SD) 14491 v.AddArg(x) 14492 return true 14493 } 14494 } 14495 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool { 14496 b := v.Block 14497 _ = b 14498 // match: (Cvt32to32F x) 14499 // cond: 14500 // result: (CVTSL2SS x) 14501 for { 14502 x := v.Args[0] 14503 v.reset(OpAMD64CVTSL2SS) 14504 v.AddArg(x) 14505 return true 14506 } 14507 } 14508 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool { 14509 b := v.Block 14510 _ = b 14511 // match: (Cvt32to64F x) 14512 // cond: 14513 // result: (CVTSL2SD x) 14514 for { 14515 x := v.Args[0] 14516 v.reset(OpAMD64CVTSL2SD) 14517 v.AddArg(x) 14518 return true 14519 } 14520 } 14521 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool { 14522 b := v.Block 14523 _ = b 14524 // match: (Cvt64Fto32 x) 14525 // cond: 14526 // result: (CVTTSD2SL x) 14527 for { 14528 x := v.Args[0] 14529 v.reset(OpAMD64CVTTSD2SL) 14530 v.AddArg(x) 14531 return true 14532 } 14533 } 14534 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool { 14535 b := v.Block 14536 _ = b 14537 // match: (Cvt64Fto32F x) 14538 // cond: 14539 // result: (CVTSD2SS x) 14540 for { 14541 x := v.Args[0] 14542 v.reset(OpAMD64CVTSD2SS) 14543 v.AddArg(x) 14544 return true 14545 } 14546 } 14547 func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool { 14548 b := v.Block 14549 _ = b 14550 // match: (Cvt64Fto64 x) 14551 // cond: 14552 // result: (CVTTSD2SQ x) 14553 for { 14554 x := v.Args[0] 14555 v.reset(OpAMD64CVTTSD2SQ) 14556 v.AddArg(x) 14557 return true 14558 } 14559 } 14560 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool { 14561 b := v.Block 14562 _ = b 14563 // match: (Cvt64to32F x) 14564 // cond: 14565 // result: (CVTSQ2SS x) 14566 for { 14567 x := v.Args[0] 14568 v.reset(OpAMD64CVTSQ2SS) 14569 v.AddArg(x) 14570 return true 14571 } 14572 } 14573 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool { 14574 b := v.Block 14575 _ = b 14576 // match: (Cvt64to64F x) 14577 // cond: 14578 // result: (CVTSQ2SD x) 14579 for { 14580 x := v.Args[0] 
14581 v.reset(OpAMD64CVTSQ2SD) 14582 v.AddArg(x) 14583 return true 14584 } 14585 } 14586 func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool { 14587 b := v.Block 14588 _ = b 14589 // match: (DeferCall [argwid] mem) 14590 // cond: 14591 // result: (CALLdefer [argwid] mem) 14592 for { 14593 argwid := v.AuxInt 14594 mem := v.Args[0] 14595 v.reset(OpAMD64CALLdefer) 14596 v.AuxInt = argwid 14597 v.AddArg(mem) 14598 return true 14599 } 14600 } 14601 func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool { 14602 b := v.Block 14603 _ = b 14604 // match: (Div128u xhi xlo y) 14605 // cond: 14606 // result: (DIVQU2 xhi xlo y) 14607 for { 14608 xhi := v.Args[0] 14609 xlo := v.Args[1] 14610 y := v.Args[2] 14611 v.reset(OpAMD64DIVQU2) 14612 v.AddArg(xhi) 14613 v.AddArg(xlo) 14614 v.AddArg(y) 14615 return true 14616 } 14617 } 14618 func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool { 14619 b := v.Block 14620 _ = b 14621 // match: (Div16 x y) 14622 // cond: 14623 // result: (Select0 (DIVW x y)) 14624 for { 14625 x := v.Args[0] 14626 y := v.Args[1] 14627 v.reset(OpSelect0) 14628 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 14629 v0.AddArg(x) 14630 v0.AddArg(y) 14631 v.AddArg(v0) 14632 return true 14633 } 14634 } 14635 func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool { 14636 b := v.Block 14637 _ = b 14638 // match: (Div16u x y) 14639 // cond: 14640 // result: (Select0 (DIVWU x y)) 14641 for { 14642 x := v.Args[0] 14643 y := v.Args[1] 14644 v.reset(OpSelect0) 14645 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 14646 v0.AddArg(x) 14647 v0.AddArg(y) 14648 v.AddArg(v0) 14649 return true 14650 } 14651 } 14652 func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool { 14653 b := v.Block 14654 _ = b 14655 // match: (Div32 x y) 14656 // cond: 14657 // result: (Select0 (DIVL x y)) 14658 for { 14659 x := v.Args[0] 14660 y := v.Args[1] 14661 v.reset(OpSelect0) 14662 v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 14663 v0.AddArg(x) 14664 v0.AddArg(y) 14665 v.AddArg(v0) 14666 return true 14667 } 14668 } 14669 func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool { 14670 b := v.Block 14671 _ = b 14672 // match: (Div32F x y) 14673 // cond: 14674 // result: (DIVSS x y) 14675 for { 14676 x := v.Args[0] 14677 y := v.Args[1] 14678 v.reset(OpAMD64DIVSS) 14679 v.AddArg(x) 14680 v.AddArg(y) 14681 return true 14682 } 14683 } 14684 func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool { 14685 b := v.Block 14686 _ = b 14687 // match: (Div32u x y) 14688 // cond: 14689 // result: (Select0 (DIVLU x y)) 14690 for { 14691 x := v.Args[0] 14692 y := v.Args[1] 14693 v.reset(OpSelect0) 14694 v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 14695 v0.AddArg(x) 14696 v0.AddArg(y) 14697 v.AddArg(v0) 14698 return true 14699 } 14700 } 14701 func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool { 14702 b := v.Block 14703 _ = b 14704 // match: (Div64 x y) 14705 // cond: 14706 // result: (Select0 (DIVQ x y)) 14707 for { 14708 x := v.Args[0] 14709 y := v.Args[1] 14710 v.reset(OpSelect0) 14711 v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 14712 v0.AddArg(x) 14713 v0.AddArg(y) 14714 v.AddArg(v0) 14715 return true 14716 } 14717 } 14718 func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool { 14719 b := v.Block 14720 _ = 
b 14721 // match: (Div64F x y) 14722 // cond: 14723 // result: (DIVSD x y) 14724 for { 14725 x := v.Args[0] 14726 y := v.Args[1] 14727 v.reset(OpAMD64DIVSD) 14728 v.AddArg(x) 14729 v.AddArg(y) 14730 return true 14731 } 14732 } 14733 func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool { 14734 b := v.Block 14735 _ = b 14736 // match: (Div64u x y) 14737 // cond: 14738 // result: (Select0 (DIVQU x y)) 14739 for { 14740 x := v.Args[0] 14741 y := v.Args[1] 14742 v.reset(OpSelect0) 14743 v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 14744 v0.AddArg(x) 14745 v0.AddArg(y) 14746 v.AddArg(v0) 14747 return true 14748 } 14749 } 14750 func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool { 14751 b := v.Block 14752 _ = b 14753 // match: (Div8 x y) 14754 // cond: 14755 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 14756 for { 14757 x := v.Args[0] 14758 y := v.Args[1] 14759 v.reset(OpSelect0) 14760 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 14761 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 14762 v1.AddArg(x) 14763 v0.AddArg(v1) 14764 v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 14765 v2.AddArg(y) 14766 v0.AddArg(v2) 14767 v.AddArg(v0) 14768 return true 14769 } 14770 } 14771 func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool { 14772 b := v.Block 14773 _ = b 14774 // match: (Div8u x y) 14775 // cond: 14776 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 14777 for { 14778 x := v.Args[0] 14779 y := v.Args[1] 14780 v.reset(OpSelect0) 14781 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 14782 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 14783 v1.AddArg(x) 14784 v0.AddArg(v1) 14785 v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 14786 v2.AddArg(y) 14787 v0.AddArg(v2) 14788 v.AddArg(v0) 14789 return true 14790 } 14791 } 14792 func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool { 14793 b := v.Block 14794 _ = b 14795 // match: (Eq16 x y) 14796 // cond: 14797 // result: (SETEQ (CMPW x y)) 14798 for { 14799 x := v.Args[0] 14800 y := v.Args[1] 14801 v.reset(OpAMD64SETEQ) 14802 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14803 v0.AddArg(x) 14804 v0.AddArg(y) 14805 v.AddArg(v0) 14806 return true 14807 } 14808 } 14809 func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool { 14810 b := v.Block 14811 _ = b 14812 // match: (Eq32 x y) 14813 // cond: 14814 // result: (SETEQ (CMPL x y)) 14815 for { 14816 x := v.Args[0] 14817 y := v.Args[1] 14818 v.reset(OpAMD64SETEQ) 14819 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14820 v0.AddArg(x) 14821 v0.AddArg(y) 14822 v.AddArg(v0) 14823 return true 14824 } 14825 } 14826 func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool { 14827 b := v.Block 14828 _ = b 14829 // match: (Eq32F x y) 14830 // cond: 14831 // result: (SETEQF (UCOMISS x y)) 14832 for { 14833 x := v.Args[0] 14834 y := v.Args[1] 14835 v.reset(OpAMD64SETEQF) 14836 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 14837 v0.AddArg(x) 14838 v0.AddArg(y) 14839 v.AddArg(v0) 14840 return true 14841 } 14842 } 14843 func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool { 14844 b := v.Block 14845 _ = b 14846 // match: (Eq64 x y) 14847 // cond: 14848 // result: (SETEQ (CMPQ x y)) 14849 for { 14850 x := v.Args[0] 14851 y := v.Args[1] 14852 v.reset(OpAMD64SETEQ) 14853 v0 := b.NewValue0(v.Line, 
OpAMD64CMPQ, TypeFlags) 14854 v0.AddArg(x) 14855 v0.AddArg(y) 14856 v.AddArg(v0) 14857 return true 14858 } 14859 } 14860 func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool { 14861 b := v.Block 14862 _ = b 14863 // match: (Eq64F x y) 14864 // cond: 14865 // result: (SETEQF (UCOMISD x y)) 14866 for { 14867 x := v.Args[0] 14868 y := v.Args[1] 14869 v.reset(OpAMD64SETEQF) 14870 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 14871 v0.AddArg(x) 14872 v0.AddArg(y) 14873 v.AddArg(v0) 14874 return true 14875 } 14876 } 14877 func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { 14878 b := v.Block 14879 _ = b 14880 // match: (Eq8 x y) 14881 // cond: 14882 // result: (SETEQ (CMPB x y)) 14883 for { 14884 x := v.Args[0] 14885 y := v.Args[1] 14886 v.reset(OpAMD64SETEQ) 14887 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14888 v0.AddArg(x) 14889 v0.AddArg(y) 14890 v.AddArg(v0) 14891 return true 14892 } 14893 } 14894 func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool { 14895 b := v.Block 14896 _ = b 14897 // match: (EqB x y) 14898 // cond: 14899 // result: (SETEQ (CMPB x y)) 14900 for { 14901 x := v.Args[0] 14902 y := v.Args[1] 14903 v.reset(OpAMD64SETEQ) 14904 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 14905 v0.AddArg(x) 14906 v0.AddArg(y) 14907 v.AddArg(v0) 14908 return true 14909 } 14910 } 14911 func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { 14912 b := v.Block 14913 _ = b 14914 // match: (EqPtr x y) 14915 // cond: config.PtrSize == 8 14916 // result: (SETEQ (CMPQ x y)) 14917 for { 14918 x := v.Args[0] 14919 y := v.Args[1] 14920 if !(config.PtrSize == 8) { 14921 break 14922 } 14923 v.reset(OpAMD64SETEQ) 14924 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 14925 v0.AddArg(x) 14926 v0.AddArg(y) 14927 v.AddArg(v0) 14928 return true 14929 } 14930 // match: (EqPtr x y) 14931 // cond: config.PtrSize == 4 14932 // result: (SETEQ (CMPL x y)) 14933 for { 14934 x := v.Args[0] 14935 y := v.Args[1] 14936 if !(config.PtrSize == 4) { 14937 break 14938 } 14939 v.reset(OpAMD64SETEQ) 14940 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14941 v0.AddArg(x) 14942 v0.AddArg(y) 14943 v.AddArg(v0) 14944 return true 14945 } 14946 return false 14947 } 14948 func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool { 14949 b := v.Block 14950 _ = b 14951 // match: (Geq16 x y) 14952 // cond: 14953 // result: (SETGE (CMPW x y)) 14954 for { 14955 x := v.Args[0] 14956 y := v.Args[1] 14957 v.reset(OpAMD64SETGE) 14958 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14959 v0.AddArg(x) 14960 v0.AddArg(y) 14961 v.AddArg(v0) 14962 return true 14963 } 14964 } 14965 func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool { 14966 b := v.Block 14967 _ = b 14968 // match: (Geq16U x y) 14969 // cond: 14970 // result: (SETAE (CMPW x y)) 14971 for { 14972 x := v.Args[0] 14973 y := v.Args[1] 14974 v.reset(OpAMD64SETAE) 14975 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 14976 v0.AddArg(x) 14977 v0.AddArg(y) 14978 v.AddArg(v0) 14979 return true 14980 } 14981 } 14982 func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool { 14983 b := v.Block 14984 _ = b 14985 // match: (Geq32 x y) 14986 // cond: 14987 // result: (SETGE (CMPL x y)) 14988 for { 14989 x := v.Args[0] 14990 y := v.Args[1] 14991 v.reset(OpAMD64SETGE) 14992 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 14993 v0.AddArg(x) 14994 v0.AddArg(y) 14995 v.AddArg(v0) 14996 return true 14997 } 14998 } 14999 func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool { 15000 b := v.Block 15001 _ = b 15002 
// match: (Geq32F x y) 15003 // cond: 15004 // result: (SETGEF (UCOMISS x y)) 15005 for { 15006 x := v.Args[0] 15007 y := v.Args[1] 15008 v.reset(OpAMD64SETGEF) 15009 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15010 v0.AddArg(x) 15011 v0.AddArg(y) 15012 v.AddArg(v0) 15013 return true 15014 } 15015 } 15016 func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool { 15017 b := v.Block 15018 _ = b 15019 // match: (Geq32U x y) 15020 // cond: 15021 // result: (SETAE (CMPL x y)) 15022 for { 15023 x := v.Args[0] 15024 y := v.Args[1] 15025 v.reset(OpAMD64SETAE) 15026 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15027 v0.AddArg(x) 15028 v0.AddArg(y) 15029 v.AddArg(v0) 15030 return true 15031 } 15032 } 15033 func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool { 15034 b := v.Block 15035 _ = b 15036 // match: (Geq64 x y) 15037 // cond: 15038 // result: (SETGE (CMPQ x y)) 15039 for { 15040 x := v.Args[0] 15041 y := v.Args[1] 15042 v.reset(OpAMD64SETGE) 15043 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15044 v0.AddArg(x) 15045 v0.AddArg(y) 15046 v.AddArg(v0) 15047 return true 15048 } 15049 } 15050 func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool { 15051 b := v.Block 15052 _ = b 15053 // match: (Geq64F x y) 15054 // cond: 15055 // result: (SETGEF (UCOMISD x y)) 15056 for { 15057 x := v.Args[0] 15058 y := v.Args[1] 15059 v.reset(OpAMD64SETGEF) 15060 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15061 v0.AddArg(x) 15062 v0.AddArg(y) 15063 v.AddArg(v0) 15064 return true 15065 } 15066 } 15067 func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool { 15068 b := v.Block 15069 _ = b 15070 // match: (Geq64U x y) 15071 // cond: 15072 // result: (SETAE (CMPQ x y)) 15073 for { 15074 x := v.Args[0] 15075 y := v.Args[1] 15076 v.reset(OpAMD64SETAE) 15077 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15078 v0.AddArg(x) 15079 v0.AddArg(y) 15080 v.AddArg(v0) 15081 return true 15082 } 15083 } 15084 func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool { 15085 b := v.Block 15086 _ = b 15087 // match: (Geq8 x y) 15088 // cond: 15089 // result: (SETGE (CMPB x y)) 15090 for { 15091 x := v.Args[0] 15092 y := v.Args[1] 15093 v.reset(OpAMD64SETGE) 15094 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15095 v0.AddArg(x) 15096 v0.AddArg(y) 15097 v.AddArg(v0) 15098 return true 15099 } 15100 } 15101 func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool { 15102 b := v.Block 15103 _ = b 15104 // match: (Geq8U x y) 15105 // cond: 15106 // result: (SETAE (CMPB x y)) 15107 for { 15108 x := v.Args[0] 15109 y := v.Args[1] 15110 v.reset(OpAMD64SETAE) 15111 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15112 v0.AddArg(x) 15113 v0.AddArg(y) 15114 v.AddArg(v0) 15115 return true 15116 } 15117 } 15118 func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool { 15119 b := v.Block 15120 _ = b 15121 // match: (GetClosurePtr) 15122 // cond: 15123 // result: (LoweredGetClosurePtr) 15124 for { 15125 v.reset(OpAMD64LoweredGetClosurePtr) 15126 return true 15127 } 15128 } 15129 func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool { 15130 b := v.Block 15131 _ = b 15132 // match: (GetG mem) 15133 // cond: 15134 // result: (LoweredGetG mem) 15135 for { 15136 mem := v.Args[0] 15137 v.reset(OpAMD64LoweredGetG) 15138 v.AddArg(mem) 15139 return true 15140 } 15141 } 15142 func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool { 15143 b := v.Block 15144 _ = b 15145 // match: (GoCall [argwid] mem) 15146 // cond: 15147 // result: (CALLgo [argwid] 
mem) 15148 for { 15149 argwid := v.AuxInt 15150 mem := v.Args[0] 15151 v.reset(OpAMD64CALLgo) 15152 v.AuxInt = argwid 15153 v.AddArg(mem) 15154 return true 15155 } 15156 } 15157 func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool { 15158 b := v.Block 15159 _ = b 15160 // match: (Greater16 x y) 15161 // cond: 15162 // result: (SETG (CMPW x y)) 15163 for { 15164 x := v.Args[0] 15165 y := v.Args[1] 15166 v.reset(OpAMD64SETG) 15167 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15168 v0.AddArg(x) 15169 v0.AddArg(y) 15170 v.AddArg(v0) 15171 return true 15172 } 15173 } 15174 func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool { 15175 b := v.Block 15176 _ = b 15177 // match: (Greater16U x y) 15178 // cond: 15179 // result: (SETA (CMPW x y)) 15180 for { 15181 x := v.Args[0] 15182 y := v.Args[1] 15183 v.reset(OpAMD64SETA) 15184 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15185 v0.AddArg(x) 15186 v0.AddArg(y) 15187 v.AddArg(v0) 15188 return true 15189 } 15190 } 15191 func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool { 15192 b := v.Block 15193 _ = b 15194 // match: (Greater32 x y) 15195 // cond: 15196 // result: (SETG (CMPL x y)) 15197 for { 15198 x := v.Args[0] 15199 y := v.Args[1] 15200 v.reset(OpAMD64SETG) 15201 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15202 v0.AddArg(x) 15203 v0.AddArg(y) 15204 v.AddArg(v0) 15205 return true 15206 } 15207 } 15208 func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool { 15209 b := v.Block 15210 _ = b 15211 // match: (Greater32F x y) 15212 // cond: 15213 // result: (SETGF (UCOMISS x y)) 15214 for { 15215 x := v.Args[0] 15216 y := v.Args[1] 15217 v.reset(OpAMD64SETGF) 15218 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15219 v0.AddArg(x) 15220 v0.AddArg(y) 15221 v.AddArg(v0) 15222 return true 15223 } 15224 } 15225 func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool { 15226 b := v.Block 15227 _ = b 15228 // match: (Greater32U x y) 15229 // cond: 15230 // result: (SETA (CMPL x y)) 15231 for { 15232 x := v.Args[0] 15233 y := v.Args[1] 15234 v.reset(OpAMD64SETA) 15235 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15236 v0.AddArg(x) 15237 v0.AddArg(y) 15238 v.AddArg(v0) 15239 return true 15240 } 15241 } 15242 func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool { 15243 b := v.Block 15244 _ = b 15245 // match: (Greater64 x y) 15246 // cond: 15247 // result: (SETG (CMPQ x y)) 15248 for { 15249 x := v.Args[0] 15250 y := v.Args[1] 15251 v.reset(OpAMD64SETG) 15252 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15253 v0.AddArg(x) 15254 v0.AddArg(y) 15255 v.AddArg(v0) 15256 return true 15257 } 15258 } 15259 func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool { 15260 b := v.Block 15261 _ = b 15262 // match: (Greater64F x y) 15263 // cond: 15264 // result: (SETGF (UCOMISD x y)) 15265 for { 15266 x := v.Args[0] 15267 y := v.Args[1] 15268 v.reset(OpAMD64SETGF) 15269 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15270 v0.AddArg(x) 15271 v0.AddArg(y) 15272 v.AddArg(v0) 15273 return true 15274 } 15275 } 15276 func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool { 15277 b := v.Block 15278 _ = b 15279 // match: (Greater64U x y) 15280 // cond: 15281 // result: (SETA (CMPQ x y)) 15282 for { 15283 x := v.Args[0] 15284 y := v.Args[1] 15285 v.reset(OpAMD64SETA) 15286 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15287 v0.AddArg(x) 15288 v0.AddArg(y) 15289 v.AddArg(v0) 15290 return true 15291 } 15292 } 15293 func 
rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 15294 b := v.Block 15295 _ = b 15296 // match: (Greater8 x y) 15297 // cond: 15298 // result: (SETG (CMPB x y)) 15299 for { 15300 x := v.Args[0] 15301 y := v.Args[1] 15302 v.reset(OpAMD64SETG) 15303 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15304 v0.AddArg(x) 15305 v0.AddArg(y) 15306 v.AddArg(v0) 15307 return true 15308 } 15309 } 15310 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 15311 b := v.Block 15312 _ = b 15313 // match: (Greater8U x y) 15314 // cond: 15315 // result: (SETA (CMPB x y)) 15316 for { 15317 x := v.Args[0] 15318 y := v.Args[1] 15319 v.reset(OpAMD64SETA) 15320 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15321 v0.AddArg(x) 15322 v0.AddArg(y) 15323 v.AddArg(v0) 15324 return true 15325 } 15326 } 15327 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 15328 b := v.Block 15329 _ = b 15330 // match: (Hmul16 x y) 15331 // cond: 15332 // result: (HMULW x y) 15333 for { 15334 x := v.Args[0] 15335 y := v.Args[1] 15336 v.reset(OpAMD64HMULW) 15337 v.AddArg(x) 15338 v.AddArg(y) 15339 return true 15340 } 15341 } 15342 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 15343 b := v.Block 15344 _ = b 15345 // match: (Hmul16u x y) 15346 // cond: 15347 // result: (HMULWU x y) 15348 for { 15349 x := v.Args[0] 15350 y := v.Args[1] 15351 v.reset(OpAMD64HMULWU) 15352 v.AddArg(x) 15353 v.AddArg(y) 15354 return true 15355 } 15356 } 15357 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 15358 b := v.Block 15359 _ = b 15360 // match: (Hmul32 x y) 15361 // cond: 15362 // result: (HMULL x y) 15363 for { 15364 x := v.Args[0] 15365 y := v.Args[1] 15366 v.reset(OpAMD64HMULL) 15367 v.AddArg(x) 15368 v.AddArg(y) 15369 return true 15370 } 15371 } 15372 func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 15373 b := v.Block 15374 _ = b 15375 // match: (Hmul32u x y) 15376 // cond: 15377 // result: (HMULLU x y) 15378 for { 15379 x := v.Args[0] 15380 y := v.Args[1] 15381 v.reset(OpAMD64HMULLU) 15382 v.AddArg(x) 15383 v.AddArg(y) 15384 return true 15385 } 15386 } 15387 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 15388 b := v.Block 15389 _ = b 15390 // match: (Hmul64 x y) 15391 // cond: 15392 // result: (HMULQ x y) 15393 for { 15394 x := v.Args[0] 15395 y := v.Args[1] 15396 v.reset(OpAMD64HMULQ) 15397 v.AddArg(x) 15398 v.AddArg(y) 15399 return true 15400 } 15401 } 15402 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 15403 b := v.Block 15404 _ = b 15405 // match: (Hmul64u x y) 15406 // cond: 15407 // result: (HMULQU x y) 15408 for { 15409 x := v.Args[0] 15410 y := v.Args[1] 15411 v.reset(OpAMD64HMULQU) 15412 v.AddArg(x) 15413 v.AddArg(y) 15414 return true 15415 } 15416 } 15417 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 15418 b := v.Block 15419 _ = b 15420 // match: (Hmul8 x y) 15421 // cond: 15422 // result: (HMULB x y) 15423 for { 15424 x := v.Args[0] 15425 y := v.Args[1] 15426 v.reset(OpAMD64HMULB) 15427 v.AddArg(x) 15428 v.AddArg(y) 15429 return true 15430 } 15431 } 15432 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 15433 b := v.Block 15434 _ = b 15435 // match: (Hmul8u x y) 15436 // cond: 15437 // result: (HMULBU x y) 15438 for { 15439 x := v.Args[0] 15440 y := v.Args[1] 15441 v.reset(OpAMD64HMULBU) 15442 v.AddArg(x) 15443 v.AddArg(y) 15444 return true 15445 } 15446 } 15447 func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool { 15448 b := v.Block 15449 _ = b 15450 
// match: (Int64Hi x) 15451 // cond: 15452 // result: (SHRQconst [32] x) 15453 for { 15454 x := v.Args[0] 15455 v.reset(OpAMD64SHRQconst) 15456 v.AuxInt = 32 15457 v.AddArg(x) 15458 return true 15459 } 15460 } 15461 func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool { 15462 b := v.Block 15463 _ = b 15464 // match: (InterCall [argwid] entry mem) 15465 // cond: 15466 // result: (CALLinter [argwid] entry mem) 15467 for { 15468 argwid := v.AuxInt 15469 entry := v.Args[0] 15470 mem := v.Args[1] 15471 v.reset(OpAMD64CALLinter) 15472 v.AuxInt = argwid 15473 v.AddArg(entry) 15474 v.AddArg(mem) 15475 return true 15476 } 15477 } 15478 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 15479 b := v.Block 15480 _ = b 15481 // match: (IsInBounds idx len) 15482 // cond: 15483 // result: (SETB (CMPQ idx len)) 15484 for { 15485 idx := v.Args[0] 15486 len := v.Args[1] 15487 v.reset(OpAMD64SETB) 15488 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15489 v0.AddArg(idx) 15490 v0.AddArg(len) 15491 v.AddArg(v0) 15492 return true 15493 } 15494 } 15495 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 15496 b := v.Block 15497 _ = b 15498 // match: (IsNonNil p) 15499 // cond: config.PtrSize == 8 15500 // result: (SETNE (TESTQ p p)) 15501 for { 15502 p := v.Args[0] 15503 if !(config.PtrSize == 8) { 15504 break 15505 } 15506 v.reset(OpAMD64SETNE) 15507 v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags) 15508 v0.AddArg(p) 15509 v0.AddArg(p) 15510 v.AddArg(v0) 15511 return true 15512 } 15513 // match: (IsNonNil p) 15514 // cond: config.PtrSize == 4 15515 // result: (SETNE (TESTL p p)) 15516 for { 15517 p := v.Args[0] 15518 if !(config.PtrSize == 4) { 15519 break 15520 } 15521 v.reset(OpAMD64SETNE) 15522 v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags) 15523 v0.AddArg(p) 15524 v0.AddArg(p) 15525 v.AddArg(v0) 15526 return true 15527 } 15528 return false 15529 } 15530 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 15531 b := v.Block 15532 _ = b 15533 // match: (IsSliceInBounds idx len) 15534 // cond: 15535 // result: (SETBE (CMPQ idx len)) 15536 for { 15537 idx := v.Args[0] 15538 len := v.Args[1] 15539 v.reset(OpAMD64SETBE) 15540 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15541 v0.AddArg(idx) 15542 v0.AddArg(len) 15543 v.AddArg(v0) 15544 return true 15545 } 15546 } 15547 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 15548 b := v.Block 15549 _ = b 15550 // match: (Leq16 x y) 15551 // cond: 15552 // result: (SETLE (CMPW x y)) 15553 for { 15554 x := v.Args[0] 15555 y := v.Args[1] 15556 v.reset(OpAMD64SETLE) 15557 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15558 v0.AddArg(x) 15559 v0.AddArg(y) 15560 v.AddArg(v0) 15561 return true 15562 } 15563 } 15564 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 15565 b := v.Block 15566 _ = b 15567 // match: (Leq16U x y) 15568 // cond: 15569 // result: (SETBE (CMPW x y)) 15570 for { 15571 x := v.Args[0] 15572 y := v.Args[1] 15573 v.reset(OpAMD64SETBE) 15574 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15575 v0.AddArg(x) 15576 v0.AddArg(y) 15577 v.AddArg(v0) 15578 return true 15579 } 15580 } 15581 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 15582 b := v.Block 15583 _ = b 15584 // match: (Leq32 x y) 15585 // cond: 15586 // result: (SETLE (CMPL x y)) 15587 for { 15588 x := v.Args[0] 15589 y := v.Args[1] 15590 v.reset(OpAMD64SETLE) 15591 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15592 v0.AddArg(x) 15593 v0.AddArg(y) 15594 
v.AddArg(v0) 15595 return true 15596 } 15597 } 15598 func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool { 15599 b := v.Block 15600 _ = b 15601 // match: (Leq32F x y) 15602 // cond: 15603 // result: (SETGEF (UCOMISS y x)) 15604 for { 15605 x := v.Args[0] 15606 y := v.Args[1] 15607 v.reset(OpAMD64SETGEF) 15608 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15609 v0.AddArg(y) 15610 v0.AddArg(x) 15611 v.AddArg(v0) 15612 return true 15613 } 15614 } 15615 func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool { 15616 b := v.Block 15617 _ = b 15618 // match: (Leq32U x y) 15619 // cond: 15620 // result: (SETBE (CMPL x y)) 15621 for { 15622 x := v.Args[0] 15623 y := v.Args[1] 15624 v.reset(OpAMD64SETBE) 15625 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15626 v0.AddArg(x) 15627 v0.AddArg(y) 15628 v.AddArg(v0) 15629 return true 15630 } 15631 } 15632 func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool { 15633 b := v.Block 15634 _ = b 15635 // match: (Leq64 x y) 15636 // cond: 15637 // result: (SETLE (CMPQ x y)) 15638 for { 15639 x := v.Args[0] 15640 y := v.Args[1] 15641 v.reset(OpAMD64SETLE) 15642 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15643 v0.AddArg(x) 15644 v0.AddArg(y) 15645 v.AddArg(v0) 15646 return true 15647 } 15648 } 15649 func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool { 15650 b := v.Block 15651 _ = b 15652 // match: (Leq64F x y) 15653 // cond: 15654 // result: (SETGEF (UCOMISD y x)) 15655 for { 15656 x := v.Args[0] 15657 y := v.Args[1] 15658 v.reset(OpAMD64SETGEF) 15659 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15660 v0.AddArg(y) 15661 v0.AddArg(x) 15662 v.AddArg(v0) 15663 return true 15664 } 15665 } 15666 func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool { 15667 b := v.Block 15668 _ = b 15669 // match: (Leq64U x y) 15670 // cond: 15671 // result: (SETBE (CMPQ x y)) 15672 for { 15673 x := v.Args[0] 15674 y := v.Args[1] 15675 v.reset(OpAMD64SETBE) 15676 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15677 v0.AddArg(x) 15678 v0.AddArg(y) 15679 v.AddArg(v0) 15680 return true 15681 } 15682 } 15683 func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool { 15684 b := v.Block 15685 _ = b 15686 // match: (Leq8 x y) 15687 // cond: 15688 // result: (SETLE (CMPB x y)) 15689 for { 15690 x := v.Args[0] 15691 y := v.Args[1] 15692 v.reset(OpAMD64SETLE) 15693 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15694 v0.AddArg(x) 15695 v0.AddArg(y) 15696 v.AddArg(v0) 15697 return true 15698 } 15699 } 15700 func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool { 15701 b := v.Block 15702 _ = b 15703 // match: (Leq8U x y) 15704 // cond: 15705 // result: (SETBE (CMPB x y)) 15706 for { 15707 x := v.Args[0] 15708 y := v.Args[1] 15709 v.reset(OpAMD64SETBE) 15710 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15711 v0.AddArg(x) 15712 v0.AddArg(y) 15713 v.AddArg(v0) 15714 return true 15715 } 15716 } 15717 func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool { 15718 b := v.Block 15719 _ = b 15720 // match: (Less16 x y) 15721 // cond: 15722 // result: (SETL (CMPW x y)) 15723 for { 15724 x := v.Args[0] 15725 y := v.Args[1] 15726 v.reset(OpAMD64SETL) 15727 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15728 v0.AddArg(x) 15729 v0.AddArg(y) 15730 v.AddArg(v0) 15731 return true 15732 } 15733 } 15734 func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool { 15735 b := v.Block 15736 _ = b 15737 // match: (Less16U x y) 15738 // cond: 15739 // result: (SETB (CMPW x y)) 15740 for { 15741 x := 
v.Args[0] 15742 y := v.Args[1] 15743 v.reset(OpAMD64SETB) 15744 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15745 v0.AddArg(x) 15746 v0.AddArg(y) 15747 v.AddArg(v0) 15748 return true 15749 } 15750 } 15751 func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool { 15752 b := v.Block 15753 _ = b 15754 // match: (Less32 x y) 15755 // cond: 15756 // result: (SETL (CMPL x y)) 15757 for { 15758 x := v.Args[0] 15759 y := v.Args[1] 15760 v.reset(OpAMD64SETL) 15761 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15762 v0.AddArg(x) 15763 v0.AddArg(y) 15764 v.AddArg(v0) 15765 return true 15766 } 15767 } 15768 func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool { 15769 b := v.Block 15770 _ = b 15771 // match: (Less32F x y) 15772 // cond: 15773 // result: (SETGF (UCOMISS y x)) 15774 for { 15775 x := v.Args[0] 15776 y := v.Args[1] 15777 v.reset(OpAMD64SETGF) 15778 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15779 v0.AddArg(y) 15780 v0.AddArg(x) 15781 v.AddArg(v0) 15782 return true 15783 } 15784 } 15785 func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool { 15786 b := v.Block 15787 _ = b 15788 // match: (Less32U x y) 15789 // cond: 15790 // result: (SETB (CMPL x y)) 15791 for { 15792 x := v.Args[0] 15793 y := v.Args[1] 15794 v.reset(OpAMD64SETB) 15795 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15796 v0.AddArg(x) 15797 v0.AddArg(y) 15798 v.AddArg(v0) 15799 return true 15800 } 15801 } 15802 func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool { 15803 b := v.Block 15804 _ = b 15805 // match: (Less64 x y) 15806 // cond: 15807 // result: (SETL (CMPQ x y)) 15808 for { 15809 x := v.Args[0] 15810 y := v.Args[1] 15811 v.reset(OpAMD64SETL) 15812 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15813 v0.AddArg(x) 15814 v0.AddArg(y) 15815 v.AddArg(v0) 15816 return true 15817 } 15818 } 15819 func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool { 15820 b := v.Block 15821 _ = b 15822 // match: (Less64F x y) 15823 // cond: 15824 // result: (SETGF (UCOMISD y x)) 15825 for { 15826 x := v.Args[0] 15827 y := v.Args[1] 15828 v.reset(OpAMD64SETGF) 15829 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15830 v0.AddArg(y) 15831 v0.AddArg(x) 15832 v.AddArg(v0) 15833 return true 15834 } 15835 } 15836 func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool { 15837 b := v.Block 15838 _ = b 15839 // match: (Less64U x y) 15840 // cond: 15841 // result: (SETB (CMPQ x y)) 15842 for { 15843 x := v.Args[0] 15844 y := v.Args[1] 15845 v.reset(OpAMD64SETB) 15846 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15847 v0.AddArg(x) 15848 v0.AddArg(y) 15849 v.AddArg(v0) 15850 return true 15851 } 15852 } 15853 func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool { 15854 b := v.Block 15855 _ = b 15856 // match: (Less8 x y) 15857 // cond: 15858 // result: (SETL (CMPB x y)) 15859 for { 15860 x := v.Args[0] 15861 y := v.Args[1] 15862 v.reset(OpAMD64SETL) 15863 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15864 v0.AddArg(x) 15865 v0.AddArg(y) 15866 v.AddArg(v0) 15867 return true 15868 } 15869 } 15870 func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool { 15871 b := v.Block 15872 _ = b 15873 // match: (Less8U x y) 15874 // cond: 15875 // result: (SETB (CMPB x y)) 15876 for { 15877 x := v.Args[0] 15878 y := v.Args[1] 15879 v.reset(OpAMD64SETB) 15880 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15881 v0.AddArg(x) 15882 v0.AddArg(y) 15883 v.AddArg(v0) 15884 return true 15885 } 15886 } 15887 func rewriteValueAMD64_OpLoad(v *Value, config 
*Config) bool { 15888 b := v.Block 15889 _ = b 15890 // match: (Load <t> ptr mem) 15891 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 15892 // result: (MOVQload ptr mem) 15893 for { 15894 t := v.Type 15895 ptr := v.Args[0] 15896 mem := v.Args[1] 15897 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 15898 break 15899 } 15900 v.reset(OpAMD64MOVQload) 15901 v.AddArg(ptr) 15902 v.AddArg(mem) 15903 return true 15904 } 15905 // match: (Load <t> ptr mem) 15906 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 15907 // result: (MOVLload ptr mem) 15908 for { 15909 t := v.Type 15910 ptr := v.Args[0] 15911 mem := v.Args[1] 15912 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 15913 break 15914 } 15915 v.reset(OpAMD64MOVLload) 15916 v.AddArg(ptr) 15917 v.AddArg(mem) 15918 return true 15919 } 15920 // match: (Load <t> ptr mem) 15921 // cond: is16BitInt(t) 15922 // result: (MOVWload ptr mem) 15923 for { 15924 t := v.Type 15925 ptr := v.Args[0] 15926 mem := v.Args[1] 15927 if !(is16BitInt(t)) { 15928 break 15929 } 15930 v.reset(OpAMD64MOVWload) 15931 v.AddArg(ptr) 15932 v.AddArg(mem) 15933 return true 15934 } 15935 // match: (Load <t> ptr mem) 15936 // cond: (t.IsBoolean() || is8BitInt(t)) 15937 // result: (MOVBload ptr mem) 15938 for { 15939 t := v.Type 15940 ptr := v.Args[0] 15941 mem := v.Args[1] 15942 if !(t.IsBoolean() || is8BitInt(t)) { 15943 break 15944 } 15945 v.reset(OpAMD64MOVBload) 15946 v.AddArg(ptr) 15947 v.AddArg(mem) 15948 return true 15949 } 15950 // match: (Load <t> ptr mem) 15951 // cond: is32BitFloat(t) 15952 // result: (MOVSSload ptr mem) 15953 for { 15954 t := v.Type 15955 ptr := v.Args[0] 15956 mem := v.Args[1] 15957 if !(is32BitFloat(t)) { 15958 break 15959 } 15960 v.reset(OpAMD64MOVSSload) 15961 v.AddArg(ptr) 15962 v.AddArg(mem) 15963 return true 15964 } 15965 // match: (Load <t> ptr mem) 15966 // cond: is64BitFloat(t) 15967 // result: (MOVSDload ptr mem) 15968 for { 15969 t := v.Type 15970 ptr := v.Args[0] 15971 mem := v.Args[1] 15972 if !(is64BitFloat(t)) { 15973 break 15974 } 15975 v.reset(OpAMD64MOVSDload) 15976 v.AddArg(ptr) 15977 v.AddArg(mem) 15978 return true 15979 } 15980 return false 15981 } 15982 func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool { 15983 b := v.Block 15984 _ = b 15985 // match: (Lrot16 <t> x [c]) 15986 // cond: 15987 // result: (ROLWconst <t> [c&15] x) 15988 for { 15989 t := v.Type 15990 c := v.AuxInt 15991 x := v.Args[0] 15992 v.reset(OpAMD64ROLWconst) 15993 v.Type = t 15994 v.AuxInt = c & 15 15995 v.AddArg(x) 15996 return true 15997 } 15998 } 15999 func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool { 16000 b := v.Block 16001 _ = b 16002 // match: (Lrot32 <t> x [c]) 16003 // cond: 16004 // result: (ROLLconst <t> [c&31] x) 16005 for { 16006 t := v.Type 16007 c := v.AuxInt 16008 x := v.Args[0] 16009 v.reset(OpAMD64ROLLconst) 16010 v.Type = t 16011 v.AuxInt = c & 31 16012 v.AddArg(x) 16013 return true 16014 } 16015 } 16016 func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool { 16017 b := v.Block 16018 _ = b 16019 // match: (Lrot64 <t> x [c]) 16020 // cond: 16021 // result: (ROLQconst <t> [c&63] x) 16022 for { 16023 t := v.Type 16024 c := v.AuxInt 16025 x := v.Args[0] 16026 v.reset(OpAMD64ROLQconst) 16027 v.Type = t 16028 v.AuxInt = c & 63 16029 v.AddArg(x) 16030 return true 16031 } 16032 } 16033 func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool { 16034 b := v.Block 16035 _ = b 16036 // match: (Lrot8 <t> x [c]) 16037 // cond: 16038 // result: (ROLBconst <t> [c&7] x) 
16039 for { 16040 t := v.Type 16041 c := v.AuxInt 16042 x := v.Args[0] 16043 v.reset(OpAMD64ROLBconst) 16044 v.Type = t 16045 v.AuxInt = c & 7 16046 v.AddArg(x) 16047 return true 16048 } 16049 } 16050 func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { 16051 b := v.Block 16052 _ = b 16053 // match: (Lsh16x16 <t> x y) 16054 // cond: 16055 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 16056 for { 16057 t := v.Type 16058 x := v.Args[0] 16059 y := v.Args[1] 16060 v.reset(OpAMD64ANDL) 16061 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16062 v0.AddArg(x) 16063 v0.AddArg(y) 16064 v.AddArg(v0) 16065 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16066 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 16067 v2.AuxInt = 32 16068 v2.AddArg(y) 16069 v1.AddArg(v2) 16070 v.AddArg(v1) 16071 return true 16072 } 16073 } 16074 func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { 16075 b := v.Block 16076 _ = b 16077 // match: (Lsh16x32 <t> x y) 16078 // cond: 16079 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 16080 for { 16081 t := v.Type 16082 x := v.Args[0] 16083 y := v.Args[1] 16084 v.reset(OpAMD64ANDL) 16085 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16086 v0.AddArg(x) 16087 v0.AddArg(y) 16088 v.AddArg(v0) 16089 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16090 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 16091 v2.AuxInt = 32 16092 v2.AddArg(y) 16093 v1.AddArg(v2) 16094 v.AddArg(v1) 16095 return true 16096 } 16097 } 16098 func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { 16099 b := v.Block 16100 _ = b 16101 // match: (Lsh16x64 <t> x y) 16102 // cond: 16103 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 16104 for { 16105 t := v.Type 16106 x := v.Args[0] 16107 y := v.Args[1] 16108 v.reset(OpAMD64ANDL) 16109 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16110 v0.AddArg(x) 16111 v0.AddArg(y) 16112 v.AddArg(v0) 16113 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16114 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 16115 v2.AuxInt = 32 16116 v2.AddArg(y) 16117 v1.AddArg(v2) 16118 v.AddArg(v1) 16119 return true 16120 } 16121 } 16122 func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { 16123 b := v.Block 16124 _ = b 16125 // match: (Lsh16x8 <t> x y) 16126 // cond: 16127 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 16128 for { 16129 t := v.Type 16130 x := v.Args[0] 16131 y := v.Args[1] 16132 v.reset(OpAMD64ANDL) 16133 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16134 v0.AddArg(x) 16135 v0.AddArg(y) 16136 v.AddArg(v0) 16137 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16138 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 16139 v2.AuxInt = 32 16140 v2.AddArg(y) 16141 v1.AddArg(v2) 16142 v.AddArg(v1) 16143 return true 16144 } 16145 } 16146 func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool { 16147 b := v.Block 16148 _ = b 16149 // match: (Lsh32x16 <t> x y) 16150 // cond: 16151 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 16152 for { 16153 t := v.Type 16154 x := v.Args[0] 16155 y := v.Args[1] 16156 v.reset(OpAMD64ANDL) 16157 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16158 v0.AddArg(x) 16159 v0.AddArg(y) 16160 v.AddArg(v0) 16161 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16162 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 16163 v2.AuxInt = 32 16164 v2.AddArg(y) 16165 v1.AddArg(v2) 16166 v.AddArg(v1) 16167 return true 16168 } 16169 } 16170 func 
rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool { 16171 b := v.Block 16172 _ = b 16173 // match: (Lsh32x32 <t> x y) 16174 // cond: 16175 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 16176 for { 16177 t := v.Type 16178 x := v.Args[0] 16179 y := v.Args[1] 16180 v.reset(OpAMD64ANDL) 16181 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16182 v0.AddArg(x) 16183 v0.AddArg(y) 16184 v.AddArg(v0) 16185 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16186 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 16187 v2.AuxInt = 32 16188 v2.AddArg(y) 16189 v1.AddArg(v2) 16190 v.AddArg(v1) 16191 return true 16192 } 16193 } 16194 func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool { 16195 b := v.Block 16196 _ = b 16197 // match: (Lsh32x64 <t> x y) 16198 // cond: 16199 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 16200 for { 16201 t := v.Type 16202 x := v.Args[0] 16203 y := v.Args[1] 16204 v.reset(OpAMD64ANDL) 16205 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16206 v0.AddArg(x) 16207 v0.AddArg(y) 16208 v.AddArg(v0) 16209 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16210 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 16211 v2.AuxInt = 32 16212 v2.AddArg(y) 16213 v1.AddArg(v2) 16214 v.AddArg(v1) 16215 return true 16216 } 16217 } 16218 func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool { 16219 b := v.Block 16220 _ = b 16221 // match: (Lsh32x8 <t> x y) 16222 // cond: 16223 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 16224 for { 16225 t := v.Type 16226 x := v.Args[0] 16227 y := v.Args[1] 16228 v.reset(OpAMD64ANDL) 16229 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16230 v0.AddArg(x) 16231 v0.AddArg(y) 16232 v.AddArg(v0) 16233 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16234 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 16235 v2.AuxInt = 32 16236 v2.AddArg(y) 16237 v1.AddArg(v2) 16238 v.AddArg(v1) 16239 return true 16240 } 16241 } 16242 func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool { 16243 b := v.Block 16244 _ = b 16245 // match: (Lsh64x16 <t> x y) 16246 // cond: 16247 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 16248 for { 16249 t := v.Type 16250 x := v.Args[0] 16251 y := v.Args[1] 16252 v.reset(OpAMD64ANDQ) 16253 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 16254 v0.AddArg(x) 16255 v0.AddArg(y) 16256 v.AddArg(v0) 16257 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 16258 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 16259 v2.AuxInt = 64 16260 v2.AddArg(y) 16261 v1.AddArg(v2) 16262 v.AddArg(v1) 16263 return true 16264 } 16265 } 16266 func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool { 16267 b := v.Block 16268 _ = b 16269 // match: (Lsh64x32 <t> x y) 16270 // cond: 16271 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 16272 for { 16273 t := v.Type 16274 x := v.Args[0] 16275 y := v.Args[1] 16276 v.reset(OpAMD64ANDQ) 16277 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 16278 v0.AddArg(x) 16279 v0.AddArg(y) 16280 v.AddArg(v0) 16281 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 16282 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 16283 v2.AuxInt = 64 16284 v2.AddArg(y) 16285 v1.AddArg(v2) 16286 v.AddArg(v1) 16287 return true 16288 } 16289 } 16290 func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool { 16291 b := v.Block 16292 _ = b 16293 // match: (Lsh64x64 <t> x y) 16294 // cond: 16295 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 
16296 for { 16297 t := v.Type 16298 x := v.Args[0] 16299 y := v.Args[1] 16300 v.reset(OpAMD64ANDQ) 16301 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 16302 v0.AddArg(x) 16303 v0.AddArg(y) 16304 v.AddArg(v0) 16305 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 16306 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 16307 v2.AuxInt = 64 16308 v2.AddArg(y) 16309 v1.AddArg(v2) 16310 v.AddArg(v1) 16311 return true 16312 } 16313 } 16314 func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool { 16315 b := v.Block 16316 _ = b 16317 // match: (Lsh64x8 <t> x y) 16318 // cond: 16319 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 16320 for { 16321 t := v.Type 16322 x := v.Args[0] 16323 y := v.Args[1] 16324 v.reset(OpAMD64ANDQ) 16325 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 16326 v0.AddArg(x) 16327 v0.AddArg(y) 16328 v.AddArg(v0) 16329 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 16330 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 16331 v2.AuxInt = 64 16332 v2.AddArg(y) 16333 v1.AddArg(v2) 16334 v.AddArg(v1) 16335 return true 16336 } 16337 } 16338 func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { 16339 b := v.Block 16340 _ = b 16341 // match: (Lsh8x16 <t> x y) 16342 // cond: 16343 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 16344 for { 16345 t := v.Type 16346 x := v.Args[0] 16347 y := v.Args[1] 16348 v.reset(OpAMD64ANDL) 16349 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16350 v0.AddArg(x) 16351 v0.AddArg(y) 16352 v.AddArg(v0) 16353 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16354 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 16355 v2.AuxInt = 32 16356 v2.AddArg(y) 16357 v1.AddArg(v2) 16358 v.AddArg(v1) 16359 return true 16360 } 16361 } 16362 func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { 16363 b := v.Block 16364 _ = b 16365 // match: (Lsh8x32 <t> x y) 16366 // cond: 16367 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 16368 for { 16369 t := v.Type 16370 x := v.Args[0] 16371 y := v.Args[1] 16372 v.reset(OpAMD64ANDL) 16373 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16374 v0.AddArg(x) 16375 v0.AddArg(y) 16376 v.AddArg(v0) 16377 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16378 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 16379 v2.AuxInt = 32 16380 v2.AddArg(y) 16381 v1.AddArg(v2) 16382 v.AddArg(v1) 16383 return true 16384 } 16385 } 16386 func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { 16387 b := v.Block 16388 _ = b 16389 // match: (Lsh8x64 <t> x y) 16390 // cond: 16391 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 16392 for { 16393 t := v.Type 16394 x := v.Args[0] 16395 y := v.Args[1] 16396 v.reset(OpAMD64ANDL) 16397 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16398 v0.AddArg(x) 16399 v0.AddArg(y) 16400 v.AddArg(v0) 16401 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16402 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 16403 v2.AuxInt = 32 16404 v2.AddArg(y) 16405 v1.AddArg(v2) 16406 v.AddArg(v1) 16407 return true 16408 } 16409 } 16410 func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { 16411 b := v.Block 16412 _ = b 16413 // match: (Lsh8x8 <t> x y) 16414 // cond: 16415 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 16416 for { 16417 t := v.Type 16418 x := v.Args[0] 16419 y := v.Args[1] 16420 v.reset(OpAMD64ANDL) 16421 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 16422 v0.AddArg(x) 16423 v0.AddArg(y) 16424 v.AddArg(v0) 16425 v1 := 
b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 16426 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 16427 v2.AuxInt = 32 16428 v2.AddArg(y) 16429 v1.AddArg(v2) 16430 v.AddArg(v1) 16431 return true 16432 } 16433 } 16434 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { 16435 b := v.Block 16436 _ = b 16437 // match: (Mod16 x y) 16438 // cond: 16439 // result: (Select1 (DIVW x y)) 16440 for { 16441 x := v.Args[0] 16442 y := v.Args[1] 16443 v.reset(OpSelect1) 16444 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16445 v0.AddArg(x) 16446 v0.AddArg(y) 16447 v.AddArg(v0) 16448 return true 16449 } 16450 } 16451 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { 16452 b := v.Block 16453 _ = b 16454 // match: (Mod16u x y) 16455 // cond: 16456 // result: (Select1 (DIVWU x y)) 16457 for { 16458 x := v.Args[0] 16459 y := v.Args[1] 16460 v.reset(OpSelect1) 16461 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16462 v0.AddArg(x) 16463 v0.AddArg(y) 16464 v.AddArg(v0) 16465 return true 16466 } 16467 } 16468 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { 16469 b := v.Block 16470 _ = b 16471 // match: (Mod32 x y) 16472 // cond: 16473 // result: (Select1 (DIVL x y)) 16474 for { 16475 x := v.Args[0] 16476 y := v.Args[1] 16477 v.reset(OpSelect1) 16478 v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 16479 v0.AddArg(x) 16480 v0.AddArg(y) 16481 v.AddArg(v0) 16482 return true 16483 } 16484 } 16485 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool { 16486 b := v.Block 16487 _ = b 16488 // match: (Mod32u x y) 16489 // cond: 16490 // result: (Select1 (DIVLU x y)) 16491 for { 16492 x := v.Args[0] 16493 y := v.Args[1] 16494 v.reset(OpSelect1) 16495 v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 16496 v0.AddArg(x) 16497 v0.AddArg(y) 16498 v.AddArg(v0) 16499 return true 16500 } 16501 } 16502 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { 16503 b := v.Block 16504 _ = b 16505 // match: (Mod64 x y) 16506 // cond: 16507 // result: (Select1 (DIVQ x y)) 16508 for { 16509 x := v.Args[0] 16510 y := v.Args[1] 16511 v.reset(OpSelect1) 16512 v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 16513 v0.AddArg(x) 16514 v0.AddArg(y) 16515 v.AddArg(v0) 16516 return true 16517 } 16518 } 16519 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { 16520 b := v.Block 16521 _ = b 16522 // match: (Mod64u x y) 16523 // cond: 16524 // result: (Select1 (DIVQU x y)) 16525 for { 16526 x := v.Args[0] 16527 y := v.Args[1] 16528 v.reset(OpSelect1) 16529 v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 16530 v0.AddArg(x) 16531 v0.AddArg(y) 16532 v.AddArg(v0) 16533 return true 16534 } 16535 } 16536 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { 16537 b := v.Block 16538 _ = b 16539 // match: (Mod8 x y) 16540 // cond: 16541 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 16542 for { 16543 x := v.Args[0] 16544 y := v.Args[1] 16545 v.reset(OpSelect1) 16546 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16547 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 16548 v1.AddArg(x) 16549 v0.AddArg(v1) 16550 v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 16551 v2.AddArg(y) 
16552 v0.AddArg(v2) 16553 v.AddArg(v0) 16554 return true 16555 } 16556 } 16557 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { 16558 b := v.Block 16559 _ = b 16560 // match: (Mod8u x y) 16561 // cond: 16562 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 16563 for { 16564 x := v.Args[0] 16565 y := v.Args[1] 16566 v.reset(OpSelect1) 16567 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16568 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 16569 v1.AddArg(x) 16570 v0.AddArg(v1) 16571 v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 16572 v2.AddArg(y) 16573 v0.AddArg(v2) 16574 v.AddArg(v0) 16575 return true 16576 } 16577 } 16578 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { 16579 b := v.Block 16580 _ = b 16581 // match: (Move [s] _ _ mem) 16582 // cond: SizeAndAlign(s).Size() == 0 16583 // result: mem 16584 for { 16585 s := v.AuxInt 16586 mem := v.Args[2] 16587 if !(SizeAndAlign(s).Size() == 0) { 16588 break 16589 } 16590 v.reset(OpCopy) 16591 v.Type = mem.Type 16592 v.AddArg(mem) 16593 return true 16594 } 16595 // match: (Move [s] dst src mem) 16596 // cond: SizeAndAlign(s).Size() == 1 16597 // result: (MOVBstore dst (MOVBload src mem) mem) 16598 for { 16599 s := v.AuxInt 16600 dst := v.Args[0] 16601 src := v.Args[1] 16602 mem := v.Args[2] 16603 if !(SizeAndAlign(s).Size() == 1) { 16604 break 16605 } 16606 v.reset(OpAMD64MOVBstore) 16607 v.AddArg(dst) 16608 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 16609 v0.AddArg(src) 16610 v0.AddArg(mem) 16611 v.AddArg(v0) 16612 v.AddArg(mem) 16613 return true 16614 } 16615 // match: (Move [s] dst src mem) 16616 // cond: SizeAndAlign(s).Size() == 2 16617 // result: (MOVWstore dst (MOVWload src mem) mem) 16618 for { 16619 s := v.AuxInt 16620 dst := v.Args[0] 16621 src := v.Args[1] 16622 mem := v.Args[2] 16623 if !(SizeAndAlign(s).Size() == 2) { 16624 break 16625 } 16626 v.reset(OpAMD64MOVWstore) 16627 v.AddArg(dst) 16628 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 16629 v0.AddArg(src) 16630 v0.AddArg(mem) 16631 v.AddArg(v0) 16632 v.AddArg(mem) 16633 return true 16634 } 16635 // match: (Move [s] dst src mem) 16636 // cond: SizeAndAlign(s).Size() == 4 16637 // result: (MOVLstore dst (MOVLload src mem) mem) 16638 for { 16639 s := v.AuxInt 16640 dst := v.Args[0] 16641 src := v.Args[1] 16642 mem := v.Args[2] 16643 if !(SizeAndAlign(s).Size() == 4) { 16644 break 16645 } 16646 v.reset(OpAMD64MOVLstore) 16647 v.AddArg(dst) 16648 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16649 v0.AddArg(src) 16650 v0.AddArg(mem) 16651 v.AddArg(v0) 16652 v.AddArg(mem) 16653 return true 16654 } 16655 // match: (Move [s] dst src mem) 16656 // cond: SizeAndAlign(s).Size() == 8 16657 // result: (MOVQstore dst (MOVQload src mem) mem) 16658 for { 16659 s := v.AuxInt 16660 dst := v.Args[0] 16661 src := v.Args[1] 16662 mem := v.Args[2] 16663 if !(SizeAndAlign(s).Size() == 8) { 16664 break 16665 } 16666 v.reset(OpAMD64MOVQstore) 16667 v.AddArg(dst) 16668 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16669 v0.AddArg(src) 16670 v0.AddArg(mem) 16671 v.AddArg(v0) 16672 v.AddArg(mem) 16673 return true 16674 } 16675 // match: (Move [s] dst src mem) 16676 // cond: SizeAndAlign(s).Size() == 16 16677 // result: (MOVOstore dst (MOVOload src mem) mem) 16678 for { 16679 s := v.AuxInt 16680 dst := v.Args[0] 16681 src := v.Args[1] 16682 mem := v.Args[2] 16683 if 
!(SizeAndAlign(s).Size() == 16) { 16684 break 16685 } 16686 v.reset(OpAMD64MOVOstore) 16687 v.AddArg(dst) 16688 v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 16689 v0.AddArg(src) 16690 v0.AddArg(mem) 16691 v.AddArg(v0) 16692 v.AddArg(mem) 16693 return true 16694 } 16695 // match: (Move [s] dst src mem) 16696 // cond: SizeAndAlign(s).Size() == 3 16697 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 16698 for { 16699 s := v.AuxInt 16700 dst := v.Args[0] 16701 src := v.Args[1] 16702 mem := v.Args[2] 16703 if !(SizeAndAlign(s).Size() == 3) { 16704 break 16705 } 16706 v.reset(OpAMD64MOVBstore) 16707 v.AuxInt = 2 16708 v.AddArg(dst) 16709 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 16710 v0.AuxInt = 2 16711 v0.AddArg(src) 16712 v0.AddArg(mem) 16713 v.AddArg(v0) 16714 v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem) 16715 v1.AddArg(dst) 16716 v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 16717 v2.AddArg(src) 16718 v2.AddArg(mem) 16719 v1.AddArg(v2) 16720 v1.AddArg(mem) 16721 v.AddArg(v1) 16722 return true 16723 } 16724 // match: (Move [s] dst src mem) 16725 // cond: SizeAndAlign(s).Size() == 5 16726 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 16727 for { 16728 s := v.AuxInt 16729 dst := v.Args[0] 16730 src := v.Args[1] 16731 mem := v.Args[2] 16732 if !(SizeAndAlign(s).Size() == 5) { 16733 break 16734 } 16735 v.reset(OpAMD64MOVBstore) 16736 v.AuxInt = 4 16737 v.AddArg(dst) 16738 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 16739 v0.AuxInt = 4 16740 v0.AddArg(src) 16741 v0.AddArg(mem) 16742 v.AddArg(v0) 16743 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 16744 v1.AddArg(dst) 16745 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16746 v2.AddArg(src) 16747 v2.AddArg(mem) 16748 v1.AddArg(v2) 16749 v1.AddArg(mem) 16750 v.AddArg(v1) 16751 return true 16752 } 16753 // match: (Move [s] dst src mem) 16754 // cond: SizeAndAlign(s).Size() == 6 16755 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 16756 for { 16757 s := v.AuxInt 16758 dst := v.Args[0] 16759 src := v.Args[1] 16760 mem := v.Args[2] 16761 if !(SizeAndAlign(s).Size() == 6) { 16762 break 16763 } 16764 v.reset(OpAMD64MOVWstore) 16765 v.AuxInt = 4 16766 v.AddArg(dst) 16767 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 16768 v0.AuxInt = 4 16769 v0.AddArg(src) 16770 v0.AddArg(mem) 16771 v.AddArg(v0) 16772 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 16773 v1.AddArg(dst) 16774 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16775 v2.AddArg(src) 16776 v2.AddArg(mem) 16777 v1.AddArg(v2) 16778 v1.AddArg(mem) 16779 v.AddArg(v1) 16780 return true 16781 } 16782 // match: (Move [s] dst src mem) 16783 // cond: SizeAndAlign(s).Size() == 7 16784 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 16785 for { 16786 s := v.AuxInt 16787 dst := v.Args[0] 16788 src := v.Args[1] 16789 mem := v.Args[2] 16790 if !(SizeAndAlign(s).Size() == 7) { 16791 break 16792 } 16793 v.reset(OpAMD64MOVLstore) 16794 v.AuxInt = 3 16795 v.AddArg(dst) 16796 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16797 v0.AuxInt = 3 16798 v0.AddArg(src) 16799 v0.AddArg(mem) 16800 v.AddArg(v0) 16801 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 16802 v1.AddArg(dst) 16803 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 16804 
v2.AddArg(src) 16805 v2.AddArg(mem) 16806 v1.AddArg(v2) 16807 v1.AddArg(mem) 16808 v.AddArg(v1) 16809 return true 16810 } 16811 // match: (Move [s] dst src mem) 16812 // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 16813 // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 16814 for { 16815 s := v.AuxInt 16816 dst := v.Args[0] 16817 src := v.Args[1] 16818 mem := v.Args[2] 16819 if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) { 16820 break 16821 } 16822 v.reset(OpAMD64MOVQstore) 16823 v.AuxInt = SizeAndAlign(s).Size() - 8 16824 v.AddArg(dst) 16825 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16826 v0.AuxInt = SizeAndAlign(s).Size() - 8 16827 v0.AddArg(src) 16828 v0.AddArg(mem) 16829 v.AddArg(v0) 16830 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 16831 v1.AddArg(dst) 16832 v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16833 v2.AddArg(src) 16834 v2.AddArg(mem) 16835 v1.AddArg(v2) 16836 v1.AddArg(mem) 16837 v.AddArg(v1) 16838 return true 16839 } 16840 // match: (Move [s] dst src mem) 16841 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 16842 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem)) 16843 for { 16844 s := v.AuxInt 16845 dst := v.Args[0] 16846 src := v.Args[1] 16847 mem := v.Args[2] 16848 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) { 16849 break 16850 } 16851 v.reset(OpMove) 16852 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 16853 v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type) 16854 v0.AuxInt = SizeAndAlign(s).Size() % 16 16855 v0.AddArg(dst) 16856 v.AddArg(v0) 16857 v1 := b.NewValue0(v.Line, OpOffPtr, src.Type) 16858 v1.AuxInt = SizeAndAlign(s).Size() % 16 16859 v1.AddArg(src) 16860 v.AddArg(v1) 16861 v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 16862 v2.AddArg(dst) 16863 v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 16864 v3.AddArg(src) 16865 v3.AddArg(mem) 16866 v2.AddArg(v3) 16867 v2.AddArg(mem) 16868 v.AddArg(v2) 16869 return true 16870 } 16871 // match: (Move [s] dst src mem) 16872 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 16873 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem)) 16874 for { 16875 s := v.AuxInt 16876 dst := v.Args[0] 16877 src := v.Args[1] 16878 mem := v.Args[2] 16879 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) { 16880 break 16881 } 16882 v.reset(OpMove) 16883 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 16884 v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type) 16885 v0.AuxInt = SizeAndAlign(s).Size() % 16 16886 v0.AddArg(dst) 16887 v.AddArg(v0) 16888 v1 := b.NewValue0(v.Line, OpOffPtr, src.Type) 16889 v1.AuxInt = SizeAndAlign(s).Size() % 16 16890 v1.AddArg(src) 16891 v.AddArg(v1) 16892 v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem) 16893 v2.AddArg(dst) 16894 v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 16895 v3.AddArg(src) 16896 v3.AddArg(mem) 16897 v2.AddArg(v3) 16898 
v2.AddArg(mem) 16899 v.AddArg(v2) 16900 return true 16901 } 16902 // match: (Move [s] dst src mem) 16903 // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 16904 // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem) 16905 for { 16906 s := v.AuxInt 16907 dst := v.Args[0] 16908 src := v.Args[1] 16909 mem := v.Args[2] 16910 if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 16911 break 16912 } 16913 v.reset(OpAMD64DUFFCOPY) 16914 v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16) 16915 v.AddArg(dst) 16916 v.AddArg(src) 16917 v.AddArg(mem) 16918 return true 16919 } 16920 // match: (Move [s] dst src mem) 16921 // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 16922 // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem) 16923 for { 16924 s := v.AuxInt 16925 dst := v.Args[0] 16926 src := v.Args[1] 16927 mem := v.Args[2] 16928 if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) { 16929 break 16930 } 16931 v.reset(OpAMD64REPMOVSQ) 16932 v.AddArg(dst) 16933 v.AddArg(src) 16934 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 16935 v0.AuxInt = SizeAndAlign(s).Size() / 8 16936 v.AddArg(v0) 16937 v.AddArg(mem) 16938 return true 16939 } 16940 return false 16941 } 16942 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { 16943 b := v.Block 16944 _ = b 16945 // match: (Mul16 x y) 16946 // cond: 16947 // result: (MULL x y) 16948 for { 16949 x := v.Args[0] 16950 y := v.Args[1] 16951 v.reset(OpAMD64MULL) 16952 v.AddArg(x) 16953 v.AddArg(y) 16954 return true 16955 } 16956 } 16957 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { 16958 b := v.Block 16959 _ = b 16960 // match: (Mul32 x y) 16961 // cond: 16962 // result: (MULL x y) 16963 for { 16964 x := v.Args[0] 16965 y := v.Args[1] 16966 v.reset(OpAMD64MULL) 16967 v.AddArg(x) 16968 v.AddArg(y) 16969 return true 16970 } 16971 } 16972 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { 16973 b := v.Block 16974 _ = b 16975 // match: (Mul32F x y) 16976 // cond: 16977 // result: (MULSS x y) 16978 for { 16979 x := v.Args[0] 16980 y := v.Args[1] 16981 v.reset(OpAMD64MULSS) 16982 v.AddArg(x) 16983 v.AddArg(y) 16984 return true 16985 } 16986 } 16987 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { 16988 b := v.Block 16989 _ = b 16990 // match: (Mul64 x y) 16991 // cond: 16992 // result: (MULQ x y) 16993 for { 16994 x := v.Args[0] 16995 y := v.Args[1] 16996 v.reset(OpAMD64MULQ) 16997 v.AddArg(x) 16998 v.AddArg(y) 16999 return true 17000 } 17001 } 17002 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { 17003 b := v.Block 17004 _ = b 17005 // match: (Mul64F x y) 17006 // cond: 17007 // result: (MULSD x y) 17008 for { 17009 x := v.Args[0] 17010 y := v.Args[1] 17011 v.reset(OpAMD64MULSD) 17012 v.AddArg(x) 17013 v.AddArg(y) 17014 return true 17015 } 17016 } 17017 func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool { 17018 b := v.Block 17019 _ = b 17020 // match: (Mul64uhilo x y) 17021 // cond: 17022 // result: (MULQU2 x y) 17023 for { 17024 x := v.Args[0] 17025 y := v.Args[1] 17026 v.reset(OpAMD64MULQU2) 17027 v.AddArg(x) 17028 v.AddArg(y) 17029 return true 17030 } 17031 } 17032 func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { 17033 b := v.Block 17034 _ = b 17035 // match: (Mul8 x 
y) 17036 // cond: 17037 // result: (MULL x y) 17038 for { 17039 x := v.Args[0] 17040 y := v.Args[1] 17041 v.reset(OpAMD64MULL) 17042 v.AddArg(x) 17043 v.AddArg(y) 17044 return true 17045 } 17046 } 17047 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { 17048 b := v.Block 17049 _ = b 17050 // match: (Neg16 x) 17051 // cond: 17052 // result: (NEGL x) 17053 for { 17054 x := v.Args[0] 17055 v.reset(OpAMD64NEGL) 17056 v.AddArg(x) 17057 return true 17058 } 17059 } 17060 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { 17061 b := v.Block 17062 _ = b 17063 // match: (Neg32 x) 17064 // cond: 17065 // result: (NEGL x) 17066 for { 17067 x := v.Args[0] 17068 v.reset(OpAMD64NEGL) 17069 v.AddArg(x) 17070 return true 17071 } 17072 } 17073 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { 17074 b := v.Block 17075 _ = b 17076 // match: (Neg32F x) 17077 // cond: 17078 // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) 17079 for { 17080 x := v.Args[0] 17081 v.reset(OpAMD64PXOR) 17082 v.AddArg(x) 17083 v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) 17084 v0.AuxInt = f2i(math.Copysign(0, -1)) 17085 v.AddArg(v0) 17086 return true 17087 } 17088 } 17089 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { 17090 b := v.Block 17091 _ = b 17092 // match: (Neg64 x) 17093 // cond: 17094 // result: (NEGQ x) 17095 for { 17096 x := v.Args[0] 17097 v.reset(OpAMD64NEGQ) 17098 v.AddArg(x) 17099 return true 17100 } 17101 } 17102 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { 17103 b := v.Block 17104 _ = b 17105 // match: (Neg64F x) 17106 // cond: 17107 // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) 17108 for { 17109 x := v.Args[0] 17110 v.reset(OpAMD64PXOR) 17111 v.AddArg(x) 17112 v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) 17113 v0.AuxInt = f2i(math.Copysign(0, -1)) 17114 v.AddArg(v0) 17115 return true 17116 } 17117 } 17118 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { 17119 b := v.Block 17120 _ = b 17121 // match: (Neg8 x) 17122 // cond: 17123 // result: (NEGL x) 17124 for { 17125 x := v.Args[0] 17126 v.reset(OpAMD64NEGL) 17127 v.AddArg(x) 17128 return true 17129 } 17130 } 17131 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { 17132 b := v.Block 17133 _ = b 17134 // match: (Neq16 x y) 17135 // cond: 17136 // result: (SETNE (CMPW x y)) 17137 for { 17138 x := v.Args[0] 17139 y := v.Args[1] 17140 v.reset(OpAMD64SETNE) 17141 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 17142 v0.AddArg(x) 17143 v0.AddArg(y) 17144 v.AddArg(v0) 17145 return true 17146 } 17147 } 17148 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { 17149 b := v.Block 17150 _ = b 17151 // match: (Neq32 x y) 17152 // cond: 17153 // result: (SETNE (CMPL x y)) 17154 for { 17155 x := v.Args[0] 17156 y := v.Args[1] 17157 v.reset(OpAMD64SETNE) 17158 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 17159 v0.AddArg(x) 17160 v0.AddArg(y) 17161 v.AddArg(v0) 17162 return true 17163 } 17164 } 17165 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { 17166 b := v.Block 17167 _ = b 17168 // match: (Neq32F x y) 17169 // cond: 17170 // result: (SETNEF (UCOMISS x y)) 17171 for { 17172 x := v.Args[0] 17173 y := v.Args[1] 17174 v.reset(OpAMD64SETNEF) 17175 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 17176 v0.AddArg(x) 17177 v0.AddArg(y) 17178 v.AddArg(v0) 17179 return true 17180 } 
17181 } 17182 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { 17183 b := v.Block 17184 _ = b 17185 // match: (Neq64 x y) 17186 // cond: 17187 // result: (SETNE (CMPQ x y)) 17188 for { 17189 x := v.Args[0] 17190 y := v.Args[1] 17191 v.reset(OpAMD64SETNE) 17192 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 17193 v0.AddArg(x) 17194 v0.AddArg(y) 17195 v.AddArg(v0) 17196 return true 17197 } 17198 } 17199 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { 17200 b := v.Block 17201 _ = b 17202 // match: (Neq64F x y) 17203 // cond: 17204 // result: (SETNEF (UCOMISD x y)) 17205 for { 17206 x := v.Args[0] 17207 y := v.Args[1] 17208 v.reset(OpAMD64SETNEF) 17209 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 17210 v0.AddArg(x) 17211 v0.AddArg(y) 17212 v.AddArg(v0) 17213 return true 17214 } 17215 } 17216 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { 17217 b := v.Block 17218 _ = b 17219 // match: (Neq8 x y) 17220 // cond: 17221 // result: (SETNE (CMPB x y)) 17222 for { 17223 x := v.Args[0] 17224 y := v.Args[1] 17225 v.reset(OpAMD64SETNE) 17226 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 17227 v0.AddArg(x) 17228 v0.AddArg(y) 17229 v.AddArg(v0) 17230 return true 17231 } 17232 } 17233 func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool { 17234 b := v.Block 17235 _ = b 17236 // match: (NeqB x y) 17237 // cond: 17238 // result: (SETNE (CMPB x y)) 17239 for { 17240 x := v.Args[0] 17241 y := v.Args[1] 17242 v.reset(OpAMD64SETNE) 17243 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 17244 v0.AddArg(x) 17245 v0.AddArg(y) 17246 v.AddArg(v0) 17247 return true 17248 } 17249 } 17250 func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { 17251 b := v.Block 17252 _ = b 17253 // match: (NeqPtr x y) 17254 // cond: config.PtrSize == 8 17255 // result: (SETNE (CMPQ x y)) 17256 for { 17257 x := v.Args[0] 17258 y := v.Args[1] 17259 if !(config.PtrSize == 8) { 17260 break 17261 } 17262 v.reset(OpAMD64SETNE) 17263 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 17264 v0.AddArg(x) 17265 v0.AddArg(y) 17266 v.AddArg(v0) 17267 return true 17268 } 17269 // match: (NeqPtr x y) 17270 // cond: config.PtrSize == 4 17271 // result: (SETNE (CMPL x y)) 17272 for { 17273 x := v.Args[0] 17274 y := v.Args[1] 17275 if !(config.PtrSize == 4) { 17276 break 17277 } 17278 v.reset(OpAMD64SETNE) 17279 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 17280 v0.AddArg(x) 17281 v0.AddArg(y) 17282 v.AddArg(v0) 17283 return true 17284 } 17285 return false 17286 } 17287 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { 17288 b := v.Block 17289 _ = b 17290 // match: (NilCheck ptr mem) 17291 // cond: 17292 // result: (LoweredNilCheck ptr mem) 17293 for { 17294 ptr := v.Args[0] 17295 mem := v.Args[1] 17296 v.reset(OpAMD64LoweredNilCheck) 17297 v.AddArg(ptr) 17298 v.AddArg(mem) 17299 return true 17300 } 17301 } 17302 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { 17303 b := v.Block 17304 _ = b 17305 // match: (Not x) 17306 // cond: 17307 // result: (XORLconst [1] x) 17308 for { 17309 x := v.Args[0] 17310 v.reset(OpAMD64XORLconst) 17311 v.AuxInt = 1 17312 v.AddArg(x) 17313 return true 17314 } 17315 } 17316 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { 17317 b := v.Block 17318 _ = b 17319 // match: (OffPtr [off] ptr) 17320 // cond: config.PtrSize == 8 && is32Bit(off) 17321 // result: (ADDQconst [off] ptr) 17322 for { 17323 off := v.AuxInt 17324 ptr := v.Args[0] 17325 if !(config.PtrSize == 8 && is32Bit(off)) { 17326 break 
17327 } 17328 v.reset(OpAMD64ADDQconst) 17329 v.AuxInt = off 17330 v.AddArg(ptr) 17331 return true 17332 } 17333 // match: (OffPtr [off] ptr) 17334 // cond: config.PtrSize == 8 17335 // result: (ADDQ (MOVQconst [off]) ptr) 17336 for { 17337 off := v.AuxInt 17338 ptr := v.Args[0] 17339 if !(config.PtrSize == 8) { 17340 break 17341 } 17342 v.reset(OpAMD64ADDQ) 17343 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 17344 v0.AuxInt = off 17345 v.AddArg(v0) 17346 v.AddArg(ptr) 17347 return true 17348 } 17349 // match: (OffPtr [off] ptr) 17350 // cond: config.PtrSize == 4 17351 // result: (ADDLconst [off] ptr) 17352 for { 17353 off := v.AuxInt 17354 ptr := v.Args[0] 17355 if !(config.PtrSize == 4) { 17356 break 17357 } 17358 v.reset(OpAMD64ADDLconst) 17359 v.AuxInt = off 17360 v.AddArg(ptr) 17361 return true 17362 } 17363 return false 17364 } 17365 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { 17366 b := v.Block 17367 _ = b 17368 // match: (Or16 x y) 17369 // cond: 17370 // result: (ORL x y) 17371 for { 17372 x := v.Args[0] 17373 y := v.Args[1] 17374 v.reset(OpAMD64ORL) 17375 v.AddArg(x) 17376 v.AddArg(y) 17377 return true 17378 } 17379 } 17380 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { 17381 b := v.Block 17382 _ = b 17383 // match: (Or32 x y) 17384 // cond: 17385 // result: (ORL x y) 17386 for { 17387 x := v.Args[0] 17388 y := v.Args[1] 17389 v.reset(OpAMD64ORL) 17390 v.AddArg(x) 17391 v.AddArg(y) 17392 return true 17393 } 17394 } 17395 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { 17396 b := v.Block 17397 _ = b 17398 // match: (Or64 x y) 17399 // cond: 17400 // result: (ORQ x y) 17401 for { 17402 x := v.Args[0] 17403 y := v.Args[1] 17404 v.reset(OpAMD64ORQ) 17405 v.AddArg(x) 17406 v.AddArg(y) 17407 return true 17408 } 17409 } 17410 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { 17411 b := v.Block 17412 _ = b 17413 // match: (Or8 x y) 17414 // cond: 17415 // result: (ORL x y) 17416 for { 17417 x := v.Args[0] 17418 y := v.Args[1] 17419 v.reset(OpAMD64ORL) 17420 v.AddArg(x) 17421 v.AddArg(y) 17422 return true 17423 } 17424 } 17425 func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool { 17426 b := v.Block 17427 _ = b 17428 // match: (OrB x y) 17429 // cond: 17430 // result: (ORL x y) 17431 for { 17432 x := v.Args[0] 17433 y := v.Args[1] 17434 v.reset(OpAMD64ORL) 17435 v.AddArg(x) 17436 v.AddArg(y) 17437 return true 17438 } 17439 } 17440 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { 17441 b := v.Block 17442 _ = b 17443 // match: (Rsh16Ux16 <t> x y) 17444 // cond: 17445 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 17446 for { 17447 t := v.Type 17448 x := v.Args[0] 17449 y := v.Args[1] 17450 v.reset(OpAMD64ANDL) 17451 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17452 v0.AddArg(x) 17453 v0.AddArg(y) 17454 v.AddArg(v0) 17455 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17456 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17457 v2.AuxInt = 16 17458 v2.AddArg(y) 17459 v1.AddArg(v2) 17460 v.AddArg(v1) 17461 return true 17462 } 17463 } 17464 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { 17465 b := v.Block 17466 _ = b 17467 // match: (Rsh16Ux32 <t> x y) 17468 // cond: 17469 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 17470 for { 17471 t := v.Type 17472 x := v.Args[0] 17473 y := v.Args[1] 17474 v.reset(OpAMD64ANDL) 17475 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17476 v0.AddArg(x) 17477 v0.AddArg(y) 17478 
v.AddArg(v0) 17479 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17480 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17481 v2.AuxInt = 16 17482 v2.AddArg(y) 17483 v1.AddArg(v2) 17484 v.AddArg(v1) 17485 return true 17486 } 17487 } 17488 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { 17489 b := v.Block 17490 _ = b 17491 // match: (Rsh16Ux64 <t> x y) 17492 // cond: 17493 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 17494 for { 17495 t := v.Type 17496 x := v.Args[0] 17497 y := v.Args[1] 17498 v.reset(OpAMD64ANDL) 17499 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17500 v0.AddArg(x) 17501 v0.AddArg(y) 17502 v.AddArg(v0) 17503 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17504 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17505 v2.AuxInt = 16 17506 v2.AddArg(y) 17507 v1.AddArg(v2) 17508 v.AddArg(v1) 17509 return true 17510 } 17511 } 17512 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { 17513 b := v.Block 17514 _ = b 17515 // match: (Rsh16Ux8 <t> x y) 17516 // cond: 17517 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 17518 for { 17519 t := v.Type 17520 x := v.Args[0] 17521 y := v.Args[1] 17522 v.reset(OpAMD64ANDL) 17523 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 17524 v0.AddArg(x) 17525 v0.AddArg(y) 17526 v.AddArg(v0) 17527 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17528 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17529 v2.AuxInt = 16 17530 v2.AddArg(y) 17531 v1.AddArg(v2) 17532 v.AddArg(v1) 17533 return true 17534 } 17535 } 17536 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { 17537 b := v.Block 17538 _ = b 17539 // match: (Rsh16x16 <t> x y) 17540 // cond: 17541 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 17542 for { 17543 t := v.Type 17544 x := v.Args[0] 17545 y := v.Args[1] 17546 v.reset(OpAMD64SARW) 17547 v.Type = t 17548 v.AddArg(x) 17549 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17550 v0.AddArg(y) 17551 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17552 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17553 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17554 v3.AuxInt = 16 17555 v3.AddArg(y) 17556 v2.AddArg(v3) 17557 v1.AddArg(v2) 17558 v0.AddArg(v1) 17559 v.AddArg(v0) 17560 return true 17561 } 17562 } 17563 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { 17564 b := v.Block 17565 _ = b 17566 // match: (Rsh16x32 <t> x y) 17567 // cond: 17568 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 17569 for { 17570 t := v.Type 17571 x := v.Args[0] 17572 y := v.Args[1] 17573 v.reset(OpAMD64SARW) 17574 v.Type = t 17575 v.AddArg(x) 17576 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17577 v0.AddArg(y) 17578 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17579 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17580 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17581 v3.AuxInt = 16 17582 v3.AddArg(y) 17583 v2.AddArg(v3) 17584 v1.AddArg(v2) 17585 v0.AddArg(v1) 17586 v.AddArg(v0) 17587 return true 17588 } 17589 } 17590 func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { 17591 b := v.Block 17592 _ = b 17593 // match: (Rsh16x64 <t> x y) 17594 // cond: 17595 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 17596 for { 17597 t := v.Type 17598 x := v.Args[0] 17599 y := v.Args[1] 17600 v.reset(OpAMD64SARW) 17601 v.Type = t 17602 
v.AddArg(x) 17603 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 17604 v0.AddArg(y) 17605 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 17606 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 17607 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17608 v3.AuxInt = 16 17609 v3.AddArg(y) 17610 v2.AddArg(v3) 17611 v1.AddArg(v2) 17612 v0.AddArg(v1) 17613 v.AddArg(v0) 17614 return true 17615 } 17616 } 17617 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { 17618 b := v.Block 17619 _ = b 17620 // match: (Rsh16x8 <t> x y) 17621 // cond: 17622 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 17623 for { 17624 t := v.Type 17625 x := v.Args[0] 17626 y := v.Args[1] 17627 v.reset(OpAMD64SARW) 17628 v.Type = t 17629 v.AddArg(x) 17630 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17631 v0.AddArg(y) 17632 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17633 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17634 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17635 v3.AuxInt = 16 17636 v3.AddArg(y) 17637 v2.AddArg(v3) 17638 v1.AddArg(v2) 17639 v0.AddArg(v1) 17640 v.AddArg(v0) 17641 return true 17642 } 17643 } 17644 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { 17645 b := v.Block 17646 _ = b 17647 // match: (Rsh32Ux16 <t> x y) 17648 // cond: 17649 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17650 for { 17651 t := v.Type 17652 x := v.Args[0] 17653 y := v.Args[1] 17654 v.reset(OpAMD64ANDL) 17655 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17656 v0.AddArg(x) 17657 v0.AddArg(y) 17658 v.AddArg(v0) 17659 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17660 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17661 v2.AuxInt = 32 17662 v2.AddArg(y) 17663 v1.AddArg(v2) 17664 v.AddArg(v1) 17665 return true 17666 } 17667 } 17668 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { 17669 b := v.Block 17670 _ = b 17671 // match: (Rsh32Ux32 <t> x y) 17672 // cond: 17673 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17674 for { 17675 t := v.Type 17676 x := v.Args[0] 17677 y := v.Args[1] 17678 v.reset(OpAMD64ANDL) 17679 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17680 v0.AddArg(x) 17681 v0.AddArg(y) 17682 v.AddArg(v0) 17683 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17684 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17685 v2.AuxInt = 32 17686 v2.AddArg(y) 17687 v1.AddArg(v2) 17688 v.AddArg(v1) 17689 return true 17690 } 17691 } 17692 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { 17693 b := v.Block 17694 _ = b 17695 // match: (Rsh32Ux64 <t> x y) 17696 // cond: 17697 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17698 for { 17699 t := v.Type 17700 x := v.Args[0] 17701 y := v.Args[1] 17702 v.reset(OpAMD64ANDL) 17703 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 17704 v0.AddArg(x) 17705 v0.AddArg(y) 17706 v.AddArg(v0) 17707 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17708 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17709 v2.AuxInt = 32 17710 v2.AddArg(y) 17711 v1.AddArg(v2) 17712 v.AddArg(v1) 17713 return true 17714 } 17715 } 17716 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { 17717 b := v.Block 17718 _ = b 17719 // match: (Rsh32Ux8 <t> x y) 17720 // cond: 17721 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17722 for { 17723 t := v.Type 17724 x := v.Args[0] 17725 y := v.Args[1] 17726 v.reset(OpAMD64ANDL) 17727 v0 := 
b.NewValue0(v.Line, OpAMD64SHRL, t) 17728 v0.AddArg(x) 17729 v0.AddArg(y) 17730 v.AddArg(v0) 17731 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17732 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17733 v2.AuxInt = 32 17734 v2.AddArg(y) 17735 v1.AddArg(v2) 17736 v.AddArg(v1) 17737 return true 17738 } 17739 } 17740 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { 17741 b := v.Block 17742 _ = b 17743 // match: (Rsh32x16 <t> x y) 17744 // cond: 17745 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 17746 for { 17747 t := v.Type 17748 x := v.Args[0] 17749 y := v.Args[1] 17750 v.reset(OpAMD64SARL) 17751 v.Type = t 17752 v.AddArg(x) 17753 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17754 v0.AddArg(y) 17755 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17756 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17757 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17758 v3.AuxInt = 32 17759 v3.AddArg(y) 17760 v2.AddArg(v3) 17761 v1.AddArg(v2) 17762 v0.AddArg(v1) 17763 v.AddArg(v0) 17764 return true 17765 } 17766 } 17767 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { 17768 b := v.Block 17769 _ = b 17770 // match: (Rsh32x32 <t> x y) 17771 // cond: 17772 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 17773 for { 17774 t := v.Type 17775 x := v.Args[0] 17776 y := v.Args[1] 17777 v.reset(OpAMD64SARL) 17778 v.Type = t 17779 v.AddArg(x) 17780 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17781 v0.AddArg(y) 17782 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17783 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17784 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17785 v3.AuxInt = 32 17786 v3.AddArg(y) 17787 v2.AddArg(v3) 17788 v1.AddArg(v2) 17789 v0.AddArg(v1) 17790 v.AddArg(v0) 17791 return true 17792 } 17793 } 17794 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { 17795 b := v.Block 17796 _ = b 17797 // match: (Rsh32x64 <t> x y) 17798 // cond: 17799 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 17800 for { 17801 t := v.Type 17802 x := v.Args[0] 17803 y := v.Args[1] 17804 v.reset(OpAMD64SARL) 17805 v.Type = t 17806 v.AddArg(x) 17807 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 17808 v0.AddArg(y) 17809 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 17810 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 17811 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17812 v3.AuxInt = 32 17813 v3.AddArg(y) 17814 v2.AddArg(v3) 17815 v1.AddArg(v2) 17816 v0.AddArg(v1) 17817 v.AddArg(v0) 17818 return true 17819 } 17820 } 17821 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { 17822 b := v.Block 17823 _ = b 17824 // match: (Rsh32x8 <t> x y) 17825 // cond: 17826 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 17827 for { 17828 t := v.Type 17829 x := v.Args[0] 17830 y := v.Args[1] 17831 v.reset(OpAMD64SARL) 17832 v.Type = t 17833 v.AddArg(x) 17834 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17835 v0.AddArg(y) 17836 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17837 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17838 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17839 v3.AuxInt = 32 17840 v3.AddArg(y) 17841 v2.AddArg(v3) 17842 v1.AddArg(v2) 17843 v0.AddArg(v1) 17844 v.AddArg(v0) 17845 return true 17846 } 17847 } 17848 func rewriteValueAMD64_OpRsh64Ux16(v *Value, 
config *Config) bool { 17849 b := v.Block 17850 _ = b 17851 // match: (Rsh64Ux16 <t> x y) 17852 // cond: 17853 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 17854 for { 17855 t := v.Type 17856 x := v.Args[0] 17857 y := v.Args[1] 17858 v.reset(OpAMD64ANDQ) 17859 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17860 v0.AddArg(x) 17861 v0.AddArg(y) 17862 v.AddArg(v0) 17863 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17864 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17865 v2.AuxInt = 64 17866 v2.AddArg(y) 17867 v1.AddArg(v2) 17868 v.AddArg(v1) 17869 return true 17870 } 17871 } 17872 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { 17873 b := v.Block 17874 _ = b 17875 // match: (Rsh64Ux32 <t> x y) 17876 // cond: 17877 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 17878 for { 17879 t := v.Type 17880 x := v.Args[0] 17881 y := v.Args[1] 17882 v.reset(OpAMD64ANDQ) 17883 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17884 v0.AddArg(x) 17885 v0.AddArg(y) 17886 v.AddArg(v0) 17887 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17888 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17889 v2.AuxInt = 64 17890 v2.AddArg(y) 17891 v1.AddArg(v2) 17892 v.AddArg(v1) 17893 return true 17894 } 17895 } 17896 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { 17897 b := v.Block 17898 _ = b 17899 // match: (Rsh64Ux64 <t> x y) 17900 // cond: 17901 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 17902 for { 17903 t := v.Type 17904 x := v.Args[0] 17905 y := v.Args[1] 17906 v.reset(OpAMD64ANDQ) 17907 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17908 v0.AddArg(x) 17909 v0.AddArg(y) 17910 v.AddArg(v0) 17911 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17912 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17913 v2.AuxInt = 64 17914 v2.AddArg(y) 17915 v1.AddArg(v2) 17916 v.AddArg(v1) 17917 return true 17918 } 17919 } 17920 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { 17921 b := v.Block 17922 _ = b 17923 // match: (Rsh64Ux8 <t> x y) 17924 // cond: 17925 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 17926 for { 17927 t := v.Type 17928 x := v.Args[0] 17929 y := v.Args[1] 17930 v.reset(OpAMD64ANDQ) 17931 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 17932 v0.AddArg(x) 17933 v0.AddArg(y) 17934 v.AddArg(v0) 17935 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17936 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17937 v2.AuxInt = 64 17938 v2.AddArg(y) 17939 v1.AddArg(v2) 17940 v.AddArg(v1) 17941 return true 17942 } 17943 } 17944 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { 17945 b := v.Block 17946 _ = b 17947 // match: (Rsh64x16 <t> x y) 17948 // cond: 17949 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 17950 for { 17951 t := v.Type 17952 x := v.Args[0] 17953 y := v.Args[1] 17954 v.reset(OpAMD64SARQ) 17955 v.Type = t 17956 v.AddArg(x) 17957 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17958 v0.AddArg(y) 17959 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17960 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17961 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17962 v3.AuxInt = 64 17963 v3.AddArg(y) 17964 v2.AddArg(v3) 17965 v1.AddArg(v2) 17966 v0.AddArg(v1) 17967 v.AddArg(v0) 17968 return true 17969 } 17970 } 17971 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { 17972 b := v.Block 17973 _ = b 17974 // match: (Rsh64x32 <t> x 
y) 17975 // cond: 17976 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 17977 for { 17978 t := v.Type 17979 x := v.Args[0] 17980 y := v.Args[1] 17981 v.reset(OpAMD64SARQ) 17982 v.Type = t 17983 v.AddArg(x) 17984 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 17985 v0.AddArg(y) 17986 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 17987 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 17988 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17989 v3.AuxInt = 64 17990 v3.AddArg(y) 17991 v2.AddArg(v3) 17992 v1.AddArg(v2) 17993 v0.AddArg(v1) 17994 v.AddArg(v0) 17995 return true 17996 } 17997 } 17998 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { 17999 b := v.Block 18000 _ = b 18001 // match: (Rsh64x64 <t> x y) 18002 // cond: 18003 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 18004 for { 18005 t := v.Type 18006 x := v.Args[0] 18007 y := v.Args[1] 18008 v.reset(OpAMD64SARQ) 18009 v.Type = t 18010 v.AddArg(x) 18011 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 18012 v0.AddArg(y) 18013 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 18014 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 18015 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18016 v3.AuxInt = 64 18017 v3.AddArg(y) 18018 v2.AddArg(v3) 18019 v1.AddArg(v2) 18020 v0.AddArg(v1) 18021 v.AddArg(v0) 18022 return true 18023 } 18024 } 18025 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { 18026 b := v.Block 18027 _ = b 18028 // match: (Rsh64x8 <t> x y) 18029 // cond: 18030 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 18031 for { 18032 t := v.Type 18033 x := v.Args[0] 18034 y := v.Args[1] 18035 v.reset(OpAMD64SARQ) 18036 v.Type = t 18037 v.AddArg(x) 18038 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18039 v0.AddArg(y) 18040 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18041 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18042 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18043 v3.AuxInt = 64 18044 v3.AddArg(y) 18045 v2.AddArg(v3) 18046 v1.AddArg(v2) 18047 v0.AddArg(v1) 18048 v.AddArg(v0) 18049 return true 18050 } 18051 } 18052 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { 18053 b := v.Block 18054 _ = b 18055 // match: (Rsh8Ux16 <t> x y) 18056 // cond: 18057 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 18058 for { 18059 t := v.Type 18060 x := v.Args[0] 18061 y := v.Args[1] 18062 v.reset(OpAMD64ANDL) 18063 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 18064 v0.AddArg(x) 18065 v0.AddArg(y) 18066 v.AddArg(v0) 18067 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18068 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18069 v2.AuxInt = 8 18070 v2.AddArg(y) 18071 v1.AddArg(v2) 18072 v.AddArg(v1) 18073 return true 18074 } 18075 } 18076 func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { 18077 b := v.Block 18078 _ = b 18079 // match: (Rsh8Ux32 <t> x y) 18080 // cond: 18081 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 18082 for { 18083 t := v.Type 18084 x := v.Args[0] 18085 y := v.Args[1] 18086 v.reset(OpAMD64ANDL) 18087 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 18088 v0.AddArg(x) 18089 v0.AddArg(y) 18090 v.AddArg(v0) 18091 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18092 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18093 v2.AuxInt = 8 18094 v2.AddArg(y) 18095 v1.AddArg(v2) 18096 v.AddArg(v1) 18097 
return true 18098 } 18099 } 18100 func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { 18101 b := v.Block 18102 _ = b 18103 // match: (Rsh8Ux64 <t> x y) 18104 // cond: 18105 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 18106 for { 18107 t := v.Type 18108 x := v.Args[0] 18109 y := v.Args[1] 18110 v.reset(OpAMD64ANDL) 18111 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 18112 v0.AddArg(x) 18113 v0.AddArg(y) 18114 v.AddArg(v0) 18115 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18116 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18117 v2.AuxInt = 8 18118 v2.AddArg(y) 18119 v1.AddArg(v2) 18120 v.AddArg(v1) 18121 return true 18122 } 18123 } 18124 func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { 18125 b := v.Block 18126 _ = b 18127 // match: (Rsh8Ux8 <t> x y) 18128 // cond: 18129 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 18130 for { 18131 t := v.Type 18132 x := v.Args[0] 18133 y := v.Args[1] 18134 v.reset(OpAMD64ANDL) 18135 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 18136 v0.AddArg(x) 18137 v0.AddArg(y) 18138 v.AddArg(v0) 18139 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18140 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18141 v2.AuxInt = 8 18142 v2.AddArg(y) 18143 v1.AddArg(v2) 18144 v.AddArg(v1) 18145 return true 18146 } 18147 } 18148 func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { 18149 b := v.Block 18150 _ = b 18151 // match: (Rsh8x16 <t> x y) 18152 // cond: 18153 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 18154 for { 18155 t := v.Type 18156 x := v.Args[0] 18157 y := v.Args[1] 18158 v.reset(OpAMD64SARB) 18159 v.Type = t 18160 v.AddArg(x) 18161 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18162 v0.AddArg(y) 18163 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18164 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18165 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18166 v3.AuxInt = 8 18167 v3.AddArg(y) 18168 v2.AddArg(v3) 18169 v1.AddArg(v2) 18170 v0.AddArg(v1) 18171 v.AddArg(v0) 18172 return true 18173 } 18174 } 18175 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { 18176 b := v.Block 18177 _ = b 18178 // match: (Rsh8x32 <t> x y) 18179 // cond: 18180 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 18181 for { 18182 t := v.Type 18183 x := v.Args[0] 18184 y := v.Args[1] 18185 v.reset(OpAMD64SARB) 18186 v.Type = t 18187 v.AddArg(x) 18188 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18189 v0.AddArg(y) 18190 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18191 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18192 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18193 v3.AuxInt = 8 18194 v3.AddArg(y) 18195 v2.AddArg(v3) 18196 v1.AddArg(v2) 18197 v0.AddArg(v1) 18198 v.AddArg(v0) 18199 return true 18200 } 18201 } 18202 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { 18203 b := v.Block 18204 _ = b 18205 // match: (Rsh8x64 <t> x y) 18206 // cond: 18207 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 18208 for { 18209 t := v.Type 18210 x := v.Args[0] 18211 y := v.Args[1] 18212 v.reset(OpAMD64SARB) 18213 v.Type = t 18214 v.AddArg(x) 18215 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 18216 v0.AddArg(y) 18217 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 18218 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 18219 v3 := b.NewValue0(v.Line, 
OpAMD64CMPQconst, TypeFlags) 18220 v3.AuxInt = 8 18221 v3.AddArg(y) 18222 v2.AddArg(v3) 18223 v1.AddArg(v2) 18224 v0.AddArg(v1) 18225 v.AddArg(v0) 18226 return true 18227 } 18228 } 18229 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { 18230 b := v.Block 18231 _ = b 18232 // match: (Rsh8x8 <t> x y) 18233 // cond: 18234 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 18235 for { 18236 t := v.Type 18237 x := v.Args[0] 18238 y := v.Args[1] 18239 v.reset(OpAMD64SARB) 18240 v.Type = t 18241 v.AddArg(x) 18242 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18243 v0.AddArg(y) 18244 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18245 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18246 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18247 v3.AuxInt = 8 18248 v3.AddArg(y) 18249 v2.AddArg(v3) 18250 v1.AddArg(v2) 18251 v0.AddArg(v1) 18252 v.AddArg(v0) 18253 return true 18254 } 18255 } 18256 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool { 18257 b := v.Block 18258 _ = b 18259 // match: (Select0 <t> (AddTupleFirst32 tuple val)) 18260 // cond: 18261 // result: (ADDL val (Select0 <t> tuple)) 18262 for { 18263 t := v.Type 18264 v_0 := v.Args[0] 18265 if v_0.Op != OpAMD64AddTupleFirst32 { 18266 break 18267 } 18268 tuple := v_0.Args[0] 18269 val := v_0.Args[1] 18270 v.reset(OpAMD64ADDL) 18271 v.AddArg(val) 18272 v0 := b.NewValue0(v.Line, OpSelect0, t) 18273 v0.AddArg(tuple) 18274 v.AddArg(v0) 18275 return true 18276 } 18277 // match: (Select0 <t> (AddTupleFirst64 tuple val)) 18278 // cond: 18279 // result: (ADDQ val (Select0 <t> tuple)) 18280 for { 18281 t := v.Type 18282 v_0 := v.Args[0] 18283 if v_0.Op != OpAMD64AddTupleFirst64 { 18284 break 18285 } 18286 tuple := v_0.Args[0] 18287 val := v_0.Args[1] 18288 v.reset(OpAMD64ADDQ) 18289 v.AddArg(val) 18290 v0 := b.NewValue0(v.Line, OpSelect0, t) 18291 v0.AddArg(tuple) 18292 v.AddArg(v0) 18293 return true 18294 } 18295 return false 18296 } 18297 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool { 18298 b := v.Block 18299 _ = b 18300 // match: (Select1 (AddTupleFirst32 tuple _ )) 18301 // cond: 18302 // result: (Select1 tuple) 18303 for { 18304 v_0 := v.Args[0] 18305 if v_0.Op != OpAMD64AddTupleFirst32 { 18306 break 18307 } 18308 tuple := v_0.Args[0] 18309 v.reset(OpSelect1) 18310 v.AddArg(tuple) 18311 return true 18312 } 18313 // match: (Select1 (AddTupleFirst64 tuple _ )) 18314 // cond: 18315 // result: (Select1 tuple) 18316 for { 18317 v_0 := v.Args[0] 18318 if v_0.Op != OpAMD64AddTupleFirst64 { 18319 break 18320 } 18321 tuple := v_0.Args[0] 18322 v.reset(OpSelect1) 18323 v.AddArg(tuple) 18324 return true 18325 } 18326 return false 18327 } 18328 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { 18329 b := v.Block 18330 _ = b 18331 // match: (SignExt16to32 x) 18332 // cond: 18333 // result: (MOVWQSX x) 18334 for { 18335 x := v.Args[0] 18336 v.reset(OpAMD64MOVWQSX) 18337 v.AddArg(x) 18338 return true 18339 } 18340 } 18341 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { 18342 b := v.Block 18343 _ = b 18344 // match: (SignExt16to64 x) 18345 // cond: 18346 // result: (MOVWQSX x) 18347 for { 18348 x := v.Args[0] 18349 v.reset(OpAMD64MOVWQSX) 18350 v.AddArg(x) 18351 return true 18352 } 18353 } 18354 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { 18355 b := v.Block 18356 _ = b 18357 // match: (SignExt32to64 x) 18358 // cond: 18359 // result: (MOVLQSX x) 18360 for { 18361 x := 
v.Args[0] 18362 v.reset(OpAMD64MOVLQSX) 18363 v.AddArg(x) 18364 return true 18365 } 18366 } 18367 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { 18368 b := v.Block 18369 _ = b 18370 // match: (SignExt8to16 x) 18371 // cond: 18372 // result: (MOVBQSX x) 18373 for { 18374 x := v.Args[0] 18375 v.reset(OpAMD64MOVBQSX) 18376 v.AddArg(x) 18377 return true 18378 } 18379 } 18380 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { 18381 b := v.Block 18382 _ = b 18383 // match: (SignExt8to32 x) 18384 // cond: 18385 // result: (MOVBQSX x) 18386 for { 18387 x := v.Args[0] 18388 v.reset(OpAMD64MOVBQSX) 18389 v.AddArg(x) 18390 return true 18391 } 18392 } 18393 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { 18394 b := v.Block 18395 _ = b 18396 // match: (SignExt8to64 x) 18397 // cond: 18398 // result: (MOVBQSX x) 18399 for { 18400 x := v.Args[0] 18401 v.reset(OpAMD64MOVBQSX) 18402 v.AddArg(x) 18403 return true 18404 } 18405 } 18406 func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool { 18407 b := v.Block 18408 _ = b 18409 // match: (Slicemask <t> x) 18410 // cond: 18411 // result: (XORQconst [-1] (SARQconst <t> (SUBQconst <t> x [1]) [63])) 18412 for { 18413 t := v.Type 18414 x := v.Args[0] 18415 v.reset(OpAMD64XORQconst) 18416 v.AuxInt = -1 18417 v0 := b.NewValue0(v.Line, OpAMD64SARQconst, t) 18418 v0.AuxInt = 63 18419 v1 := b.NewValue0(v.Line, OpAMD64SUBQconst, t) 18420 v1.AuxInt = 1 18421 v1.AddArg(x) 18422 v0.AddArg(v1) 18423 v.AddArg(v0) 18424 return true 18425 } 18426 } 18427 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { 18428 b := v.Block 18429 _ = b 18430 // match: (Sqrt x) 18431 // cond: 18432 // result: (SQRTSD x) 18433 for { 18434 x := v.Args[0] 18435 v.reset(OpAMD64SQRTSD) 18436 v.AddArg(x) 18437 return true 18438 } 18439 } 18440 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { 18441 b := v.Block 18442 _ = b 18443 // match: (StaticCall [argwid] {target} mem) 18444 // cond: 18445 // result: (CALLstatic [argwid] {target} mem) 18446 for { 18447 argwid := v.AuxInt 18448 target := v.Aux 18449 mem := v.Args[0] 18450 v.reset(OpAMD64CALLstatic) 18451 v.AuxInt = argwid 18452 v.Aux = target 18453 v.AddArg(mem) 18454 return true 18455 } 18456 } 18457 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { 18458 b := v.Block 18459 _ = b 18460 // match: (Store [8] ptr val mem) 18461 // cond: is64BitFloat(val.Type) 18462 // result: (MOVSDstore ptr val mem) 18463 for { 18464 if v.AuxInt != 8 { 18465 break 18466 } 18467 ptr := v.Args[0] 18468 val := v.Args[1] 18469 mem := v.Args[2] 18470 if !(is64BitFloat(val.Type)) { 18471 break 18472 } 18473 v.reset(OpAMD64MOVSDstore) 18474 v.AddArg(ptr) 18475 v.AddArg(val) 18476 v.AddArg(mem) 18477 return true 18478 } 18479 // match: (Store [4] ptr val mem) 18480 // cond: is32BitFloat(val.Type) 18481 // result: (MOVSSstore ptr val mem) 18482 for { 18483 if v.AuxInt != 4 { 18484 break 18485 } 18486 ptr := v.Args[0] 18487 val := v.Args[1] 18488 mem := v.Args[2] 18489 if !(is32BitFloat(val.Type)) { 18490 break 18491 } 18492 v.reset(OpAMD64MOVSSstore) 18493 v.AddArg(ptr) 18494 v.AddArg(val) 18495 v.AddArg(mem) 18496 return true 18497 } 18498 // match: (Store [8] ptr val mem) 18499 // cond: 18500 // result: (MOVQstore ptr val mem) 18501 for { 18502 if v.AuxInt != 8 { 18503 break 18504 } 18505 ptr := v.Args[0] 18506 val := v.Args[1] 18507 mem := v.Args[2] 18508 v.reset(OpAMD64MOVQstore) 18509 v.AddArg(ptr) 18510 v.AddArg(val) 18511 v.AddArg(mem) 18512 
return true 18513 } 18514 // match: (Store [4] ptr val mem) 18515 // cond: 18516 // result: (MOVLstore ptr val mem) 18517 for { 18518 if v.AuxInt != 4 { 18519 break 18520 } 18521 ptr := v.Args[0] 18522 val := v.Args[1] 18523 mem := v.Args[2] 18524 v.reset(OpAMD64MOVLstore) 18525 v.AddArg(ptr) 18526 v.AddArg(val) 18527 v.AddArg(mem) 18528 return true 18529 } 18530 // match: (Store [2] ptr val mem) 18531 // cond: 18532 // result: (MOVWstore ptr val mem) 18533 for { 18534 if v.AuxInt != 2 { 18535 break 18536 } 18537 ptr := v.Args[0] 18538 val := v.Args[1] 18539 mem := v.Args[2] 18540 v.reset(OpAMD64MOVWstore) 18541 v.AddArg(ptr) 18542 v.AddArg(val) 18543 v.AddArg(mem) 18544 return true 18545 } 18546 // match: (Store [1] ptr val mem) 18547 // cond: 18548 // result: (MOVBstore ptr val mem) 18549 for { 18550 if v.AuxInt != 1 { 18551 break 18552 } 18553 ptr := v.Args[0] 18554 val := v.Args[1] 18555 mem := v.Args[2] 18556 v.reset(OpAMD64MOVBstore) 18557 v.AddArg(ptr) 18558 v.AddArg(val) 18559 v.AddArg(mem) 18560 return true 18561 } 18562 return false 18563 } 18564 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { 18565 b := v.Block 18566 _ = b 18567 // match: (Sub16 x y) 18568 // cond: 18569 // result: (SUBL x y) 18570 for { 18571 x := v.Args[0] 18572 y := v.Args[1] 18573 v.reset(OpAMD64SUBL) 18574 v.AddArg(x) 18575 v.AddArg(y) 18576 return true 18577 } 18578 } 18579 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { 18580 b := v.Block 18581 _ = b 18582 // match: (Sub32 x y) 18583 // cond: 18584 // result: (SUBL x y) 18585 for { 18586 x := v.Args[0] 18587 y := v.Args[1] 18588 v.reset(OpAMD64SUBL) 18589 v.AddArg(x) 18590 v.AddArg(y) 18591 return true 18592 } 18593 } 18594 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { 18595 b := v.Block 18596 _ = b 18597 // match: (Sub32F x y) 18598 // cond: 18599 // result: (SUBSS x y) 18600 for { 18601 x := v.Args[0] 18602 y := v.Args[1] 18603 v.reset(OpAMD64SUBSS) 18604 v.AddArg(x) 18605 v.AddArg(y) 18606 return true 18607 } 18608 } 18609 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { 18610 b := v.Block 18611 _ = b 18612 // match: (Sub64 x y) 18613 // cond: 18614 // result: (SUBQ x y) 18615 for { 18616 x := v.Args[0] 18617 y := v.Args[1] 18618 v.reset(OpAMD64SUBQ) 18619 v.AddArg(x) 18620 v.AddArg(y) 18621 return true 18622 } 18623 } 18624 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { 18625 b := v.Block 18626 _ = b 18627 // match: (Sub64F x y) 18628 // cond: 18629 // result: (SUBSD x y) 18630 for { 18631 x := v.Args[0] 18632 y := v.Args[1] 18633 v.reset(OpAMD64SUBSD) 18634 v.AddArg(x) 18635 v.AddArg(y) 18636 return true 18637 } 18638 } 18639 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { 18640 b := v.Block 18641 _ = b 18642 // match: (Sub8 x y) 18643 // cond: 18644 // result: (SUBL x y) 18645 for { 18646 x := v.Args[0] 18647 y := v.Args[1] 18648 v.reset(OpAMD64SUBL) 18649 v.AddArg(x) 18650 v.AddArg(y) 18651 return true 18652 } 18653 } 18654 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { 18655 b := v.Block 18656 _ = b 18657 // match: (SubPtr x y) 18658 // cond: config.PtrSize == 8 18659 // result: (SUBQ x y) 18660 for { 18661 x := v.Args[0] 18662 y := v.Args[1] 18663 if !(config.PtrSize == 8) { 18664 break 18665 } 18666 v.reset(OpAMD64SUBQ) 18667 v.AddArg(x) 18668 v.AddArg(y) 18669 return true 18670 } 18671 // match: (SubPtr x y) 18672 // cond: config.PtrSize == 4 18673 // result: (SUBL x y) 18674 for { 18675 x := v.Args[0] 18676 y := v.Args[1] 18677 
if !(config.PtrSize == 4) { 18678 break 18679 } 18680 v.reset(OpAMD64SUBL) 18681 v.AddArg(x) 18682 v.AddArg(y) 18683 return true 18684 } 18685 return false 18686 } 18687 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { 18688 b := v.Block 18689 _ = b 18690 // match: (Trunc16to8 x) 18691 // cond: 18692 // result: x 18693 for { 18694 x := v.Args[0] 18695 v.reset(OpCopy) 18696 v.Type = x.Type 18697 v.AddArg(x) 18698 return true 18699 } 18700 } 18701 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { 18702 b := v.Block 18703 _ = b 18704 // match: (Trunc32to16 x) 18705 // cond: 18706 // result: x 18707 for { 18708 x := v.Args[0] 18709 v.reset(OpCopy) 18710 v.Type = x.Type 18711 v.AddArg(x) 18712 return true 18713 } 18714 } 18715 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { 18716 b := v.Block 18717 _ = b 18718 // match: (Trunc32to8 x) 18719 // cond: 18720 // result: x 18721 for { 18722 x := v.Args[0] 18723 v.reset(OpCopy) 18724 v.Type = x.Type 18725 v.AddArg(x) 18726 return true 18727 } 18728 } 18729 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { 18730 b := v.Block 18731 _ = b 18732 // match: (Trunc64to16 x) 18733 // cond: 18734 // result: x 18735 for { 18736 x := v.Args[0] 18737 v.reset(OpCopy) 18738 v.Type = x.Type 18739 v.AddArg(x) 18740 return true 18741 } 18742 } 18743 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { 18744 b := v.Block 18745 _ = b 18746 // match: (Trunc64to32 x) 18747 // cond: 18748 // result: x 18749 for { 18750 x := v.Args[0] 18751 v.reset(OpCopy) 18752 v.Type = x.Type 18753 v.AddArg(x) 18754 return true 18755 } 18756 } 18757 func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { 18758 b := v.Block 18759 _ = b 18760 // match: (Trunc64to8 x) 18761 // cond: 18762 // result: x 18763 for { 18764 x := v.Args[0] 18765 v.reset(OpCopy) 18766 v.Type = x.Type 18767 v.AddArg(x) 18768 return true 18769 } 18770 } 18771 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { 18772 b := v.Block 18773 _ = b 18774 // match: (Xor16 x y) 18775 // cond: 18776 // result: (XORL x y) 18777 for { 18778 x := v.Args[0] 18779 y := v.Args[1] 18780 v.reset(OpAMD64XORL) 18781 v.AddArg(x) 18782 v.AddArg(y) 18783 return true 18784 } 18785 } 18786 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { 18787 b := v.Block 18788 _ = b 18789 // match: (Xor32 x y) 18790 // cond: 18791 // result: (XORL x y) 18792 for { 18793 x := v.Args[0] 18794 y := v.Args[1] 18795 v.reset(OpAMD64XORL) 18796 v.AddArg(x) 18797 v.AddArg(y) 18798 return true 18799 } 18800 } 18801 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { 18802 b := v.Block 18803 _ = b 18804 // match: (Xor64 x y) 18805 // cond: 18806 // result: (XORQ x y) 18807 for { 18808 x := v.Args[0] 18809 y := v.Args[1] 18810 v.reset(OpAMD64XORQ) 18811 v.AddArg(x) 18812 v.AddArg(y) 18813 return true 18814 } 18815 } 18816 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { 18817 b := v.Block 18818 _ = b 18819 // match: (Xor8 x y) 18820 // cond: 18821 // result: (XORL x y) 18822 for { 18823 x := v.Args[0] 18824 y := v.Args[1] 18825 v.reset(OpAMD64XORL) 18826 v.AddArg(x) 18827 v.AddArg(y) 18828 return true 18829 } 18830 } 18831 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool { 18832 b := v.Block 18833 _ = b 18834 // match: (Zero [s] _ mem) 18835 // cond: SizeAndAlign(s).Size() == 0 18836 // result: mem 18837 for { 18838 s := v.AuxInt 18839 mem := v.Args[1] 18840 if !(SizeAndAlign(s).Size() == 0) { 
18841 break 18842 } 18843 v.reset(OpCopy) 18844 v.Type = mem.Type 18845 v.AddArg(mem) 18846 return true 18847 } 18848 // match: (Zero [s] destptr mem) 18849 // cond: SizeAndAlign(s).Size() == 1 18850 // result: (MOVBstoreconst [0] destptr mem) 18851 for { 18852 s := v.AuxInt 18853 destptr := v.Args[0] 18854 mem := v.Args[1] 18855 if !(SizeAndAlign(s).Size() == 1) { 18856 break 18857 } 18858 v.reset(OpAMD64MOVBstoreconst) 18859 v.AuxInt = 0 18860 v.AddArg(destptr) 18861 v.AddArg(mem) 18862 return true 18863 } 18864 // match: (Zero [s] destptr mem) 18865 // cond: SizeAndAlign(s).Size() == 2 18866 // result: (MOVWstoreconst [0] destptr mem) 18867 for { 18868 s := v.AuxInt 18869 destptr := v.Args[0] 18870 mem := v.Args[1] 18871 if !(SizeAndAlign(s).Size() == 2) { 18872 break 18873 } 18874 v.reset(OpAMD64MOVWstoreconst) 18875 v.AuxInt = 0 18876 v.AddArg(destptr) 18877 v.AddArg(mem) 18878 return true 18879 } 18880 // match: (Zero [s] destptr mem) 18881 // cond: SizeAndAlign(s).Size() == 4 18882 // result: (MOVLstoreconst [0] destptr mem) 18883 for { 18884 s := v.AuxInt 18885 destptr := v.Args[0] 18886 mem := v.Args[1] 18887 if !(SizeAndAlign(s).Size() == 4) { 18888 break 18889 } 18890 v.reset(OpAMD64MOVLstoreconst) 18891 v.AuxInt = 0 18892 v.AddArg(destptr) 18893 v.AddArg(mem) 18894 return true 18895 } 18896 // match: (Zero [s] destptr mem) 18897 // cond: SizeAndAlign(s).Size() == 8 18898 // result: (MOVQstoreconst [0] destptr mem) 18899 for { 18900 s := v.AuxInt 18901 destptr := v.Args[0] 18902 mem := v.Args[1] 18903 if !(SizeAndAlign(s).Size() == 8) { 18904 break 18905 } 18906 v.reset(OpAMD64MOVQstoreconst) 18907 v.AuxInt = 0 18908 v.AddArg(destptr) 18909 v.AddArg(mem) 18910 return true 18911 } 18912 // match: (Zero [s] destptr mem) 18913 // cond: SizeAndAlign(s).Size() == 3 18914 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 18915 for { 18916 s := v.AuxInt 18917 destptr := v.Args[0] 18918 mem := v.Args[1] 18919 if !(SizeAndAlign(s).Size() == 3) { 18920 break 18921 } 18922 v.reset(OpAMD64MOVBstoreconst) 18923 v.AuxInt = makeValAndOff(0, 2) 18924 v.AddArg(destptr) 18925 v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem) 18926 v0.AuxInt = 0 18927 v0.AddArg(destptr) 18928 v0.AddArg(mem) 18929 v.AddArg(v0) 18930 return true 18931 } 18932 // match: (Zero [s] destptr mem) 18933 // cond: SizeAndAlign(s).Size() == 5 18934 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 18935 for { 18936 s := v.AuxInt 18937 destptr := v.Args[0] 18938 mem := v.Args[1] 18939 if !(SizeAndAlign(s).Size() == 5) { 18940 break 18941 } 18942 v.reset(OpAMD64MOVBstoreconst) 18943 v.AuxInt = makeValAndOff(0, 4) 18944 v.AddArg(destptr) 18945 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 18946 v0.AuxInt = 0 18947 v0.AddArg(destptr) 18948 v0.AddArg(mem) 18949 v.AddArg(v0) 18950 return true 18951 } 18952 // match: (Zero [s] destptr mem) 18953 // cond: SizeAndAlign(s).Size() == 6 18954 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 18955 for { 18956 s := v.AuxInt 18957 destptr := v.Args[0] 18958 mem := v.Args[1] 18959 if !(SizeAndAlign(s).Size() == 6) { 18960 break 18961 } 18962 v.reset(OpAMD64MOVWstoreconst) 18963 v.AuxInt = makeValAndOff(0, 4) 18964 v.AddArg(destptr) 18965 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 18966 v0.AuxInt = 0 18967 v0.AddArg(destptr) 18968 v0.AddArg(mem) 18969 v.AddArg(v0) 18970 return true 18971 } 18972 // match: (Zero [s] destptr mem) 18973 // 
cond: SizeAndAlign(s).Size() == 7 18974 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 18975 for { 18976 s := v.AuxInt 18977 destptr := v.Args[0] 18978 mem := v.Args[1] 18979 if !(SizeAndAlign(s).Size() == 7) { 18980 break 18981 } 18982 v.reset(OpAMD64MOVLstoreconst) 18983 v.AuxInt = makeValAndOff(0, 3) 18984 v.AddArg(destptr) 18985 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 18986 v0.AuxInt = 0 18987 v0.AddArg(destptr) 18988 v0.AddArg(mem) 18989 v.AddArg(v0) 18990 return true 18991 } 18992 // match: (Zero [s] destptr mem) 18993 // cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 18994 // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem)) 18995 for { 18996 s := v.AuxInt 18997 destptr := v.Args[0] 18998 mem := v.Args[1] 18999 if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) { 19000 break 19001 } 19002 v.reset(OpZero) 19003 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8 19004 v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type) 19005 v0.AuxInt = SizeAndAlign(s).Size() % 8 19006 v0.AddArg(destptr) 19007 v.AddArg(v0) 19008 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19009 v1.AuxInt = 0 19010 v1.AddArg(destptr) 19011 v1.AddArg(mem) 19012 v.AddArg(v1) 19013 return true 19014 } 19015 // match: (Zero [s] destptr mem) 19016 // cond: SizeAndAlign(s).Size() == 16 19017 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 19018 for { 19019 s := v.AuxInt 19020 destptr := v.Args[0] 19021 mem := v.Args[1] 19022 if !(SizeAndAlign(s).Size() == 16) { 19023 break 19024 } 19025 v.reset(OpAMD64MOVQstoreconst) 19026 v.AuxInt = makeValAndOff(0, 8) 19027 v.AddArg(destptr) 19028 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19029 v0.AuxInt = 0 19030 v0.AddArg(destptr) 19031 v0.AddArg(mem) 19032 v.AddArg(v0) 19033 return true 19034 } 19035 // match: (Zero [s] destptr mem) 19036 // cond: SizeAndAlign(s).Size() == 24 19037 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 19038 for { 19039 s := v.AuxInt 19040 destptr := v.Args[0] 19041 mem := v.Args[1] 19042 if !(SizeAndAlign(s).Size() == 24) { 19043 break 19044 } 19045 v.reset(OpAMD64MOVQstoreconst) 19046 v.AuxInt = makeValAndOff(0, 16) 19047 v.AddArg(destptr) 19048 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19049 v0.AuxInt = makeValAndOff(0, 8) 19050 v0.AddArg(destptr) 19051 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19052 v1.AuxInt = 0 19053 v1.AddArg(destptr) 19054 v1.AddArg(mem) 19055 v0.AddArg(v1) 19056 v.AddArg(v0) 19057 return true 19058 } 19059 // match: (Zero [s] destptr mem) 19060 // cond: SizeAndAlign(s).Size() == 32 19061 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 19062 for { 19063 s := v.AuxInt 19064 destptr := v.Args[0] 19065 mem := v.Args[1] 19066 if !(SizeAndAlign(s).Size() == 32) { 19067 break 19068 } 19069 v.reset(OpAMD64MOVQstoreconst) 19070 v.AuxInt = makeValAndOff(0, 24) 19071 v.AddArg(destptr) 19072 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19073 v0.AuxInt = makeValAndOff(0, 16) 19074 v0.AddArg(destptr) 19075 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19076 v1.AuxInt = makeValAndOff(0, 8) 19077 
v1.AddArg(destptr) 19078 v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 19079 v2.AuxInt = 0 19080 v2.AddArg(destptr) 19081 v2.AddArg(mem) 19082 v1.AddArg(v2) 19083 v0.AddArg(v1) 19084 v.AddArg(v0) 19085 return true 19086 } 19087 // match: (Zero [s] destptr mem) 19088 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice 19089 // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 19090 for { 19091 s := v.AuxInt 19092 destptr := v.Args[0] 19093 mem := v.Args[1] 19094 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) { 19095 break 19096 } 19097 v.reset(OpZero) 19098 v.AuxInt = SizeAndAlign(s).Size() - 8 19099 v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type) 19100 v0.AuxInt = 8 19101 v0.AddArg(destptr) 19102 v.AddArg(v0) 19103 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 19104 v1.AddArg(destptr) 19105 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 19106 v2.AuxInt = 0 19107 v1.AddArg(v2) 19108 v1.AddArg(mem) 19109 v.AddArg(v1) 19110 return true 19111 } 19112 // match: (Zero [s] destptr mem) 19113 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 19114 // result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem) 19115 for { 19116 s := v.AuxInt 19117 destptr := v.Args[0] 19118 mem := v.Args[1] 19119 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 19120 break 19121 } 19122 v.reset(OpAMD64DUFFZERO) 19123 v.AuxInt = SizeAndAlign(s).Size() 19124 v.AddArg(destptr) 19125 v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128) 19126 v0.AuxInt = 0 19127 v.AddArg(v0) 19128 v.AddArg(mem) 19129 return true 19130 } 19131 // match: (Zero [s] destptr mem) 19132 // cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0 19133 // result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem) 19134 for { 19135 s := v.AuxInt 19136 destptr := v.Args[0] 19137 mem := v.Args[1] 19138 if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) { 19139 break 19140 } 19141 v.reset(OpAMD64REPSTOSQ) 19142 v.AddArg(destptr) 19143 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 19144 v0.AuxInt = SizeAndAlign(s).Size() / 8 19145 v.AddArg(v0) 19146 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 19147 v1.AuxInt = 0 19148 v.AddArg(v1) 19149 v.AddArg(mem) 19150 return true 19151 } 19152 return false 19153 } 19154 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool { 19155 b := v.Block 19156 _ = b 19157 // match: (ZeroExt16to32 x) 19158 // cond: 19159 // result: (MOVWQZX x) 19160 for { 19161 x := v.Args[0] 19162 v.reset(OpAMD64MOVWQZX) 19163 v.AddArg(x) 19164 return true 19165 } 19166 } 19167 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool { 19168 b := v.Block 19169 _ = b 19170 // match: (ZeroExt16to64 x) 19171 // cond: 19172 // result: (MOVWQZX x) 19173 for { 19174 x := v.Args[0] 19175 v.reset(OpAMD64MOVWQZX) 19176 v.AddArg(x) 19177 return true 19178 } 19179 } 19180 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool { 19181 b := v.Block 19182 _ = b 19183 // match: (ZeroExt32to64 x) 19184 // cond: 19185 // 
result: (MOVLQZX x) 19186 for { 19187 x := v.Args[0] 19188 v.reset(OpAMD64MOVLQZX) 19189 v.AddArg(x) 19190 return true 19191 } 19192 } 19193 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool { 19194 b := v.Block 19195 _ = b 19196 // match: (ZeroExt8to16 x) 19197 // cond: 19198 // result: (MOVBQZX x) 19199 for { 19200 x := v.Args[0] 19201 v.reset(OpAMD64MOVBQZX) 19202 v.AddArg(x) 19203 return true 19204 } 19205 } 19206 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool { 19207 b := v.Block 19208 _ = b 19209 // match: (ZeroExt8to32 x) 19210 // cond: 19211 // result: (MOVBQZX x) 19212 for { 19213 x := v.Args[0] 19214 v.reset(OpAMD64MOVBQZX) 19215 v.AddArg(x) 19216 return true 19217 } 19218 } 19219 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool { 19220 b := v.Block 19221 _ = b 19222 // match: (ZeroExt8to64 x) 19223 // cond: 19224 // result: (MOVBQZX x) 19225 for { 19226 x := v.Args[0] 19227 v.reset(OpAMD64MOVBQZX) 19228 v.AddArg(x) 19229 return true 19230 } 19231 } 19232 func rewriteBlockAMD64(b *Block, config *Config) bool { 19233 switch b.Kind { 19234 case BlockAMD64EQ: 19235 // match: (EQ (InvertFlags cmp) yes no) 19236 // cond: 19237 // result: (EQ cmp yes no) 19238 for { 19239 v := b.Control 19240 if v.Op != OpAMD64InvertFlags { 19241 break 19242 } 19243 cmp := v.Args[0] 19244 yes := b.Succs[0] 19245 no := b.Succs[1] 19246 b.Kind = BlockAMD64EQ 19247 b.SetControl(cmp) 19248 _ = yes 19249 _ = no 19250 return true 19251 } 19252 // match: (EQ (FlagEQ) yes no) 19253 // cond: 19254 // result: (First nil yes no) 19255 for { 19256 v := b.Control 19257 if v.Op != OpAMD64FlagEQ { 19258 break 19259 } 19260 yes := b.Succs[0] 19261 no := b.Succs[1] 19262 b.Kind = BlockFirst 19263 b.SetControl(nil) 19264 _ = yes 19265 _ = no 19266 return true 19267 } 19268 // match: (EQ (FlagLT_ULT) yes no) 19269 // cond: 19270 // result: (First nil no yes) 19271 for { 19272 v := b.Control 19273 if v.Op != OpAMD64FlagLT_ULT { 19274 break 19275 } 19276 yes := b.Succs[0] 19277 no := b.Succs[1] 19278 b.Kind = BlockFirst 19279 b.SetControl(nil) 19280 b.swapSuccessors() 19281 _ = no 19282 _ = yes 19283 return true 19284 } 19285 // match: (EQ (FlagLT_UGT) yes no) 19286 // cond: 19287 // result: (First nil no yes) 19288 for { 19289 v := b.Control 19290 if v.Op != OpAMD64FlagLT_UGT { 19291 break 19292 } 19293 yes := b.Succs[0] 19294 no := b.Succs[1] 19295 b.Kind = BlockFirst 19296 b.SetControl(nil) 19297 b.swapSuccessors() 19298 _ = no 19299 _ = yes 19300 return true 19301 } 19302 // match: (EQ (FlagGT_ULT) yes no) 19303 // cond: 19304 // result: (First nil no yes) 19305 for { 19306 v := b.Control 19307 if v.Op != OpAMD64FlagGT_ULT { 19308 break 19309 } 19310 yes := b.Succs[0] 19311 no := b.Succs[1] 19312 b.Kind = BlockFirst 19313 b.SetControl(nil) 19314 b.swapSuccessors() 19315 _ = no 19316 _ = yes 19317 return true 19318 } 19319 // match: (EQ (FlagGT_UGT) yes no) 19320 // cond: 19321 // result: (First nil no yes) 19322 for { 19323 v := b.Control 19324 if v.Op != OpAMD64FlagGT_UGT { 19325 break 19326 } 19327 yes := b.Succs[0] 19328 no := b.Succs[1] 19329 b.Kind = BlockFirst 19330 b.SetControl(nil) 19331 b.swapSuccessors() 19332 _ = no 19333 _ = yes 19334 return true 19335 } 19336 case BlockAMD64GE: 19337 // match: (GE (InvertFlags cmp) yes no) 19338 // cond: 19339 // result: (LE cmp yes no) 19340 for { 19341 v := b.Control 19342 if v.Op != OpAMD64InvertFlags { 19343 break 19344 } 19345 cmp := v.Args[0] 19346 yes := b.Succs[0] 19347 no := b.Succs[1] 19348 b.Kind = 
BlockAMD64LE 19349 b.SetControl(cmp) 19350 _ = yes 19351 _ = no 19352 return true 19353 } 19354 // match: (GE (FlagEQ) yes no) 19355 // cond: 19356 // result: (First nil yes no) 19357 for { 19358 v := b.Control 19359 if v.Op != OpAMD64FlagEQ { 19360 break 19361 } 19362 yes := b.Succs[0] 19363 no := b.Succs[1] 19364 b.Kind = BlockFirst 19365 b.SetControl(nil) 19366 _ = yes 19367 _ = no 19368 return true 19369 } 19370 // match: (GE (FlagLT_ULT) yes no) 19371 // cond: 19372 // result: (First nil no yes) 19373 for { 19374 v := b.Control 19375 if v.Op != OpAMD64FlagLT_ULT { 19376 break 19377 } 19378 yes := b.Succs[0] 19379 no := b.Succs[1] 19380 b.Kind = BlockFirst 19381 b.SetControl(nil) 19382 b.swapSuccessors() 19383 _ = no 19384 _ = yes 19385 return true 19386 } 19387 // match: (GE (FlagLT_UGT) yes no) 19388 // cond: 19389 // result: (First nil no yes) 19390 for { 19391 v := b.Control 19392 if v.Op != OpAMD64FlagLT_UGT { 19393 break 19394 } 19395 yes := b.Succs[0] 19396 no := b.Succs[1] 19397 b.Kind = BlockFirst 19398 b.SetControl(nil) 19399 b.swapSuccessors() 19400 _ = no 19401 _ = yes 19402 return true 19403 } 19404 // match: (GE (FlagGT_ULT) yes no) 19405 // cond: 19406 // result: (First nil yes no) 19407 for { 19408 v := b.Control 19409 if v.Op != OpAMD64FlagGT_ULT { 19410 break 19411 } 19412 yes := b.Succs[0] 19413 no := b.Succs[1] 19414 b.Kind = BlockFirst 19415 b.SetControl(nil) 19416 _ = yes 19417 _ = no 19418 return true 19419 } 19420 // match: (GE (FlagGT_UGT) yes no) 19421 // cond: 19422 // result: (First nil yes no) 19423 for { 19424 v := b.Control 19425 if v.Op != OpAMD64FlagGT_UGT { 19426 break 19427 } 19428 yes := b.Succs[0] 19429 no := b.Succs[1] 19430 b.Kind = BlockFirst 19431 b.SetControl(nil) 19432 _ = yes 19433 _ = no 19434 return true 19435 } 19436 case BlockAMD64GT: 19437 // match: (GT (InvertFlags cmp) yes no) 19438 // cond: 19439 // result: (LT cmp yes no) 19440 for { 19441 v := b.Control 19442 if v.Op != OpAMD64InvertFlags { 19443 break 19444 } 19445 cmp := v.Args[0] 19446 yes := b.Succs[0] 19447 no := b.Succs[1] 19448 b.Kind = BlockAMD64LT 19449 b.SetControl(cmp) 19450 _ = yes 19451 _ = no 19452 return true 19453 } 19454 // match: (GT (FlagEQ) yes no) 19455 // cond: 19456 // result: (First nil no yes) 19457 for { 19458 v := b.Control 19459 if v.Op != OpAMD64FlagEQ { 19460 break 19461 } 19462 yes := b.Succs[0] 19463 no := b.Succs[1] 19464 b.Kind = BlockFirst 19465 b.SetControl(nil) 19466 b.swapSuccessors() 19467 _ = no 19468 _ = yes 19469 return true 19470 } 19471 // match: (GT (FlagLT_ULT) yes no) 19472 // cond: 19473 // result: (First nil no yes) 19474 for { 19475 v := b.Control 19476 if v.Op != OpAMD64FlagLT_ULT { 19477 break 19478 } 19479 yes := b.Succs[0] 19480 no := b.Succs[1] 19481 b.Kind = BlockFirst 19482 b.SetControl(nil) 19483 b.swapSuccessors() 19484 _ = no 19485 _ = yes 19486 return true 19487 } 19488 // match: (GT (FlagLT_UGT) yes no) 19489 // cond: 19490 // result: (First nil no yes) 19491 for { 19492 v := b.Control 19493 if v.Op != OpAMD64FlagLT_UGT { 19494 break 19495 } 19496 yes := b.Succs[0] 19497 no := b.Succs[1] 19498 b.Kind = BlockFirst 19499 b.SetControl(nil) 19500 b.swapSuccessors() 19501 _ = no 19502 _ = yes 19503 return true 19504 } 19505 // match: (GT (FlagGT_ULT) yes no) 19506 // cond: 19507 // result: (First nil yes no) 19508 for { 19509 v := b.Control 19510 if v.Op != OpAMD64FlagGT_ULT { 19511 break 19512 } 19513 yes := b.Succs[0] 19514 no := b.Succs[1] 19515 b.Kind = BlockFirst 19516 b.SetControl(nil) 19517 _ = yes 19518 _ = no 19519 
return true 19520 } 19521 // match: (GT (FlagGT_UGT) yes no) 19522 // cond: 19523 // result: (First nil yes no) 19524 for { 19525 v := b.Control 19526 if v.Op != OpAMD64FlagGT_UGT { 19527 break 19528 } 19529 yes := b.Succs[0] 19530 no := b.Succs[1] 19531 b.Kind = BlockFirst 19532 b.SetControl(nil) 19533 _ = yes 19534 _ = no 19535 return true 19536 } 19537 case BlockIf: 19538 // match: (If (SETL cmp) yes no) 19539 // cond: 19540 // result: (LT cmp yes no) 19541 for { 19542 v := b.Control 19543 if v.Op != OpAMD64SETL { 19544 break 19545 } 19546 cmp := v.Args[0] 19547 yes := b.Succs[0] 19548 no := b.Succs[1] 19549 b.Kind = BlockAMD64LT 19550 b.SetControl(cmp) 19551 _ = yes 19552 _ = no 19553 return true 19554 } 19555 // match: (If (SETLE cmp) yes no) 19556 // cond: 19557 // result: (LE cmp yes no) 19558 for { 19559 v := b.Control 19560 if v.Op != OpAMD64SETLE { 19561 break 19562 } 19563 cmp := v.Args[0] 19564 yes := b.Succs[0] 19565 no := b.Succs[1] 19566 b.Kind = BlockAMD64LE 19567 b.SetControl(cmp) 19568 _ = yes 19569 _ = no 19570 return true 19571 } 19572 // match: (If (SETG cmp) yes no) 19573 // cond: 19574 // result: (GT cmp yes no) 19575 for { 19576 v := b.Control 19577 if v.Op != OpAMD64SETG { 19578 break 19579 } 19580 cmp := v.Args[0] 19581 yes := b.Succs[0] 19582 no := b.Succs[1] 19583 b.Kind = BlockAMD64GT 19584 b.SetControl(cmp) 19585 _ = yes 19586 _ = no 19587 return true 19588 } 19589 // match: (If (SETGE cmp) yes no) 19590 // cond: 19591 // result: (GE cmp yes no) 19592 for { 19593 v := b.Control 19594 if v.Op != OpAMD64SETGE { 19595 break 19596 } 19597 cmp := v.Args[0] 19598 yes := b.Succs[0] 19599 no := b.Succs[1] 19600 b.Kind = BlockAMD64GE 19601 b.SetControl(cmp) 19602 _ = yes 19603 _ = no 19604 return true 19605 } 19606 // match: (If (SETEQ cmp) yes no) 19607 // cond: 19608 // result: (EQ cmp yes no) 19609 for { 19610 v := b.Control 19611 if v.Op != OpAMD64SETEQ { 19612 break 19613 } 19614 cmp := v.Args[0] 19615 yes := b.Succs[0] 19616 no := b.Succs[1] 19617 b.Kind = BlockAMD64EQ 19618 b.SetControl(cmp) 19619 _ = yes 19620 _ = no 19621 return true 19622 } 19623 // match: (If (SETNE cmp) yes no) 19624 // cond: 19625 // result: (NE cmp yes no) 19626 for { 19627 v := b.Control 19628 if v.Op != OpAMD64SETNE { 19629 break 19630 } 19631 cmp := v.Args[0] 19632 yes := b.Succs[0] 19633 no := b.Succs[1] 19634 b.Kind = BlockAMD64NE 19635 b.SetControl(cmp) 19636 _ = yes 19637 _ = no 19638 return true 19639 } 19640 // match: (If (SETB cmp) yes no) 19641 // cond: 19642 // result: (ULT cmp yes no) 19643 for { 19644 v := b.Control 19645 if v.Op != OpAMD64SETB { 19646 break 19647 } 19648 cmp := v.Args[0] 19649 yes := b.Succs[0] 19650 no := b.Succs[1] 19651 b.Kind = BlockAMD64ULT 19652 b.SetControl(cmp) 19653 _ = yes 19654 _ = no 19655 return true 19656 } 19657 // match: (If (SETBE cmp) yes no) 19658 // cond: 19659 // result: (ULE cmp yes no) 19660 for { 19661 v := b.Control 19662 if v.Op != OpAMD64SETBE { 19663 break 19664 } 19665 cmp := v.Args[0] 19666 yes := b.Succs[0] 19667 no := b.Succs[1] 19668 b.Kind = BlockAMD64ULE 19669 b.SetControl(cmp) 19670 _ = yes 19671 _ = no 19672 return true 19673 } 19674 // match: (If (SETA cmp) yes no) 19675 // cond: 19676 // result: (UGT cmp yes no) 19677 for { 19678 v := b.Control 19679 if v.Op != OpAMD64SETA { 19680 break 19681 } 19682 cmp := v.Args[0] 19683 yes := b.Succs[0] 19684 no := b.Succs[1] 19685 b.Kind = BlockAMD64UGT 19686 b.SetControl(cmp) 19687 _ = yes 19688 _ = no 19689 return true 19690 } 19691 // match: (If (SETAE cmp) yes no) 19692 // 
cond: 19693 // result: (UGE cmp yes no) 19694 for { 19695 v := b.Control 19696 if v.Op != OpAMD64SETAE { 19697 break 19698 } 19699 cmp := v.Args[0] 19700 yes := b.Succs[0] 19701 no := b.Succs[1] 19702 b.Kind = BlockAMD64UGE 19703 b.SetControl(cmp) 19704 _ = yes 19705 _ = no 19706 return true 19707 } 19708 // match: (If (SETGF cmp) yes no) 19709 // cond: 19710 // result: (UGT cmp yes no) 19711 for { 19712 v := b.Control 19713 if v.Op != OpAMD64SETGF { 19714 break 19715 } 19716 cmp := v.Args[0] 19717 yes := b.Succs[0] 19718 no := b.Succs[1] 19719 b.Kind = BlockAMD64UGT 19720 b.SetControl(cmp) 19721 _ = yes 19722 _ = no 19723 return true 19724 } 19725 // match: (If (SETGEF cmp) yes no) 19726 // cond: 19727 // result: (UGE cmp yes no) 19728 for { 19729 v := b.Control 19730 if v.Op != OpAMD64SETGEF { 19731 break 19732 } 19733 cmp := v.Args[0] 19734 yes := b.Succs[0] 19735 no := b.Succs[1] 19736 b.Kind = BlockAMD64UGE 19737 b.SetControl(cmp) 19738 _ = yes 19739 _ = no 19740 return true 19741 } 19742 // match: (If (SETEQF cmp) yes no) 19743 // cond: 19744 // result: (EQF cmp yes no) 19745 for { 19746 v := b.Control 19747 if v.Op != OpAMD64SETEQF { 19748 break 19749 } 19750 cmp := v.Args[0] 19751 yes := b.Succs[0] 19752 no := b.Succs[1] 19753 b.Kind = BlockAMD64EQF 19754 b.SetControl(cmp) 19755 _ = yes 19756 _ = no 19757 return true 19758 } 19759 // match: (If (SETNEF cmp) yes no) 19760 // cond: 19761 // result: (NEF cmp yes no) 19762 for { 19763 v := b.Control 19764 if v.Op != OpAMD64SETNEF { 19765 break 19766 } 19767 cmp := v.Args[0] 19768 yes := b.Succs[0] 19769 no := b.Succs[1] 19770 b.Kind = BlockAMD64NEF 19771 b.SetControl(cmp) 19772 _ = yes 19773 _ = no 19774 return true 19775 } 19776 // match: (If cond yes no) 19777 // cond: 19778 // result: (NE (TESTB cond cond) yes no) 19779 for { 19780 v := b.Control 19781 _ = v 19782 cond := b.Control 19783 yes := b.Succs[0] 19784 no := b.Succs[1] 19785 b.Kind = BlockAMD64NE 19786 v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags) 19787 v0.AddArg(cond) 19788 v0.AddArg(cond) 19789 b.SetControl(v0) 19790 _ = yes 19791 _ = no 19792 return true 19793 } 19794 case BlockAMD64LE: 19795 // match: (LE (InvertFlags cmp) yes no) 19796 // cond: 19797 // result: (GE cmp yes no) 19798 for { 19799 v := b.Control 19800 if v.Op != OpAMD64InvertFlags { 19801 break 19802 } 19803 cmp := v.Args[0] 19804 yes := b.Succs[0] 19805 no := b.Succs[1] 19806 b.Kind = BlockAMD64GE 19807 b.SetControl(cmp) 19808 _ = yes 19809 _ = no 19810 return true 19811 } 19812 // match: (LE (FlagEQ) yes no) 19813 // cond: 19814 // result: (First nil yes no) 19815 for { 19816 v := b.Control 19817 if v.Op != OpAMD64FlagEQ { 19818 break 19819 } 19820 yes := b.Succs[0] 19821 no := b.Succs[1] 19822 b.Kind = BlockFirst 19823 b.SetControl(nil) 19824 _ = yes 19825 _ = no 19826 return true 19827 } 19828 // match: (LE (FlagLT_ULT) yes no) 19829 // cond: 19830 // result: (First nil yes no) 19831 for { 19832 v := b.Control 19833 if v.Op != OpAMD64FlagLT_ULT { 19834 break 19835 } 19836 yes := b.Succs[0] 19837 no := b.Succs[1] 19838 b.Kind = BlockFirst 19839 b.SetControl(nil) 19840 _ = yes 19841 _ = no 19842 return true 19843 } 19844 // match: (LE (FlagLT_UGT) yes no) 19845 // cond: 19846 // result: (First nil yes no) 19847 for { 19848 v := b.Control 19849 if v.Op != OpAMD64FlagLT_UGT { 19850 break 19851 } 19852 yes := b.Succs[0] 19853 no := b.Succs[1] 19854 b.Kind = BlockFirst 19855 b.SetControl(nil) 19856 _ = yes 19857 _ = no 19858 return true 19859 } 19860 // match: (LE (FlagGT_ULT) yes no) 19861 // cond: 
19862 // result: (First nil no yes) 19863 for { 19864 v := b.Control 19865 if v.Op != OpAMD64FlagGT_ULT { 19866 break 19867 } 19868 yes := b.Succs[0] 19869 no := b.Succs[1] 19870 b.Kind = BlockFirst 19871 b.SetControl(nil) 19872 b.swapSuccessors() 19873 _ = no 19874 _ = yes 19875 return true 19876 } 19877 // match: (LE (FlagGT_UGT) yes no) 19878 // cond: 19879 // result: (First nil no yes) 19880 for { 19881 v := b.Control 19882 if v.Op != OpAMD64FlagGT_UGT { 19883 break 19884 } 19885 yes := b.Succs[0] 19886 no := b.Succs[1] 19887 b.Kind = BlockFirst 19888 b.SetControl(nil) 19889 b.swapSuccessors() 19890 _ = no 19891 _ = yes 19892 return true 19893 } 19894 case BlockAMD64LT: 19895 // match: (LT (InvertFlags cmp) yes no) 19896 // cond: 19897 // result: (GT cmp yes no) 19898 for { 19899 v := b.Control 19900 if v.Op != OpAMD64InvertFlags { 19901 break 19902 } 19903 cmp := v.Args[0] 19904 yes := b.Succs[0] 19905 no := b.Succs[1] 19906 b.Kind = BlockAMD64GT 19907 b.SetControl(cmp) 19908 _ = yes 19909 _ = no 19910 return true 19911 } 19912 // match: (LT (FlagEQ) yes no) 19913 // cond: 19914 // result: (First nil no yes) 19915 for { 19916 v := b.Control 19917 if v.Op != OpAMD64FlagEQ { 19918 break 19919 } 19920 yes := b.Succs[0] 19921 no := b.Succs[1] 19922 b.Kind = BlockFirst 19923 b.SetControl(nil) 19924 b.swapSuccessors() 19925 _ = no 19926 _ = yes 19927 return true 19928 } 19929 // match: (LT (FlagLT_ULT) yes no) 19930 // cond: 19931 // result: (First nil yes no) 19932 for { 19933 v := b.Control 19934 if v.Op != OpAMD64FlagLT_ULT { 19935 break 19936 } 19937 yes := b.Succs[0] 19938 no := b.Succs[1] 19939 b.Kind = BlockFirst 19940 b.SetControl(nil) 19941 _ = yes 19942 _ = no 19943 return true 19944 } 19945 // match: (LT (FlagLT_UGT) yes no) 19946 // cond: 19947 // result: (First nil yes no) 19948 for { 19949 v := b.Control 19950 if v.Op != OpAMD64FlagLT_UGT { 19951 break 19952 } 19953 yes := b.Succs[0] 19954 no := b.Succs[1] 19955 b.Kind = BlockFirst 19956 b.SetControl(nil) 19957 _ = yes 19958 _ = no 19959 return true 19960 } 19961 // match: (LT (FlagGT_ULT) yes no) 19962 // cond: 19963 // result: (First nil no yes) 19964 for { 19965 v := b.Control 19966 if v.Op != OpAMD64FlagGT_ULT { 19967 break 19968 } 19969 yes := b.Succs[0] 19970 no := b.Succs[1] 19971 b.Kind = BlockFirst 19972 b.SetControl(nil) 19973 b.swapSuccessors() 19974 _ = no 19975 _ = yes 19976 return true 19977 } 19978 // match: (LT (FlagGT_UGT) yes no) 19979 // cond: 19980 // result: (First nil no yes) 19981 for { 19982 v := b.Control 19983 if v.Op != OpAMD64FlagGT_UGT { 19984 break 19985 } 19986 yes := b.Succs[0] 19987 no := b.Succs[1] 19988 b.Kind = BlockFirst 19989 b.SetControl(nil) 19990 b.swapSuccessors() 19991 _ = no 19992 _ = yes 19993 return true 19994 } 19995 case BlockAMD64NE: 19996 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 19997 // cond: 19998 // result: (LT cmp yes no) 19999 for { 20000 v := b.Control 20001 if v.Op != OpAMD64TESTB { 20002 break 20003 } 20004 v_0 := v.Args[0] 20005 if v_0.Op != OpAMD64SETL { 20006 break 20007 } 20008 cmp := v_0.Args[0] 20009 v_1 := v.Args[1] 20010 if v_1.Op != OpAMD64SETL { 20011 break 20012 } 20013 if cmp != v_1.Args[0] { 20014 break 20015 } 20016 yes := b.Succs[0] 20017 no := b.Succs[1] 20018 b.Kind = BlockAMD64LT 20019 b.SetControl(cmp) 20020 _ = yes 20021 _ = no 20022 return true 20023 } 20024 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 20025 // cond: 20026 // result: (LE cmp yes no) 20027 for { 20028 v := b.Control 20029 if v.Op != OpAMD64TESTB { 20030 break 
20031 } 20032 v_0 := v.Args[0] 20033 if v_0.Op != OpAMD64SETLE { 20034 break 20035 } 20036 cmp := v_0.Args[0] 20037 v_1 := v.Args[1] 20038 if v_1.Op != OpAMD64SETLE { 20039 break 20040 } 20041 if cmp != v_1.Args[0] { 20042 break 20043 } 20044 yes := b.Succs[0] 20045 no := b.Succs[1] 20046 b.Kind = BlockAMD64LE 20047 b.SetControl(cmp) 20048 _ = yes 20049 _ = no 20050 return true 20051 } 20052 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 20053 // cond: 20054 // result: (GT cmp yes no) 20055 for { 20056 v := b.Control 20057 if v.Op != OpAMD64TESTB { 20058 break 20059 } 20060 v_0 := v.Args[0] 20061 if v_0.Op != OpAMD64SETG { 20062 break 20063 } 20064 cmp := v_0.Args[0] 20065 v_1 := v.Args[1] 20066 if v_1.Op != OpAMD64SETG { 20067 break 20068 } 20069 if cmp != v_1.Args[0] { 20070 break 20071 } 20072 yes := b.Succs[0] 20073 no := b.Succs[1] 20074 b.Kind = BlockAMD64GT 20075 b.SetControl(cmp) 20076 _ = yes 20077 _ = no 20078 return true 20079 } 20080 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 20081 // cond: 20082 // result: (GE cmp yes no) 20083 for { 20084 v := b.Control 20085 if v.Op != OpAMD64TESTB { 20086 break 20087 } 20088 v_0 := v.Args[0] 20089 if v_0.Op != OpAMD64SETGE { 20090 break 20091 } 20092 cmp := v_0.Args[0] 20093 v_1 := v.Args[1] 20094 if v_1.Op != OpAMD64SETGE { 20095 break 20096 } 20097 if cmp != v_1.Args[0] { 20098 break 20099 } 20100 yes := b.Succs[0] 20101 no := b.Succs[1] 20102 b.Kind = BlockAMD64GE 20103 b.SetControl(cmp) 20104 _ = yes 20105 _ = no 20106 return true 20107 } 20108 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 20109 // cond: 20110 // result: (EQ cmp yes no) 20111 for { 20112 v := b.Control 20113 if v.Op != OpAMD64TESTB { 20114 break 20115 } 20116 v_0 := v.Args[0] 20117 if v_0.Op != OpAMD64SETEQ { 20118 break 20119 } 20120 cmp := v_0.Args[0] 20121 v_1 := v.Args[1] 20122 if v_1.Op != OpAMD64SETEQ { 20123 break 20124 } 20125 if cmp != v_1.Args[0] { 20126 break 20127 } 20128 yes := b.Succs[0] 20129 no := b.Succs[1] 20130 b.Kind = BlockAMD64EQ 20131 b.SetControl(cmp) 20132 _ = yes 20133 _ = no 20134 return true 20135 } 20136 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 20137 // cond: 20138 // result: (NE cmp yes no) 20139 for { 20140 v := b.Control 20141 if v.Op != OpAMD64TESTB { 20142 break 20143 } 20144 v_0 := v.Args[0] 20145 if v_0.Op != OpAMD64SETNE { 20146 break 20147 } 20148 cmp := v_0.Args[0] 20149 v_1 := v.Args[1] 20150 if v_1.Op != OpAMD64SETNE { 20151 break 20152 } 20153 if cmp != v_1.Args[0] { 20154 break 20155 } 20156 yes := b.Succs[0] 20157 no := b.Succs[1] 20158 b.Kind = BlockAMD64NE 20159 b.SetControl(cmp) 20160 _ = yes 20161 _ = no 20162 return true 20163 } 20164 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 20165 // cond: 20166 // result: (ULT cmp yes no) 20167 for { 20168 v := b.Control 20169 if v.Op != OpAMD64TESTB { 20170 break 20171 } 20172 v_0 := v.Args[0] 20173 if v_0.Op != OpAMD64SETB { 20174 break 20175 } 20176 cmp := v_0.Args[0] 20177 v_1 := v.Args[1] 20178 if v_1.Op != OpAMD64SETB { 20179 break 20180 } 20181 if cmp != v_1.Args[0] { 20182 break 20183 } 20184 yes := b.Succs[0] 20185 no := b.Succs[1] 20186 b.Kind = BlockAMD64ULT 20187 b.SetControl(cmp) 20188 _ = yes 20189 _ = no 20190 return true 20191 } 20192 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 20193 // cond: 20194 // result: (ULE cmp yes no) 20195 for { 20196 v := b.Control 20197 if v.Op != OpAMD64TESTB { 20198 break 20199 } 20200 v_0 := v.Args[0] 20201 if v_0.Op != OpAMD64SETBE { 20202 break 20203 } 20204 cmp := v_0.Args[0] 20205 
v_1 := v.Args[1] 20206 if v_1.Op != OpAMD64SETBE { 20207 break 20208 } 20209 if cmp != v_1.Args[0] { 20210 break 20211 } 20212 yes := b.Succs[0] 20213 no := b.Succs[1] 20214 b.Kind = BlockAMD64ULE 20215 b.SetControl(cmp) 20216 _ = yes 20217 _ = no 20218 return true 20219 } 20220 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 20221 // cond: 20222 // result: (UGT cmp yes no) 20223 for { 20224 v := b.Control 20225 if v.Op != OpAMD64TESTB { 20226 break 20227 } 20228 v_0 := v.Args[0] 20229 if v_0.Op != OpAMD64SETA { 20230 break 20231 } 20232 cmp := v_0.Args[0] 20233 v_1 := v.Args[1] 20234 if v_1.Op != OpAMD64SETA { 20235 break 20236 } 20237 if cmp != v_1.Args[0] { 20238 break 20239 } 20240 yes := b.Succs[0] 20241 no := b.Succs[1] 20242 b.Kind = BlockAMD64UGT 20243 b.SetControl(cmp) 20244 _ = yes 20245 _ = no 20246 return true 20247 } 20248 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 20249 // cond: 20250 // result: (UGE cmp yes no) 20251 for { 20252 v := b.Control 20253 if v.Op != OpAMD64TESTB { 20254 break 20255 } 20256 v_0 := v.Args[0] 20257 if v_0.Op != OpAMD64SETAE { 20258 break 20259 } 20260 cmp := v_0.Args[0] 20261 v_1 := v.Args[1] 20262 if v_1.Op != OpAMD64SETAE { 20263 break 20264 } 20265 if cmp != v_1.Args[0] { 20266 break 20267 } 20268 yes := b.Succs[0] 20269 no := b.Succs[1] 20270 b.Kind = BlockAMD64UGE 20271 b.SetControl(cmp) 20272 _ = yes 20273 _ = no 20274 return true 20275 } 20276 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 20277 // cond: 20278 // result: (UGT cmp yes no) 20279 for { 20280 v := b.Control 20281 if v.Op != OpAMD64TESTB { 20282 break 20283 } 20284 v_0 := v.Args[0] 20285 if v_0.Op != OpAMD64SETGF { 20286 break 20287 } 20288 cmp := v_0.Args[0] 20289 v_1 := v.Args[1] 20290 if v_1.Op != OpAMD64SETGF { 20291 break 20292 } 20293 if cmp != v_1.Args[0] { 20294 break 20295 } 20296 yes := b.Succs[0] 20297 no := b.Succs[1] 20298 b.Kind = BlockAMD64UGT 20299 b.SetControl(cmp) 20300 _ = yes 20301 _ = no 20302 return true 20303 } 20304 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 20305 // cond: 20306 // result: (UGE cmp yes no) 20307 for { 20308 v := b.Control 20309 if v.Op != OpAMD64TESTB { 20310 break 20311 } 20312 v_0 := v.Args[0] 20313 if v_0.Op != OpAMD64SETGEF { 20314 break 20315 } 20316 cmp := v_0.Args[0] 20317 v_1 := v.Args[1] 20318 if v_1.Op != OpAMD64SETGEF { 20319 break 20320 } 20321 if cmp != v_1.Args[0] { 20322 break 20323 } 20324 yes := b.Succs[0] 20325 no := b.Succs[1] 20326 b.Kind = BlockAMD64UGE 20327 b.SetControl(cmp) 20328 _ = yes 20329 _ = no 20330 return true 20331 } 20332 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 20333 // cond: 20334 // result: (EQF cmp yes no) 20335 for { 20336 v := b.Control 20337 if v.Op != OpAMD64TESTB { 20338 break 20339 } 20340 v_0 := v.Args[0] 20341 if v_0.Op != OpAMD64SETEQF { 20342 break 20343 } 20344 cmp := v_0.Args[0] 20345 v_1 := v.Args[1] 20346 if v_1.Op != OpAMD64SETEQF { 20347 break 20348 } 20349 if cmp != v_1.Args[0] { 20350 break 20351 } 20352 yes := b.Succs[0] 20353 no := b.Succs[1] 20354 b.Kind = BlockAMD64EQF 20355 b.SetControl(cmp) 20356 _ = yes 20357 _ = no 20358 return true 20359 } 20360 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 20361 // cond: 20362 // result: (NEF cmp yes no) 20363 for { 20364 v := b.Control 20365 if v.Op != OpAMD64TESTB { 20366 break 20367 } 20368 v_0 := v.Args[0] 20369 if v_0.Op != OpAMD64SETNEF { 20370 break 20371 } 20372 cmp := v_0.Args[0] 20373 v_1 := v.Args[1] 20374 if v_1.Op != OpAMD64SETNEF { 20375 break 20376 } 20377 if cmp != 
v_1.Args[0] { 20378 break 20379 } 20380 yes := b.Succs[0] 20381 no := b.Succs[1] 20382 b.Kind = BlockAMD64NEF 20383 b.SetControl(cmp) 20384 _ = yes 20385 _ = no 20386 return true 20387 } 20388 // match: (NE (InvertFlags cmp) yes no) 20389 // cond: 20390 // result: (NE cmp yes no) 20391 for { 20392 v := b.Control 20393 if v.Op != OpAMD64InvertFlags { 20394 break 20395 } 20396 cmp := v.Args[0] 20397 yes := b.Succs[0] 20398 no := b.Succs[1] 20399 b.Kind = BlockAMD64NE 20400 b.SetControl(cmp) 20401 _ = yes 20402 _ = no 20403 return true 20404 } 20405 // match: (NE (FlagEQ) yes no) 20406 // cond: 20407 // result: (First nil no yes) 20408 for { 20409 v := b.Control 20410 if v.Op != OpAMD64FlagEQ { 20411 break 20412 } 20413 yes := b.Succs[0] 20414 no := b.Succs[1] 20415 b.Kind = BlockFirst 20416 b.SetControl(nil) 20417 b.swapSuccessors() 20418 _ = no 20419 _ = yes 20420 return true 20421 } 20422 // match: (NE (FlagLT_ULT) yes no) 20423 // cond: 20424 // result: (First nil yes no) 20425 for { 20426 v := b.Control 20427 if v.Op != OpAMD64FlagLT_ULT { 20428 break 20429 } 20430 yes := b.Succs[0] 20431 no := b.Succs[1] 20432 b.Kind = BlockFirst 20433 b.SetControl(nil) 20434 _ = yes 20435 _ = no 20436 return true 20437 } 20438 // match: (NE (FlagLT_UGT) yes no) 20439 // cond: 20440 // result: (First nil yes no) 20441 for { 20442 v := b.Control 20443 if v.Op != OpAMD64FlagLT_UGT { 20444 break 20445 } 20446 yes := b.Succs[0] 20447 no := b.Succs[1] 20448 b.Kind = BlockFirst 20449 b.SetControl(nil) 20450 _ = yes 20451 _ = no 20452 return true 20453 } 20454 // match: (NE (FlagGT_ULT) yes no) 20455 // cond: 20456 // result: (First nil yes no) 20457 for { 20458 v := b.Control 20459 if v.Op != OpAMD64FlagGT_ULT { 20460 break 20461 } 20462 yes := b.Succs[0] 20463 no := b.Succs[1] 20464 b.Kind = BlockFirst 20465 b.SetControl(nil) 20466 _ = yes 20467 _ = no 20468 return true 20469 } 20470 // match: (NE (FlagGT_UGT) yes no) 20471 // cond: 20472 // result: (First nil yes no) 20473 for { 20474 v := b.Control 20475 if v.Op != OpAMD64FlagGT_UGT { 20476 break 20477 } 20478 yes := b.Succs[0] 20479 no := b.Succs[1] 20480 b.Kind = BlockFirst 20481 b.SetControl(nil) 20482 _ = yes 20483 _ = no 20484 return true 20485 } 20486 case BlockAMD64UGE: 20487 // match: (UGE (InvertFlags cmp) yes no) 20488 // cond: 20489 // result: (ULE cmp yes no) 20490 for { 20491 v := b.Control 20492 if v.Op != OpAMD64InvertFlags { 20493 break 20494 } 20495 cmp := v.Args[0] 20496 yes := b.Succs[0] 20497 no := b.Succs[1] 20498 b.Kind = BlockAMD64ULE 20499 b.SetControl(cmp) 20500 _ = yes 20501 _ = no 20502 return true 20503 } 20504 // match: (UGE (FlagEQ) yes no) 20505 // cond: 20506 // result: (First nil yes no) 20507 for { 20508 v := b.Control 20509 if v.Op != OpAMD64FlagEQ { 20510 break 20511 } 20512 yes := b.Succs[0] 20513 no := b.Succs[1] 20514 b.Kind = BlockFirst 20515 b.SetControl(nil) 20516 _ = yes 20517 _ = no 20518 return true 20519 } 20520 // match: (UGE (FlagLT_ULT) yes no) 20521 // cond: 20522 // result: (First nil no yes) 20523 for { 20524 v := b.Control 20525 if v.Op != OpAMD64FlagLT_ULT { 20526 break 20527 } 20528 yes := b.Succs[0] 20529 no := b.Succs[1] 20530 b.Kind = BlockFirst 20531 b.SetControl(nil) 20532 b.swapSuccessors() 20533 _ = no 20534 _ = yes 20535 return true 20536 } 20537 // match: (UGE (FlagLT_UGT) yes no) 20538 // cond: 20539 // result: (First nil yes no) 20540 for { 20541 v := b.Control 20542 if v.Op != OpAMD64FlagLT_UGT { 20543 break 20544 } 20545 yes := b.Succs[0] 20546 no := b.Succs[1] 20547 b.Kind = BlockFirst 
20548 b.SetControl(nil) 20549 _ = yes 20550 _ = no 20551 return true 20552 } 20553 // match: (UGE (FlagGT_ULT) yes no) 20554 // cond: 20555 // result: (First nil no yes) 20556 for { 20557 v := b.Control 20558 if v.Op != OpAMD64FlagGT_ULT { 20559 break 20560 } 20561 yes := b.Succs[0] 20562 no := b.Succs[1] 20563 b.Kind = BlockFirst 20564 b.SetControl(nil) 20565 b.swapSuccessors() 20566 _ = no 20567 _ = yes 20568 return true 20569 } 20570 // match: (UGE (FlagGT_UGT) yes no) 20571 // cond: 20572 // result: (First nil yes no) 20573 for { 20574 v := b.Control 20575 if v.Op != OpAMD64FlagGT_UGT { 20576 break 20577 } 20578 yes := b.Succs[0] 20579 no := b.Succs[1] 20580 b.Kind = BlockFirst 20581 b.SetControl(nil) 20582 _ = yes 20583 _ = no 20584 return true 20585 } 20586 case BlockAMD64UGT: 20587 // match: (UGT (InvertFlags cmp) yes no) 20588 // cond: 20589 // result: (ULT cmp yes no) 20590 for { 20591 v := b.Control 20592 if v.Op != OpAMD64InvertFlags { 20593 break 20594 } 20595 cmp := v.Args[0] 20596 yes := b.Succs[0] 20597 no := b.Succs[1] 20598 b.Kind = BlockAMD64ULT 20599 b.SetControl(cmp) 20600 _ = yes 20601 _ = no 20602 return true 20603 } 20604 // match: (UGT (FlagEQ) yes no) 20605 // cond: 20606 // result: (First nil no yes) 20607 for { 20608 v := b.Control 20609 if v.Op != OpAMD64FlagEQ { 20610 break 20611 } 20612 yes := b.Succs[0] 20613 no := b.Succs[1] 20614 b.Kind = BlockFirst 20615 b.SetControl(nil) 20616 b.swapSuccessors() 20617 _ = no 20618 _ = yes 20619 return true 20620 } 20621 // match: (UGT (FlagLT_ULT) yes no) 20622 // cond: 20623 // result: (First nil no yes) 20624 for { 20625 v := b.Control 20626 if v.Op != OpAMD64FlagLT_ULT { 20627 break 20628 } 20629 yes := b.Succs[0] 20630 no := b.Succs[1] 20631 b.Kind = BlockFirst 20632 b.SetControl(nil) 20633 b.swapSuccessors() 20634 _ = no 20635 _ = yes 20636 return true 20637 } 20638 // match: (UGT (FlagLT_UGT) yes no) 20639 // cond: 20640 // result: (First nil yes no) 20641 for { 20642 v := b.Control 20643 if v.Op != OpAMD64FlagLT_UGT { 20644 break 20645 } 20646 yes := b.Succs[0] 20647 no := b.Succs[1] 20648 b.Kind = BlockFirst 20649 b.SetControl(nil) 20650 _ = yes 20651 _ = no 20652 return true 20653 } 20654 // match: (UGT (FlagGT_ULT) yes no) 20655 // cond: 20656 // result: (First nil no yes) 20657 for { 20658 v := b.Control 20659 if v.Op != OpAMD64FlagGT_ULT { 20660 break 20661 } 20662 yes := b.Succs[0] 20663 no := b.Succs[1] 20664 b.Kind = BlockFirst 20665 b.SetControl(nil) 20666 b.swapSuccessors() 20667 _ = no 20668 _ = yes 20669 return true 20670 } 20671 // match: (UGT (FlagGT_UGT) yes no) 20672 // cond: 20673 // result: (First nil yes no) 20674 for { 20675 v := b.Control 20676 if v.Op != OpAMD64FlagGT_UGT { 20677 break 20678 } 20679 yes := b.Succs[0] 20680 no := b.Succs[1] 20681 b.Kind = BlockFirst 20682 b.SetControl(nil) 20683 _ = yes 20684 _ = no 20685 return true 20686 } 20687 case BlockAMD64ULE: 20688 // match: (ULE (InvertFlags cmp) yes no) 20689 // cond: 20690 // result: (UGE cmp yes no) 20691 for { 20692 v := b.Control 20693 if v.Op != OpAMD64InvertFlags { 20694 break 20695 } 20696 cmp := v.Args[0] 20697 yes := b.Succs[0] 20698 no := b.Succs[1] 20699 b.Kind = BlockAMD64UGE 20700 b.SetControl(cmp) 20701 _ = yes 20702 _ = no 20703 return true 20704 } 20705 // match: (ULE (FlagEQ) yes no) 20706 // cond: 20707 // result: (First nil yes no) 20708 for { 20709 v := b.Control 20710 if v.Op != OpAMD64FlagEQ { 20711 break 20712 } 20713 yes := b.Succs[0] 20714 no := b.Succs[1] 20715 b.Kind = BlockFirst 20716 b.SetControl(nil) 
20717 _ = yes 20718 _ = no 20719 return true 20720 } 20721 // match: (ULE (FlagLT_ULT) yes no) 20722 // cond: 20723 // result: (First nil yes no) 20724 for { 20725 v := b.Control 20726 if v.Op != OpAMD64FlagLT_ULT { 20727 break 20728 } 20729 yes := b.Succs[0] 20730 no := b.Succs[1] 20731 b.Kind = BlockFirst 20732 b.SetControl(nil) 20733 _ = yes 20734 _ = no 20735 return true 20736 } 20737 // match: (ULE (FlagLT_UGT) yes no) 20738 // cond: 20739 // result: (First nil no yes) 20740 for { 20741 v := b.Control 20742 if v.Op != OpAMD64FlagLT_UGT { 20743 break 20744 } 20745 yes := b.Succs[0] 20746 no := b.Succs[1] 20747 b.Kind = BlockFirst 20748 b.SetControl(nil) 20749 b.swapSuccessors() 20750 _ = no 20751 _ = yes 20752 return true 20753 } 20754 // match: (ULE (FlagGT_ULT) yes no) 20755 // cond: 20756 // result: (First nil yes no) 20757 for { 20758 v := b.Control 20759 if v.Op != OpAMD64FlagGT_ULT { 20760 break 20761 } 20762 yes := b.Succs[0] 20763 no := b.Succs[1] 20764 b.Kind = BlockFirst 20765 b.SetControl(nil) 20766 _ = yes 20767 _ = no 20768 return true 20769 } 20770 // match: (ULE (FlagGT_UGT) yes no) 20771 // cond: 20772 // result: (First nil no yes) 20773 for { 20774 v := b.Control 20775 if v.Op != OpAMD64FlagGT_UGT { 20776 break 20777 } 20778 yes := b.Succs[0] 20779 no := b.Succs[1] 20780 b.Kind = BlockFirst 20781 b.SetControl(nil) 20782 b.swapSuccessors() 20783 _ = no 20784 _ = yes 20785 return true 20786 } 20787 case BlockAMD64ULT: 20788 // match: (ULT (InvertFlags cmp) yes no) 20789 // cond: 20790 // result: (UGT cmp yes no) 20791 for { 20792 v := b.Control 20793 if v.Op != OpAMD64InvertFlags { 20794 break 20795 } 20796 cmp := v.Args[0] 20797 yes := b.Succs[0] 20798 no := b.Succs[1] 20799 b.Kind = BlockAMD64UGT 20800 b.SetControl(cmp) 20801 _ = yes 20802 _ = no 20803 return true 20804 } 20805 // match: (ULT (FlagEQ) yes no) 20806 // cond: 20807 // result: (First nil no yes) 20808 for { 20809 v := b.Control 20810 if v.Op != OpAMD64FlagEQ { 20811 break 20812 } 20813 yes := b.Succs[0] 20814 no := b.Succs[1] 20815 b.Kind = BlockFirst 20816 b.SetControl(nil) 20817 b.swapSuccessors() 20818 _ = no 20819 _ = yes 20820 return true 20821 } 20822 // match: (ULT (FlagLT_ULT) yes no) 20823 // cond: 20824 // result: (First nil yes no) 20825 for { 20826 v := b.Control 20827 if v.Op != OpAMD64FlagLT_ULT { 20828 break 20829 } 20830 yes := b.Succs[0] 20831 no := b.Succs[1] 20832 b.Kind = BlockFirst 20833 b.SetControl(nil) 20834 _ = yes 20835 _ = no 20836 return true 20837 } 20838 // match: (ULT (FlagLT_UGT) yes no) 20839 // cond: 20840 // result: (First nil no yes) 20841 for { 20842 v := b.Control 20843 if v.Op != OpAMD64FlagLT_UGT { 20844 break 20845 } 20846 yes := b.Succs[0] 20847 no := b.Succs[1] 20848 b.Kind = BlockFirst 20849 b.SetControl(nil) 20850 b.swapSuccessors() 20851 _ = no 20852 _ = yes 20853 return true 20854 } 20855 // match: (ULT (FlagGT_ULT) yes no) 20856 // cond: 20857 // result: (First nil yes no) 20858 for { 20859 v := b.Control 20860 if v.Op != OpAMD64FlagGT_ULT { 20861 break 20862 } 20863 yes := b.Succs[0] 20864 no := b.Succs[1] 20865 b.Kind = BlockFirst 20866 b.SetControl(nil) 20867 _ = yes 20868 _ = no 20869 return true 20870 } 20871 // match: (ULT (FlagGT_UGT) yes no) 20872 // cond: 20873 // result: (First nil no yes) 20874 for { 20875 v := b.Control 20876 if v.Op != OpAMD64FlagGT_UGT { 20877 break 20878 } 20879 yes := b.Succs[0] 20880 no := b.Succs[1] 20881 b.Kind = BlockFirst 20882 b.SetControl(nil) 20883 b.swapSuccessors() 20884 _ = no 20885 _ = yes 20886 return true 20887 
} 20888 } 20889 return false 20890 }