github.com/freddyisaac/sicortex-golang@v0.0.0-20231019035217-e03519e66f60/src/cmd/compile/internal/ssa/rewriteAMD64.go (about) 1 // autogenerated from gen/AMD64.rules: do not edit! 2 // generated with: cd gen; go run *.go 3 4 package ssa 5 6 import "math" 7 8 var _ = math.MinInt8 // in case not otherwise used 9 func rewriteValueAMD64(v *Value, config *Config) bool { 10 switch v.Op { 11 case OpAMD64ADDL: 12 return rewriteValueAMD64_OpAMD64ADDL(v, config) 13 case OpAMD64ADDLconst: 14 return rewriteValueAMD64_OpAMD64ADDLconst(v, config) 15 case OpAMD64ADDQ: 16 return rewriteValueAMD64_OpAMD64ADDQ(v, config) 17 case OpAMD64ADDQconst: 18 return rewriteValueAMD64_OpAMD64ADDQconst(v, config) 19 case OpAMD64ANDL: 20 return rewriteValueAMD64_OpAMD64ANDL(v, config) 21 case OpAMD64ANDLconst: 22 return rewriteValueAMD64_OpAMD64ANDLconst(v, config) 23 case OpAMD64ANDQ: 24 return rewriteValueAMD64_OpAMD64ANDQ(v, config) 25 case OpAMD64ANDQconst: 26 return rewriteValueAMD64_OpAMD64ANDQconst(v, config) 27 case OpAMD64CMPB: 28 return rewriteValueAMD64_OpAMD64CMPB(v, config) 29 case OpAMD64CMPBconst: 30 return rewriteValueAMD64_OpAMD64CMPBconst(v, config) 31 case OpAMD64CMPL: 32 return rewriteValueAMD64_OpAMD64CMPL(v, config) 33 case OpAMD64CMPLconst: 34 return rewriteValueAMD64_OpAMD64CMPLconst(v, config) 35 case OpAMD64CMPQ: 36 return rewriteValueAMD64_OpAMD64CMPQ(v, config) 37 case OpAMD64CMPQconst: 38 return rewriteValueAMD64_OpAMD64CMPQconst(v, config) 39 case OpAMD64CMPW: 40 return rewriteValueAMD64_OpAMD64CMPW(v, config) 41 case OpAMD64CMPWconst: 42 return rewriteValueAMD64_OpAMD64CMPWconst(v, config) 43 case OpAMD64CMPXCHGLlock: 44 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config) 45 case OpAMD64CMPXCHGQlock: 46 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config) 47 case OpAMD64LEAL: 48 return rewriteValueAMD64_OpAMD64LEAL(v, config) 49 case OpAMD64LEAQ: 50 return rewriteValueAMD64_OpAMD64LEAQ(v, config) 51 case OpAMD64LEAQ1: 52 return rewriteValueAMD64_OpAMD64LEAQ1(v, config) 53 case OpAMD64LEAQ2: 54 return rewriteValueAMD64_OpAMD64LEAQ2(v, config) 55 case OpAMD64LEAQ4: 56 return rewriteValueAMD64_OpAMD64LEAQ4(v, config) 57 case OpAMD64LEAQ8: 58 return rewriteValueAMD64_OpAMD64LEAQ8(v, config) 59 case OpAMD64MOVBQSX: 60 return rewriteValueAMD64_OpAMD64MOVBQSX(v, config) 61 case OpAMD64MOVBQSXload: 62 return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config) 63 case OpAMD64MOVBQZX: 64 return rewriteValueAMD64_OpAMD64MOVBQZX(v, config) 65 case OpAMD64MOVBload: 66 return rewriteValueAMD64_OpAMD64MOVBload(v, config) 67 case OpAMD64MOVBloadidx1: 68 return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config) 69 case OpAMD64MOVBstore: 70 return rewriteValueAMD64_OpAMD64MOVBstore(v, config) 71 case OpAMD64MOVBstoreconst: 72 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config) 73 case OpAMD64MOVBstoreconstidx1: 74 return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config) 75 case OpAMD64MOVBstoreidx1: 76 return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config) 77 case OpAMD64MOVLQSX: 78 return rewriteValueAMD64_OpAMD64MOVLQSX(v, config) 79 case OpAMD64MOVLQSXload: 80 return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config) 81 case OpAMD64MOVLQZX: 82 return rewriteValueAMD64_OpAMD64MOVLQZX(v, config) 83 case OpAMD64MOVLatomicload: 84 return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config) 85 case OpAMD64MOVLload: 86 return rewriteValueAMD64_OpAMD64MOVLload(v, config) 87 case OpAMD64MOVLloadidx1: 88 return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config) 89 case OpAMD64MOVLloadidx4: 90 return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config) 91 case OpAMD64MOVLstore: 92 return rewriteValueAMD64_OpAMD64MOVLstore(v, config) 93 case OpAMD64MOVLstoreconst: 94 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config) 95 case OpAMD64MOVLstoreconstidx1: 96 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config) 97 case OpAMD64MOVLstoreconstidx4: 98 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config) 99 case OpAMD64MOVLstoreidx1: 100 return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config) 101 case OpAMD64MOVLstoreidx4: 102 return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config) 103 case OpAMD64MOVOload: 104 return rewriteValueAMD64_OpAMD64MOVOload(v, config) 105 case OpAMD64MOVOstore: 106 return rewriteValueAMD64_OpAMD64MOVOstore(v, config) 107 case OpAMD64MOVQatomicload: 108 return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config) 109 case OpAMD64MOVQload: 110 return rewriteValueAMD64_OpAMD64MOVQload(v, config) 111 case OpAMD64MOVQloadidx1: 112 return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config) 113 case OpAMD64MOVQloadidx8: 114 return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config) 115 case OpAMD64MOVQstore: 116 return rewriteValueAMD64_OpAMD64MOVQstore(v, config) 117 case OpAMD64MOVQstoreconst: 118 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config) 119 case OpAMD64MOVQstoreconstidx1: 120 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config) 121 case OpAMD64MOVQstoreconstidx8: 122 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config) 123 case OpAMD64MOVQstoreidx1: 124 return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config) 125 case OpAMD64MOVQstoreidx8: 126 return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config) 127 case OpAMD64MOVSDload: 128 return rewriteValueAMD64_OpAMD64MOVSDload(v, config) 129 case OpAMD64MOVSDloadidx1: 130 return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config) 131 case OpAMD64MOVSDloadidx8: 132 return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config) 133 case OpAMD64MOVSDstore: 134 return rewriteValueAMD64_OpAMD64MOVSDstore(v, config) 135 case OpAMD64MOVSDstoreidx1: 136 return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config) 137 case OpAMD64MOVSDstoreidx8: 138 return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config) 139 case OpAMD64MOVSSload: 140 return rewriteValueAMD64_OpAMD64MOVSSload(v, config) 141 case OpAMD64MOVSSloadidx1: 142 return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config) 143 case OpAMD64MOVSSloadidx4: 144 return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config) 145 case OpAMD64MOVSSstore: 146 return rewriteValueAMD64_OpAMD64MOVSSstore(v, config) 147 case OpAMD64MOVSSstoreidx1: 148 return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config) 149 case OpAMD64MOVSSstoreidx4: 150 return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config) 151 case OpAMD64MOVWQSX: 152 return rewriteValueAMD64_OpAMD64MOVWQSX(v, config) 153 case OpAMD64MOVWQSXload: 154 return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config) 155 case OpAMD64MOVWQZX: 156 return rewriteValueAMD64_OpAMD64MOVWQZX(v, config) 157 case OpAMD64MOVWload: 158 return rewriteValueAMD64_OpAMD64MOVWload(v, config) 159 case OpAMD64MOVWloadidx1: 160 return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config) 161 case OpAMD64MOVWloadidx2: 162 return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config) 163 case OpAMD64MOVWstore: 164 return rewriteValueAMD64_OpAMD64MOVWstore(v, config) 165 case OpAMD64MOVWstoreconst: 166 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config) 167 case OpAMD64MOVWstoreconstidx1: 168 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config) 169 case OpAMD64MOVWstoreconstidx2: 170 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config) 171 case OpAMD64MOVWstoreidx1: 172 return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) 173 case OpAMD64MOVWstoreidx2: 174 return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) 175 case OpAMD64MULL: 176 return rewriteValueAMD64_OpAMD64MULL(v, config) 177 case OpAMD64MULLconst: 178 return rewriteValueAMD64_OpAMD64MULLconst(v, config) 179 case OpAMD64MULQ: 180 return rewriteValueAMD64_OpAMD64MULQ(v, config) 181 case OpAMD64MULQconst: 182 return rewriteValueAMD64_OpAMD64MULQconst(v, config) 183 case OpAMD64NEGL: 184 return rewriteValueAMD64_OpAMD64NEGL(v, config) 185 case OpAMD64NEGQ: 186 return rewriteValueAMD64_OpAMD64NEGQ(v, config) 187 case OpAMD64NOTL: 188 return rewriteValueAMD64_OpAMD64NOTL(v, config) 189 case OpAMD64NOTQ: 190 return rewriteValueAMD64_OpAMD64NOTQ(v, config) 191 case OpAMD64ORL: 192 return rewriteValueAMD64_OpAMD64ORL(v, config) 193 case OpAMD64ORLconst: 194 return rewriteValueAMD64_OpAMD64ORLconst(v, config) 195 case OpAMD64ORQ: 196 return rewriteValueAMD64_OpAMD64ORQ(v, config) 197 case OpAMD64ORQconst: 198 return rewriteValueAMD64_OpAMD64ORQconst(v, config) 199 case OpAMD64ROLBconst: 200 return rewriteValueAMD64_OpAMD64ROLBconst(v, config) 201 case OpAMD64ROLLconst: 202 return rewriteValueAMD64_OpAMD64ROLLconst(v, config) 203 case OpAMD64ROLQconst: 204 return rewriteValueAMD64_OpAMD64ROLQconst(v, config) 205 case OpAMD64ROLWconst: 206 return rewriteValueAMD64_OpAMD64ROLWconst(v, config) 207 case OpAMD64SARB: 208 return rewriteValueAMD64_OpAMD64SARB(v, config) 209 case OpAMD64SARBconst: 210 return rewriteValueAMD64_OpAMD64SARBconst(v, config) 211 case OpAMD64SARL: 212 return rewriteValueAMD64_OpAMD64SARL(v, config) 213 case OpAMD64SARLconst: 214 return rewriteValueAMD64_OpAMD64SARLconst(v, config) 215 case OpAMD64SARQ: 216 return rewriteValueAMD64_OpAMD64SARQ(v, config) 217 case OpAMD64SARQconst: 218 return rewriteValueAMD64_OpAMD64SARQconst(v, config) 219 case OpAMD64SARW: 220 return rewriteValueAMD64_OpAMD64SARW(v, config) 221 case OpAMD64SARWconst: 222 return rewriteValueAMD64_OpAMD64SARWconst(v, config) 223 case OpAMD64SBBLcarrymask: 224 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config) 225 case OpAMD64SBBQcarrymask: 226 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config) 227 case OpAMD64SETA: 228 return rewriteValueAMD64_OpAMD64SETA(v, config) 229 case OpAMD64SETAE: 230 return rewriteValueAMD64_OpAMD64SETAE(v, config) 231 case OpAMD64SETB: 232 return rewriteValueAMD64_OpAMD64SETB(v, config) 233 case OpAMD64SETBE: 234 return rewriteValueAMD64_OpAMD64SETBE(v, config) 235 case OpAMD64SETEQ: 236 return rewriteValueAMD64_OpAMD64SETEQ(v, config) 237 case OpAMD64SETG: 238 return rewriteValueAMD64_OpAMD64SETG(v, config) 239 case OpAMD64SETGE: 240 return rewriteValueAMD64_OpAMD64SETGE(v, config) 241 case OpAMD64SETL: 242 return rewriteValueAMD64_OpAMD64SETL(v, config) 243 case OpAMD64SETLE: 244 return rewriteValueAMD64_OpAMD64SETLE(v, config) 245 case OpAMD64SETNE: 246 return rewriteValueAMD64_OpAMD64SETNE(v, config) 247 case OpAMD64SHLL: 248 return rewriteValueAMD64_OpAMD64SHLL(v, config) 249 case OpAMD64SHLQ: 250 return rewriteValueAMD64_OpAMD64SHLQ(v, config) 251 case OpAMD64SHRB: 252 return rewriteValueAMD64_OpAMD64SHRB(v, config) 253 case OpAMD64SHRL: 254 return rewriteValueAMD64_OpAMD64SHRL(v, config) 255 case OpAMD64SHRQ: 256 return rewriteValueAMD64_OpAMD64SHRQ(v, config) 257 case OpAMD64SHRW: 258 return rewriteValueAMD64_OpAMD64SHRW(v, config) 259 case OpAMD64SUBL: 260 return rewriteValueAMD64_OpAMD64SUBL(v, config) 261 case OpAMD64SUBLconst: 262 return rewriteValueAMD64_OpAMD64SUBLconst(v, config) 263 case OpAMD64SUBQ: 264 return rewriteValueAMD64_OpAMD64SUBQ(v, config) 265 case OpAMD64SUBQconst: 266 return rewriteValueAMD64_OpAMD64SUBQconst(v, config) 267 case OpAMD64XADDLlock: 268 return rewriteValueAMD64_OpAMD64XADDLlock(v, config) 269 case OpAMD64XADDQlock: 270 return rewriteValueAMD64_OpAMD64XADDQlock(v, config) 271 case OpAMD64XCHGL: 272 return rewriteValueAMD64_OpAMD64XCHGL(v, config) 273 case OpAMD64XCHGQ: 274 return rewriteValueAMD64_OpAMD64XCHGQ(v, config) 275 case OpAMD64XORL: 276 return rewriteValueAMD64_OpAMD64XORL(v, config) 277 case OpAMD64XORLconst: 278 return rewriteValueAMD64_OpAMD64XORLconst(v, config) 279 case OpAMD64XORQ: 280 return rewriteValueAMD64_OpAMD64XORQ(v, config) 281 case OpAMD64XORQconst: 282 return rewriteValueAMD64_OpAMD64XORQconst(v, config) 283 case OpAdd16: 284 return rewriteValueAMD64_OpAdd16(v, config) 285 case OpAdd32: 286 return rewriteValueAMD64_OpAdd32(v, config) 287 case OpAdd32F: 288 return rewriteValueAMD64_OpAdd32F(v, config) 289 case OpAdd64: 290 return rewriteValueAMD64_OpAdd64(v, config) 291 case OpAdd64F: 292 return rewriteValueAMD64_OpAdd64F(v, config) 293 case OpAdd8: 294 return rewriteValueAMD64_OpAdd8(v, config) 295 case OpAddPtr: 296 return rewriteValueAMD64_OpAddPtr(v, config) 297 case OpAddr: 298 return rewriteValueAMD64_OpAddr(v, config) 299 case OpAnd16: 300 return rewriteValueAMD64_OpAnd16(v, config) 301 case OpAnd32: 302 return rewriteValueAMD64_OpAnd32(v, config) 303 case OpAnd64: 304 return rewriteValueAMD64_OpAnd64(v, config) 305 case OpAnd8: 306 return rewriteValueAMD64_OpAnd8(v, config) 307 case OpAndB: 308 return rewriteValueAMD64_OpAndB(v, config) 309 case OpAtomicAdd32: 310 return rewriteValueAMD64_OpAtomicAdd32(v, config) 311 case OpAtomicAdd64: 312 return rewriteValueAMD64_OpAtomicAdd64(v, config) 313 case OpAtomicAnd8: 314 return rewriteValueAMD64_OpAtomicAnd8(v, config) 315 case OpAtomicCompareAndSwap32: 316 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config) 317 case OpAtomicCompareAndSwap64: 318 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config) 319 case OpAtomicExchange32: 320 return rewriteValueAMD64_OpAtomicExchange32(v, config) 321 case OpAtomicExchange64: 322 return rewriteValueAMD64_OpAtomicExchange64(v, config) 323 case OpAtomicLoad32: 324 return rewriteValueAMD64_OpAtomicLoad32(v, config) 325 case OpAtomicLoad64: 326 return rewriteValueAMD64_OpAtomicLoad64(v, config) 327 case OpAtomicLoadPtr: 328 return rewriteValueAMD64_OpAtomicLoadPtr(v, config) 329 case OpAtomicOr8: 330 return rewriteValueAMD64_OpAtomicOr8(v, config) 331 case OpAtomicStore32: 332 return rewriteValueAMD64_OpAtomicStore32(v, config) 333 case OpAtomicStore64: 334 return rewriteValueAMD64_OpAtomicStore64(v, config) 335 case OpAtomicStorePtrNoWB: 336 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config) 337 case OpAvg64u: 338 return rewriteValueAMD64_OpAvg64u(v, config) 339 case OpBswap32: 340 return rewriteValueAMD64_OpBswap32(v, config) 341 case OpBswap64: 342 return rewriteValueAMD64_OpBswap64(v, config) 343 case OpClosureCall: 344 return rewriteValueAMD64_OpClosureCall(v, config) 345 case OpCom16: 346 return rewriteValueAMD64_OpCom16(v, config) 347 case OpCom32: 348 return rewriteValueAMD64_OpCom32(v, config) 349 case OpCom64: 350 return rewriteValueAMD64_OpCom64(v, config) 351 case OpCom8: 352 return rewriteValueAMD64_OpCom8(v, config) 353 case OpConst16: 354 return rewriteValueAMD64_OpConst16(v, config) 355 case OpConst32: 356 return rewriteValueAMD64_OpConst32(v, config) 357 case OpConst32F: 358 return rewriteValueAMD64_OpConst32F(v, config) 359 case OpConst64: 360 return rewriteValueAMD64_OpConst64(v, config) 361 case OpConst64F: 362 return rewriteValueAMD64_OpConst64F(v, config) 363 case OpConst8: 364 return rewriteValueAMD64_OpConst8(v, config) 365 case OpConstBool: 366 return rewriteValueAMD64_OpConstBool(v, config) 367 case OpConstNil: 368 return rewriteValueAMD64_OpConstNil(v, config) 369 case OpConvert: 370 return rewriteValueAMD64_OpConvert(v, config) 371 case OpCtz32: 372 return rewriteValueAMD64_OpCtz32(v, config) 373 case OpCtz64: 374 return rewriteValueAMD64_OpCtz64(v, config) 375 case OpCvt32Fto32: 376 return rewriteValueAMD64_OpCvt32Fto32(v, config) 377 case OpCvt32Fto64: 378 return rewriteValueAMD64_OpCvt32Fto64(v, config) 379 case OpCvt32Fto64F: 380 return rewriteValueAMD64_OpCvt32Fto64F(v, config) 381 case OpCvt32to32F: 382 return rewriteValueAMD64_OpCvt32to32F(v, config) 383 case OpCvt32to64F: 384 return rewriteValueAMD64_OpCvt32to64F(v, config) 385 case OpCvt64Fto32: 386 return rewriteValueAMD64_OpCvt64Fto32(v, config) 387 case OpCvt64Fto32F: 388 return rewriteValueAMD64_OpCvt64Fto32F(v, config) 389 case OpCvt64Fto64: 390 return rewriteValueAMD64_OpCvt64Fto64(v, config) 391 case OpCvt64to32F: 392 return rewriteValueAMD64_OpCvt64to32F(v, config) 393 case OpCvt64to64F: 394 return rewriteValueAMD64_OpCvt64to64F(v, config) 395 case OpDeferCall: 396 return rewriteValueAMD64_OpDeferCall(v, config) 397 case OpDiv128u: 398 return rewriteValueAMD64_OpDiv128u(v, config) 399 case OpDiv16: 400 return rewriteValueAMD64_OpDiv16(v, config) 401 case OpDiv16u: 402 return rewriteValueAMD64_OpDiv16u(v, config) 403 case OpDiv32: 404 return rewriteValueAMD64_OpDiv32(v, config) 405 case OpDiv32F: 406 return rewriteValueAMD64_OpDiv32F(v, config) 407 case OpDiv32u: 408 return rewriteValueAMD64_OpDiv32u(v, config) 409 case OpDiv64: 410 return rewriteValueAMD64_OpDiv64(v, config) 411 case OpDiv64F: 412 return rewriteValueAMD64_OpDiv64F(v, config) 413 case OpDiv64u: 414 return rewriteValueAMD64_OpDiv64u(v, config) 415 case OpDiv8: 416 return rewriteValueAMD64_OpDiv8(v, config) 417 case OpDiv8u: 418 return rewriteValueAMD64_OpDiv8u(v, config) 419 case OpEq16: 420 return rewriteValueAMD64_OpEq16(v, config) 421 case OpEq32: 422 return rewriteValueAMD64_OpEq32(v, config) 423 case OpEq32F: 424 return rewriteValueAMD64_OpEq32F(v, config) 425 case OpEq64: 426 return rewriteValueAMD64_OpEq64(v, config) 427 case OpEq64F: 428 return rewriteValueAMD64_OpEq64F(v, config) 429 case OpEq8: 430 return rewriteValueAMD64_OpEq8(v, config) 431 case OpEqB: 432 return rewriteValueAMD64_OpEqB(v, config) 433 case OpEqPtr: 434 return rewriteValueAMD64_OpEqPtr(v, config) 435 case OpGeq16: 436 return rewriteValueAMD64_OpGeq16(v, config) 437 case OpGeq16U: 438 return rewriteValueAMD64_OpGeq16U(v, config) 439 case OpGeq32: 440 return rewriteValueAMD64_OpGeq32(v, config) 441 case OpGeq32F: 442 return rewriteValueAMD64_OpGeq32F(v, config) 443 case OpGeq32U: 444 return rewriteValueAMD64_OpGeq32U(v, config) 445 case OpGeq64: 446 return rewriteValueAMD64_OpGeq64(v, config) 447 case OpGeq64F: 448 return rewriteValueAMD64_OpGeq64F(v, config) 449 case OpGeq64U: 450 return rewriteValueAMD64_OpGeq64U(v, config) 451 case OpGeq8: 452 return rewriteValueAMD64_OpGeq8(v, config) 453 case OpGeq8U: 454 return rewriteValueAMD64_OpGeq8U(v, config) 455 case OpGetClosurePtr: 456 return rewriteValueAMD64_OpGetClosurePtr(v, config) 457 case OpGetG: 458 return rewriteValueAMD64_OpGetG(v, config) 459 case OpGoCall: 460 return rewriteValueAMD64_OpGoCall(v, config) 461 case OpGreater16: 462 return rewriteValueAMD64_OpGreater16(v, config) 463 case OpGreater16U: 464 return rewriteValueAMD64_OpGreater16U(v, config) 465 case OpGreater32: 466 return rewriteValueAMD64_OpGreater32(v, config) 467 case OpGreater32F: 468 return rewriteValueAMD64_OpGreater32F(v, config) 469 case OpGreater32U: 470 return rewriteValueAMD64_OpGreater32U(v, config) 471 case OpGreater64: 472 return rewriteValueAMD64_OpGreater64(v, config) 473 case OpGreater64F: 474 return rewriteValueAMD64_OpGreater64F(v, config) 475 case OpGreater64U: 476 return rewriteValueAMD64_OpGreater64U(v, config) 477 case OpGreater8: 478 return rewriteValueAMD64_OpGreater8(v, config) 479 case OpGreater8U: 480 return rewriteValueAMD64_OpGreater8U(v, config) 481 case OpHmul16: 482 return rewriteValueAMD64_OpHmul16(v, config) 483 case OpHmul16u: 484 return rewriteValueAMD64_OpHmul16u(v, config) 485 case OpHmul32: 486 return rewriteValueAMD64_OpHmul32(v, config) 487 case OpHmul32u: 488 return rewriteValueAMD64_OpHmul32u(v, config) 489 case OpHmul64: 490 return rewriteValueAMD64_OpHmul64(v, config) 491 case OpHmul64u: 492 return rewriteValueAMD64_OpHmul64u(v, config) 493 case OpHmul8: 494 return rewriteValueAMD64_OpHmul8(v, config) 495 case OpHmul8u: 496 return rewriteValueAMD64_OpHmul8u(v, config) 497 case OpInt64Hi: 498 return rewriteValueAMD64_OpInt64Hi(v, config) 499 case OpInterCall: 500 return rewriteValueAMD64_OpInterCall(v, config) 501 case OpIsInBounds: 502 return rewriteValueAMD64_OpIsInBounds(v, config) 503 case OpIsNonNil: 504 return rewriteValueAMD64_OpIsNonNil(v, config) 505 case OpIsSliceInBounds: 506 return rewriteValueAMD64_OpIsSliceInBounds(v, config) 507 case OpLeq16: 508 return rewriteValueAMD64_OpLeq16(v, config) 509 case OpLeq16U: 510 return rewriteValueAMD64_OpLeq16U(v, config) 511 case OpLeq32: 512 return rewriteValueAMD64_OpLeq32(v, config) 513 case OpLeq32F: 514 return rewriteValueAMD64_OpLeq32F(v, config) 515 case OpLeq32U: 516 return rewriteValueAMD64_OpLeq32U(v, config) 517 case OpLeq64: 518 return rewriteValueAMD64_OpLeq64(v, config) 519 case OpLeq64F: 520 return rewriteValueAMD64_OpLeq64F(v, config) 521 case OpLeq64U: 522 return rewriteValueAMD64_OpLeq64U(v, config) 523 case OpLeq8: 524 return rewriteValueAMD64_OpLeq8(v, config) 525 case OpLeq8U: 526 return rewriteValueAMD64_OpLeq8U(v, config) 527 case OpLess16: 528 return rewriteValueAMD64_OpLess16(v, config) 529 case OpLess16U: 530 return rewriteValueAMD64_OpLess16U(v, config) 531 case OpLess32: 532 return rewriteValueAMD64_OpLess32(v, config) 533 case OpLess32F: 534 return rewriteValueAMD64_OpLess32F(v, config) 535 case OpLess32U: 536 return rewriteValueAMD64_OpLess32U(v, config) 537 case OpLess64: 538 return rewriteValueAMD64_OpLess64(v, config) 539 case OpLess64F: 540 return rewriteValueAMD64_OpLess64F(v, config) 541 case OpLess64U: 542 return rewriteValueAMD64_OpLess64U(v, config) 543 case OpLess8: 544 return rewriteValueAMD64_OpLess8(v, config) 545 case OpLess8U: 546 return rewriteValueAMD64_OpLess8U(v, config) 547 case OpLoad: 548 return rewriteValueAMD64_OpLoad(v, config) 549 case OpLrot16: 550 return rewriteValueAMD64_OpLrot16(v, config) 551 case OpLrot32: 552 return rewriteValueAMD64_OpLrot32(v, config) 553 case OpLrot64: 554 return rewriteValueAMD64_OpLrot64(v, config) 555 case OpLrot8: 556 return rewriteValueAMD64_OpLrot8(v, config) 557 case OpLsh16x16: 558 return rewriteValueAMD64_OpLsh16x16(v, config) 559 case OpLsh16x32: 560 return rewriteValueAMD64_OpLsh16x32(v, config) 561 case OpLsh16x64: 562 return rewriteValueAMD64_OpLsh16x64(v, config) 563 case OpLsh16x8: 564 return rewriteValueAMD64_OpLsh16x8(v, config) 565 case OpLsh32x16: 566 return rewriteValueAMD64_OpLsh32x16(v, config) 567 case OpLsh32x32: 568 return rewriteValueAMD64_OpLsh32x32(v, config) 569 case OpLsh32x64: 570 return rewriteValueAMD64_OpLsh32x64(v, config) 571 case OpLsh32x8: 572 return rewriteValueAMD64_OpLsh32x8(v, config) 573 case OpLsh64x16: 574 return rewriteValueAMD64_OpLsh64x16(v, config) 575 case OpLsh64x32: 576 return rewriteValueAMD64_OpLsh64x32(v, config) 577 case OpLsh64x64: 578 return rewriteValueAMD64_OpLsh64x64(v, config) 579 case OpLsh64x8: 580 return rewriteValueAMD64_OpLsh64x8(v, config) 581 case OpLsh8x16: 582 return rewriteValueAMD64_OpLsh8x16(v, config) 583 case OpLsh8x32: 584 return rewriteValueAMD64_OpLsh8x32(v, config) 585 case OpLsh8x64: 586 return rewriteValueAMD64_OpLsh8x64(v, config) 587 case OpLsh8x8: 588 return rewriteValueAMD64_OpLsh8x8(v, config) 589 case OpMod16: 590 return rewriteValueAMD64_OpMod16(v, config) 591 case OpMod16u: 592 return rewriteValueAMD64_OpMod16u(v, config) 593 case OpMod32: 594 return rewriteValueAMD64_OpMod32(v, config) 595 case OpMod32u: 596 return rewriteValueAMD64_OpMod32u(v, config) 597 case OpMod64: 598 return rewriteValueAMD64_OpMod64(v, config) 599 case OpMod64u: 600 return rewriteValueAMD64_OpMod64u(v, config) 601 case OpMod8: 602 return rewriteValueAMD64_OpMod8(v, config) 603 case OpMod8u: 604 return rewriteValueAMD64_OpMod8u(v, config) 605 case OpMove: 606 return rewriteValueAMD64_OpMove(v, config) 607 case OpMul16: 608 return rewriteValueAMD64_OpMul16(v, config) 609 case OpMul32: 610 return rewriteValueAMD64_OpMul32(v, config) 611 case OpMul32F: 612 return rewriteValueAMD64_OpMul32F(v, config) 613 case OpMul64: 614 return rewriteValueAMD64_OpMul64(v, config) 615 case OpMul64F: 616 return rewriteValueAMD64_OpMul64F(v, config) 617 case OpMul64uhilo: 618 return rewriteValueAMD64_OpMul64uhilo(v, config) 619 case OpMul8: 620 return rewriteValueAMD64_OpMul8(v, config) 621 case OpNeg16: 622 return rewriteValueAMD64_OpNeg16(v, config) 623 case OpNeg32: 624 return rewriteValueAMD64_OpNeg32(v, config) 625 case OpNeg32F: 626 return rewriteValueAMD64_OpNeg32F(v, config) 627 case OpNeg64: 628 return rewriteValueAMD64_OpNeg64(v, config) 629 case OpNeg64F: 630 return rewriteValueAMD64_OpNeg64F(v, config) 631 case OpNeg8: 632 return rewriteValueAMD64_OpNeg8(v, config) 633 case OpNeq16: 634 return rewriteValueAMD64_OpNeq16(v, config) 635 case OpNeq32: 636 return rewriteValueAMD64_OpNeq32(v, config) 637 case OpNeq32F: 638 return rewriteValueAMD64_OpNeq32F(v, config) 639 case OpNeq64: 640 return rewriteValueAMD64_OpNeq64(v, config) 641 case OpNeq64F: 642 return rewriteValueAMD64_OpNeq64F(v, config) 643 case OpNeq8: 644 return rewriteValueAMD64_OpNeq8(v, config) 645 case OpNeqB: 646 return rewriteValueAMD64_OpNeqB(v, config) 647 case OpNeqPtr: 648 return rewriteValueAMD64_OpNeqPtr(v, config) 649 case OpNilCheck: 650 return rewriteValueAMD64_OpNilCheck(v, config) 651 case OpNot: 652 return rewriteValueAMD64_OpNot(v, config) 653 case OpOffPtr: 654 return rewriteValueAMD64_OpOffPtr(v, config) 655 case OpOr16: 656 return rewriteValueAMD64_OpOr16(v, config) 657 case OpOr32: 658 return rewriteValueAMD64_OpOr32(v, config) 659 case OpOr64: 660 return rewriteValueAMD64_OpOr64(v, config) 661 case OpOr8: 662 return rewriteValueAMD64_OpOr8(v, config) 663 case OpOrB: 664 return rewriteValueAMD64_OpOrB(v, config) 665 case OpRsh16Ux16: 666 return rewriteValueAMD64_OpRsh16Ux16(v, config) 667 case OpRsh16Ux32: 668 return rewriteValueAMD64_OpRsh16Ux32(v, config) 669 case OpRsh16Ux64: 670 return rewriteValueAMD64_OpRsh16Ux64(v, config) 671 case OpRsh16Ux8: 672 return rewriteValueAMD64_OpRsh16Ux8(v, config) 673 case OpRsh16x16: 674 return rewriteValueAMD64_OpRsh16x16(v, config) 675 case OpRsh16x32: 676 return rewriteValueAMD64_OpRsh16x32(v, config) 677 case OpRsh16x64: 678 return rewriteValueAMD64_OpRsh16x64(v, config) 679 case OpRsh16x8: 680 return rewriteValueAMD64_OpRsh16x8(v, config) 681 case OpRsh32Ux16: 682 return rewriteValueAMD64_OpRsh32Ux16(v, config) 683 case OpRsh32Ux32: 684 return rewriteValueAMD64_OpRsh32Ux32(v, config) 685 case OpRsh32Ux64: 686 return rewriteValueAMD64_OpRsh32Ux64(v, config) 687 case OpRsh32Ux8: 688 return rewriteValueAMD64_OpRsh32Ux8(v, config) 689 case OpRsh32x16: 690 return rewriteValueAMD64_OpRsh32x16(v, config) 691 case OpRsh32x32: 692 return rewriteValueAMD64_OpRsh32x32(v, config) 693 case OpRsh32x64: 694 return rewriteValueAMD64_OpRsh32x64(v, config) 695 case OpRsh32x8: 696 return rewriteValueAMD64_OpRsh32x8(v, config) 697 case OpRsh64Ux16: 698 return rewriteValueAMD64_OpRsh64Ux16(v, config) 699 case OpRsh64Ux32: 700 return rewriteValueAMD64_OpRsh64Ux32(v, config) 701 case OpRsh64Ux64: 702 return rewriteValueAMD64_OpRsh64Ux64(v, config) 703 case OpRsh64Ux8: 704 return rewriteValueAMD64_OpRsh64Ux8(v, config) 705 case OpRsh64x16: 706 return rewriteValueAMD64_OpRsh64x16(v, config) 707 case OpRsh64x32: 708 return rewriteValueAMD64_OpRsh64x32(v, config) 709 case OpRsh64x64: 710 return rewriteValueAMD64_OpRsh64x64(v, config) 711 case OpRsh64x8: 712 return rewriteValueAMD64_OpRsh64x8(v, config) 713 case OpRsh8Ux16: 714 return rewriteValueAMD64_OpRsh8Ux16(v, config) 715 case OpRsh8Ux32: 716 return rewriteValueAMD64_OpRsh8Ux32(v, config) 717 case OpRsh8Ux64: 718 return rewriteValueAMD64_OpRsh8Ux64(v, config) 719 case OpRsh8Ux8: 720 return rewriteValueAMD64_OpRsh8Ux8(v, config) 721 case OpRsh8x16: 722 return rewriteValueAMD64_OpRsh8x16(v, config) 723 case OpRsh8x32: 724 return rewriteValueAMD64_OpRsh8x32(v, config) 725 case OpRsh8x64: 726 return rewriteValueAMD64_OpRsh8x64(v, config) 727 case OpRsh8x8: 728 return rewriteValueAMD64_OpRsh8x8(v, config) 729 case OpSelect0: 730 return rewriteValueAMD64_OpSelect0(v, config) 731 case OpSelect1: 732 return rewriteValueAMD64_OpSelect1(v, config) 733 case OpSignExt16to32: 734 return rewriteValueAMD64_OpSignExt16to32(v, config) 735 case OpSignExt16to64: 736 return rewriteValueAMD64_OpSignExt16to64(v, config) 737 case OpSignExt32to64: 738 return rewriteValueAMD64_OpSignExt32to64(v, config) 739 case OpSignExt8to16: 740 return rewriteValueAMD64_OpSignExt8to16(v, config) 741 case OpSignExt8to32: 742 return rewriteValueAMD64_OpSignExt8to32(v, config) 743 case OpSignExt8to64: 744 return rewriteValueAMD64_OpSignExt8to64(v, config) 745 case OpSlicemask: 746 return rewriteValueAMD64_OpSlicemask(v, config) 747 case OpSqrt: 748 return rewriteValueAMD64_OpSqrt(v, config) 749 case OpStaticCall: 750 return rewriteValueAMD64_OpStaticCall(v, config) 751 case OpStore: 752 return rewriteValueAMD64_OpStore(v, config) 753 case OpSub16: 754 return rewriteValueAMD64_OpSub16(v, config) 755 case OpSub32: 756 return rewriteValueAMD64_OpSub32(v, config) 757 case OpSub32F: 758 return rewriteValueAMD64_OpSub32F(v, config) 759 case OpSub64: 760 return rewriteValueAMD64_OpSub64(v, config) 761 case OpSub64F: 762 return rewriteValueAMD64_OpSub64F(v, config) 763 case OpSub8: 764 return rewriteValueAMD64_OpSub8(v, config) 765 case OpSubPtr: 766 return rewriteValueAMD64_OpSubPtr(v, config) 767 case OpTrunc16to8: 768 return rewriteValueAMD64_OpTrunc16to8(v, config) 769 case OpTrunc32to16: 770 return rewriteValueAMD64_OpTrunc32to16(v, config) 771 case OpTrunc32to8: 772 return rewriteValueAMD64_OpTrunc32to8(v, config) 773 case OpTrunc64to16: 774 return rewriteValueAMD64_OpTrunc64to16(v, config) 775 case OpTrunc64to32: 776 return rewriteValueAMD64_OpTrunc64to32(v, config) 777 case OpTrunc64to8: 778 return rewriteValueAMD64_OpTrunc64to8(v, config) 779 case OpXor16: 780 return rewriteValueAMD64_OpXor16(v, config) 781 case OpXor32: 782 return rewriteValueAMD64_OpXor32(v, config) 783 case OpXor64: 784 return rewriteValueAMD64_OpXor64(v, config) 785 case OpXor8: 786 return rewriteValueAMD64_OpXor8(v, config) 787 case OpZero: 788 return rewriteValueAMD64_OpZero(v, config) 789 case OpZeroExt16to32: 790 return rewriteValueAMD64_OpZeroExt16to32(v, config) 791 case OpZeroExt16to64: 792 return rewriteValueAMD64_OpZeroExt16to64(v, config) 793 case OpZeroExt32to64: 794 return rewriteValueAMD64_OpZeroExt32to64(v, config) 795 case OpZeroExt8to16: 796 return rewriteValueAMD64_OpZeroExt8to16(v, config) 797 case OpZeroExt8to32: 798 return rewriteValueAMD64_OpZeroExt8to32(v, config) 799 case OpZeroExt8to64: 800 return rewriteValueAMD64_OpZeroExt8to64(v, config) 801 } 802 return false 803 } 804 func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { 805 b := v.Block 806 _ = b 807 // match: (ADDL x (MOVLconst [c])) 808 // cond: 809 // result: (ADDLconst [c] x) 810 for { 811 x := v.Args[0] 812 v_1 := v.Args[1] 813 if v_1.Op != OpAMD64MOVLconst { 814 break 815 } 816 c := v_1.AuxInt 817 v.reset(OpAMD64ADDLconst) 818 v.AuxInt = c 819 v.AddArg(x) 820 return true 821 } 822 // match: (ADDL (MOVLconst [c]) x) 823 // cond: 824 // result: (ADDLconst [c] x) 825 for { 826 v_0 := v.Args[0] 827 if v_0.Op != OpAMD64MOVLconst { 828 break 829 } 830 c := v_0.AuxInt 831 x := v.Args[1] 832 v.reset(OpAMD64ADDLconst) 833 v.AuxInt = c 834 v.AddArg(x) 835 return true 836 } 837 // match: (ADDL x (NEGL y)) 838 // cond: 839 // result: (SUBL x y) 840 for { 841 x := v.Args[0] 842 v_1 := v.Args[1] 843 if v_1.Op != OpAMD64NEGL { 844 break 845 } 846 y := v_1.Args[0] 847 v.reset(OpAMD64SUBL) 848 v.AddArg(x) 849 v.AddArg(y) 850 return true 851 } 852 return false 853 } 854 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool { 855 b := v.Block 856 _ = b 857 // match: (ADDLconst [c] x) 858 // cond: int32(c)==0 859 // result: x 860 for { 861 c := v.AuxInt 862 x := v.Args[0] 863 if !(int32(c) == 0) { 864 break 865 } 866 v.reset(OpCopy) 867 v.Type = x.Type 868 v.AddArg(x) 869 return true 870 } 871 // match: (ADDLconst [c] (MOVLconst [d])) 872 // cond: 873 // result: (MOVLconst [int64(int32(c+d))]) 874 for { 875 c := v.AuxInt 876 v_0 := v.Args[0] 877 if v_0.Op != OpAMD64MOVLconst { 878 break 879 } 880 d := v_0.AuxInt 881 v.reset(OpAMD64MOVLconst) 882 v.AuxInt = int64(int32(c + d)) 883 return true 884 } 885 // match: (ADDLconst [c] (ADDLconst [d] x)) 886 // cond: 887 // result: (ADDLconst [int64(int32(c+d))] x) 888 for { 889 c := v.AuxInt 890 v_0 := v.Args[0] 891 if v_0.Op != OpAMD64ADDLconst { 892 break 893 } 894 d := v_0.AuxInt 895 x := v_0.Args[0] 896 v.reset(OpAMD64ADDLconst) 897 v.AuxInt = int64(int32(c + d)) 898 v.AddArg(x) 899 return true 900 } 901 // match: (ADDLconst [c] (LEAL [d] {s} x)) 902 // cond: is32Bit(c+d) 903 // result: (LEAL [c+d] {s} x) 904 for { 905 c := v.AuxInt 906 v_0 := v.Args[0] 907 if v_0.Op != OpAMD64LEAL { 908 break 909 } 910 d := v_0.AuxInt 911 s := v_0.Aux 912 x := v_0.Args[0] 913 if !(is32Bit(c + d)) { 914 break 915 } 916 v.reset(OpAMD64LEAL) 917 v.AuxInt = c + d 918 v.Aux = s 919 v.AddArg(x) 920 return true 921 } 922 return false 923 } 924 func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool { 925 b := v.Block 926 _ = b 927 // match: (ADDQ x (MOVQconst [c])) 928 // cond: is32Bit(c) 929 // result: (ADDQconst [c] x) 930 for { 931 x := v.Args[0] 932 v_1 := v.Args[1] 933 if v_1.Op != OpAMD64MOVQconst { 934 break 935 } 936 c := v_1.AuxInt 937 if !(is32Bit(c)) { 938 break 939 } 940 v.reset(OpAMD64ADDQconst) 941 v.AuxInt = c 942 v.AddArg(x) 943 return true 944 } 945 // match: (ADDQ (MOVQconst [c]) x) 946 // cond: is32Bit(c) 947 // result: (ADDQconst [c] x) 948 for { 949 v_0 := v.Args[0] 950 if v_0.Op != OpAMD64MOVQconst { 951 break 952 } 953 c := v_0.AuxInt 954 x := v.Args[1] 955 if !(is32Bit(c)) { 956 break 957 } 958 v.reset(OpAMD64ADDQconst) 959 v.AuxInt = c 960 v.AddArg(x) 961 return true 962 } 963 // match: (ADDQ x (SHLQconst [3] y)) 964 // cond: 965 // result: (LEAQ8 x y) 966 for { 967 x := v.Args[0] 968 v_1 := v.Args[1] 969 if v_1.Op != OpAMD64SHLQconst { 970 break 971 } 972 if v_1.AuxInt != 3 { 973 break 974 } 975 y := v_1.Args[0] 976 v.reset(OpAMD64LEAQ8) 977 v.AddArg(x) 978 v.AddArg(y) 979 return true 980 } 981 // match: (ADDQ x (SHLQconst [2] y)) 982 // cond: 983 // result: (LEAQ4 x y) 984 for { 985 x := v.Args[0] 986 v_1 := v.Args[1] 987 if v_1.Op != OpAMD64SHLQconst { 988 break 989 } 990 if v_1.AuxInt != 2 { 991 break 992 } 993 y := v_1.Args[0] 994 v.reset(OpAMD64LEAQ4) 995 v.AddArg(x) 996 v.AddArg(y) 997 return true 998 } 999 // match: (ADDQ x (SHLQconst [1] y)) 1000 // cond: 1001 // result: (LEAQ2 x y) 1002 for { 1003 x := v.Args[0] 1004 v_1 := v.Args[1] 1005 if v_1.Op != OpAMD64SHLQconst { 1006 break 1007 } 1008 if v_1.AuxInt != 1 { 1009 break 1010 } 1011 y := v_1.Args[0] 1012 v.reset(OpAMD64LEAQ2) 1013 v.AddArg(x) 1014 v.AddArg(y) 1015 return true 1016 } 1017 // match: (ADDQ x (ADDQ y y)) 1018 // cond: 1019 // result: (LEAQ2 x y) 1020 for { 1021 x := v.Args[0] 1022 v_1 := v.Args[1] 1023 if v_1.Op != OpAMD64ADDQ { 1024 break 1025 } 1026 y := v_1.Args[0] 1027 if y != v_1.Args[1] { 1028 break 1029 } 1030 v.reset(OpAMD64LEAQ2) 1031 v.AddArg(x) 1032 v.AddArg(y) 1033 return true 1034 } 1035 // match: (ADDQ x (ADDQ x y)) 1036 // cond: 1037 // result: (LEAQ2 y x) 1038 for { 1039 x := v.Args[0] 1040 v_1 := v.Args[1] 1041 if v_1.Op != OpAMD64ADDQ { 1042 break 1043 } 1044 if x != v_1.Args[0] { 1045 break 1046 } 1047 y := v_1.Args[1] 1048 v.reset(OpAMD64LEAQ2) 1049 v.AddArg(y) 1050 v.AddArg(x) 1051 return true 1052 } 1053 // match: (ADDQ x (ADDQ y x)) 1054 // cond: 1055 // result: (LEAQ2 y x) 1056 for { 1057 x := v.Args[0] 1058 v_1 := v.Args[1] 1059 if v_1.Op != OpAMD64ADDQ { 1060 break 1061 } 1062 y := v_1.Args[0] 1063 if x != v_1.Args[1] { 1064 break 1065 } 1066 v.reset(OpAMD64LEAQ2) 1067 v.AddArg(y) 1068 v.AddArg(x) 1069 return true 1070 } 1071 // match: (ADDQ (ADDQconst [c] x) y) 1072 // cond: 1073 // result: (LEAQ1 [c] x y) 1074 for { 1075 v_0 := v.Args[0] 1076 if v_0.Op != OpAMD64ADDQconst { 1077 break 1078 } 1079 c := v_0.AuxInt 1080 x := v_0.Args[0] 1081 y := v.Args[1] 1082 v.reset(OpAMD64LEAQ1) 1083 v.AuxInt = c 1084 v.AddArg(x) 1085 v.AddArg(y) 1086 return true 1087 } 1088 // match: (ADDQ x (ADDQconst [c] y)) 1089 // cond: 1090 // result: (LEAQ1 [c] x y) 1091 for { 1092 x := v.Args[0] 1093 v_1 := v.Args[1] 1094 if v_1.Op != OpAMD64ADDQconst { 1095 break 1096 } 1097 c := v_1.AuxInt 1098 y := v_1.Args[0] 1099 v.reset(OpAMD64LEAQ1) 1100 v.AuxInt = c 1101 v.AddArg(x) 1102 v.AddArg(y) 1103 return true 1104 } 1105 // match: (ADDQ x (LEAQ [c] {s} y)) 1106 // cond: x.Op != OpSB && y.Op != OpSB 1107 // result: (LEAQ1 [c] {s} x y) 1108 for { 1109 x := v.Args[0] 1110 v_1 := v.Args[1] 1111 if v_1.Op != OpAMD64LEAQ { 1112 break 1113 } 1114 c := v_1.AuxInt 1115 s := v_1.Aux 1116 y := v_1.Args[0] 1117 if !(x.Op != OpSB && y.Op != OpSB) { 1118 break 1119 } 1120 v.reset(OpAMD64LEAQ1) 1121 v.AuxInt = c 1122 v.Aux = s 1123 v.AddArg(x) 1124 v.AddArg(y) 1125 return true 1126 } 1127 // match: (ADDQ (LEAQ [c] {s} x) y) 1128 // cond: x.Op != OpSB && y.Op != OpSB 1129 // result: (LEAQ1 [c] {s} x y) 1130 for { 1131 v_0 := v.Args[0] 1132 if v_0.Op != OpAMD64LEAQ { 1133 break 1134 } 1135 c := v_0.AuxInt 1136 s := v_0.Aux 1137 x := v_0.Args[0] 1138 y := v.Args[1] 1139 if !(x.Op != OpSB && y.Op != OpSB) { 1140 break 1141 } 1142 v.reset(OpAMD64LEAQ1) 1143 v.AuxInt = c 1144 v.Aux = s 1145 v.AddArg(x) 1146 v.AddArg(y) 1147 return true 1148 } 1149 // match: (ADDQ x (NEGQ y)) 1150 // cond: 1151 // result: (SUBQ x y) 1152 for { 1153 x := v.Args[0] 1154 v_1 := v.Args[1] 1155 if v_1.Op != OpAMD64NEGQ { 1156 break 1157 } 1158 y := v_1.Args[0] 1159 v.reset(OpAMD64SUBQ) 1160 v.AddArg(x) 1161 v.AddArg(y) 1162 return true 1163 } 1164 return false 1165 } 1166 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { 1167 b := v.Block 1168 _ = b 1169 // match: (ADDQconst [c] (ADDQ x y)) 1170 // cond: 1171 // result: (LEAQ1 [c] x y) 1172 for { 1173 c := v.AuxInt 1174 v_0 := v.Args[0] 1175 if v_0.Op != OpAMD64ADDQ { 1176 break 1177 } 1178 x := v_0.Args[0] 1179 y := v_0.Args[1] 1180 v.reset(OpAMD64LEAQ1) 1181 v.AuxInt = c 1182 v.AddArg(x) 1183 v.AddArg(y) 1184 return true 1185 } 1186 // match: (ADDQconst [c] (LEAQ [d] {s} x)) 1187 // cond: is32Bit(c+d) 1188 // result: (LEAQ [c+d] {s} x) 1189 for { 1190 c := v.AuxInt 1191 v_0 := v.Args[0] 1192 if v_0.Op != OpAMD64LEAQ { 1193 break 1194 } 1195 d := v_0.AuxInt 1196 s := v_0.Aux 1197 x := v_0.Args[0] 1198 if !(is32Bit(c + d)) { 1199 break 1200 } 1201 v.reset(OpAMD64LEAQ) 1202 v.AuxInt = c + d 1203 v.Aux = s 1204 v.AddArg(x) 1205 return true 1206 } 1207 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) 1208 // cond: is32Bit(c+d) 1209 // result: (LEAQ1 [c+d] {s} x y) 1210 for { 1211 c := v.AuxInt 1212 v_0 := v.Args[0] 1213 if v_0.Op != OpAMD64LEAQ1 { 1214 break 1215 } 1216 d := v_0.AuxInt 1217 s := v_0.Aux 1218 x := v_0.Args[0] 1219 y := v_0.Args[1] 1220 if !(is32Bit(c + d)) { 1221 break 1222 } 1223 v.reset(OpAMD64LEAQ1) 1224 v.AuxInt = c + d 1225 v.Aux = s 1226 v.AddArg(x) 1227 v.AddArg(y) 1228 return true 1229 } 1230 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) 1231 // cond: is32Bit(c+d) 1232 // result: (LEAQ2 [c+d] {s} x y) 1233 for { 1234 c := v.AuxInt 1235 v_0 := v.Args[0] 1236 if v_0.Op != OpAMD64LEAQ2 { 1237 break 1238 } 1239 d := v_0.AuxInt 1240 s := v_0.Aux 1241 x := v_0.Args[0] 1242 y := v_0.Args[1] 1243 if !(is32Bit(c + d)) { 1244 break 1245 } 1246 v.reset(OpAMD64LEAQ2) 1247 v.AuxInt = c + d 1248 v.Aux = s 1249 v.AddArg(x) 1250 v.AddArg(y) 1251 return true 1252 } 1253 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) 1254 // cond: is32Bit(c+d) 1255 // result: (LEAQ4 [c+d] {s} x y) 1256 for { 1257 c := v.AuxInt 1258 v_0 := v.Args[0] 1259 if v_0.Op != OpAMD64LEAQ4 { 1260 break 1261 } 1262 d := v_0.AuxInt 1263 s := v_0.Aux 1264 x := v_0.Args[0] 1265 y := v_0.Args[1] 1266 if !(is32Bit(c + d)) { 1267 break 1268 } 1269 v.reset(OpAMD64LEAQ4) 1270 v.AuxInt = c + d 1271 v.Aux = s 1272 v.AddArg(x) 1273 v.AddArg(y) 1274 return true 1275 } 1276 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) 1277 // cond: is32Bit(c+d) 1278 // result: (LEAQ8 [c+d] {s} x y) 1279 for { 1280 c := v.AuxInt 1281 v_0 := v.Args[0] 1282 if v_0.Op != OpAMD64LEAQ8 { 1283 break 1284 } 1285 d := v_0.AuxInt 1286 s := v_0.Aux 1287 x := v_0.Args[0] 1288 y := v_0.Args[1] 1289 if !(is32Bit(c + d)) { 1290 break 1291 } 1292 v.reset(OpAMD64LEAQ8) 1293 v.AuxInt = c + d 1294 v.Aux = s 1295 v.AddArg(x) 1296 v.AddArg(y) 1297 return true 1298 } 1299 // match: (ADDQconst [0] x) 1300 // cond: 1301 // result: x 1302 for { 1303 if v.AuxInt != 0 { 1304 break 1305 } 1306 x := v.Args[0] 1307 v.reset(OpCopy) 1308 v.Type = x.Type 1309 v.AddArg(x) 1310 return true 1311 } 1312 // match: (ADDQconst [c] (MOVQconst [d])) 1313 // cond: 1314 // result: (MOVQconst [c+d]) 1315 for { 1316 c := v.AuxInt 1317 v_0 := v.Args[0] 1318 if v_0.Op != OpAMD64MOVQconst { 1319 break 1320 } 1321 d := v_0.AuxInt 1322 v.reset(OpAMD64MOVQconst) 1323 v.AuxInt = c + d 1324 return true 1325 } 1326 // match: (ADDQconst [c] (ADDQconst [d] x)) 1327 // cond: is32Bit(c+d) 1328 // result: (ADDQconst [c+d] x) 1329 for { 1330 c := v.AuxInt 1331 v_0 := v.Args[0] 1332 if v_0.Op != OpAMD64ADDQconst { 1333 break 1334 } 1335 d := v_0.AuxInt 1336 x := v_0.Args[0] 1337 if !(is32Bit(c + d)) { 1338 break 1339 } 1340 v.reset(OpAMD64ADDQconst) 1341 v.AuxInt = c + d 1342 v.AddArg(x) 1343 return true 1344 } 1345 return false 1346 } 1347 func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { 1348 b := v.Block 1349 _ = b 1350 // match: (ANDL x (MOVLconst [c])) 1351 // cond: 1352 // result: (ANDLconst [c] x) 1353 for { 1354 x := v.Args[0] 1355 v_1 := v.Args[1] 1356 if v_1.Op != OpAMD64MOVLconst { 1357 break 1358 } 1359 c := v_1.AuxInt 1360 v.reset(OpAMD64ANDLconst) 1361 v.AuxInt = c 1362 v.AddArg(x) 1363 return true 1364 } 1365 // match: (ANDL (MOVLconst [c]) x) 1366 // cond: 1367 // result: (ANDLconst [c] x) 1368 for { 1369 v_0 := v.Args[0] 1370 if v_0.Op != OpAMD64MOVLconst { 1371 break 1372 } 1373 c := v_0.AuxInt 1374 x := v.Args[1] 1375 v.reset(OpAMD64ANDLconst) 1376 v.AuxInt = c 1377 v.AddArg(x) 1378 return true 1379 } 1380 // match: (ANDL x x) 1381 // cond: 1382 // result: x 1383 for { 1384 x := v.Args[0] 1385 if x != v.Args[1] { 1386 break 1387 } 1388 v.reset(OpCopy) 1389 v.Type = x.Type 1390 v.AddArg(x) 1391 return true 1392 } 1393 return false 1394 } 1395 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool { 1396 b := v.Block 1397 _ = b 1398 // match: (ANDLconst [c] (ANDLconst [d] x)) 1399 // cond: 1400 // result: (ANDLconst [c & d] x) 1401 for { 1402 c := v.AuxInt 1403 v_0 := v.Args[0] 1404 if v_0.Op != OpAMD64ANDLconst { 1405 break 1406 } 1407 d := v_0.AuxInt 1408 x := v_0.Args[0] 1409 v.reset(OpAMD64ANDLconst) 1410 v.AuxInt = c & d 1411 v.AddArg(x) 1412 return true 1413 } 1414 // match: (ANDLconst [0xFF] x) 1415 // cond: 1416 // result: (MOVBQZX x) 1417 for { 1418 if v.AuxInt != 0xFF { 1419 break 1420 } 1421 x := v.Args[0] 1422 v.reset(OpAMD64MOVBQZX) 1423 v.AddArg(x) 1424 return true 1425 } 1426 // match: (ANDLconst [0xFFFF] x) 1427 // cond: 1428 // result: (MOVWQZX x) 1429 for { 1430 if v.AuxInt != 0xFFFF { 1431 break 1432 } 1433 x := v.Args[0] 1434 v.reset(OpAMD64MOVWQZX) 1435 v.AddArg(x) 1436 return true 1437 } 1438 // match: (ANDLconst [c] _) 1439 // cond: int32(c)==0 1440 // result: (MOVLconst [0]) 1441 for { 1442 c := v.AuxInt 1443 if !(int32(c) == 0) { 1444 break 1445 } 1446 v.reset(OpAMD64MOVLconst) 1447 v.AuxInt = 0 1448 return true 1449 } 1450 // match: (ANDLconst [c] x) 1451 // cond: int32(c)==-1 1452 // result: x 1453 for { 1454 c := v.AuxInt 1455 x := v.Args[0] 1456 if !(int32(c) == -1) { 1457 break 1458 } 1459 v.reset(OpCopy) 1460 v.Type = x.Type 1461 v.AddArg(x) 1462 return true 1463 } 1464 // match: (ANDLconst [c] (MOVLconst [d])) 1465 // cond: 1466 // result: (MOVLconst [c&d]) 1467 for { 1468 c := v.AuxInt 1469 v_0 := v.Args[0] 1470 if v_0.Op != OpAMD64MOVLconst { 1471 break 1472 } 1473 d := v_0.AuxInt 1474 v.reset(OpAMD64MOVLconst) 1475 v.AuxInt = c & d 1476 return true 1477 } 1478 return false 1479 } 1480 func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool { 1481 b := v.Block 1482 _ = b 1483 // match: (ANDQ x (MOVQconst [c])) 1484 // cond: is32Bit(c) 1485 // result: (ANDQconst [c] x) 1486 for { 1487 x := v.Args[0] 1488 v_1 := v.Args[1] 1489 if v_1.Op != OpAMD64MOVQconst { 1490 break 1491 } 1492 c := v_1.AuxInt 1493 if !(is32Bit(c)) { 1494 break 1495 } 1496 v.reset(OpAMD64ANDQconst) 1497 v.AuxInt = c 1498 v.AddArg(x) 1499 return true 1500 } 1501 // match: (ANDQ (MOVQconst [c]) x) 1502 // cond: is32Bit(c) 1503 // result: (ANDQconst [c] x) 1504 for { 1505 v_0 := v.Args[0] 1506 if v_0.Op != OpAMD64MOVQconst { 1507 break 1508 } 1509 c := v_0.AuxInt 1510 x := v.Args[1] 1511 if !(is32Bit(c)) { 1512 break 1513 } 1514 v.reset(OpAMD64ANDQconst) 1515 v.AuxInt = c 1516 v.AddArg(x) 1517 return true 1518 } 1519 // match: (ANDQ x x) 1520 // cond: 1521 // result: x 1522 for { 1523 x := v.Args[0] 1524 if x != v.Args[1] { 1525 break 1526 } 1527 v.reset(OpCopy) 1528 v.Type = x.Type 1529 v.AddArg(x) 1530 return true 1531 } 1532 return false 1533 } 1534 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { 1535 b := v.Block 1536 _ = b 1537 // match: (ANDQconst [c] (ANDQconst [d] x)) 1538 // cond: 1539 // result: (ANDQconst [c & d] x) 1540 for { 1541 c := v.AuxInt 1542 v_0 := v.Args[0] 1543 if v_0.Op != OpAMD64ANDQconst { 1544 break 1545 } 1546 d := v_0.AuxInt 1547 x := v_0.Args[0] 1548 v.reset(OpAMD64ANDQconst) 1549 v.AuxInt = c & d 1550 v.AddArg(x) 1551 return true 1552 } 1553 // match: (ANDQconst [0xFF] x) 1554 // cond: 1555 // result: (MOVBQZX x) 1556 for { 1557 if v.AuxInt != 0xFF { 1558 break 1559 } 1560 x := v.Args[0] 1561 v.reset(OpAMD64MOVBQZX) 1562 v.AddArg(x) 1563 return true 1564 } 1565 // match: (ANDQconst [0xFFFF] x) 1566 // cond: 1567 // result: (MOVWQZX x) 1568 for { 1569 if v.AuxInt != 0xFFFF { 1570 break 1571 } 1572 x := v.Args[0] 1573 v.reset(OpAMD64MOVWQZX) 1574 v.AddArg(x) 1575 return true 1576 } 1577 // match: (ANDQconst [0xFFFFFFFF] x) 1578 // cond: 1579 // result: (MOVLQZX x) 1580 for { 1581 if v.AuxInt != 0xFFFFFFFF { 1582 break 1583 } 1584 x := v.Args[0] 1585 v.reset(OpAMD64MOVLQZX) 1586 v.AddArg(x) 1587 return true 1588 } 1589 // match: (ANDQconst [0] _) 1590 // cond: 1591 // result: (MOVQconst [0]) 1592 for { 1593 if v.AuxInt != 0 { 1594 break 1595 } 1596 v.reset(OpAMD64MOVQconst) 1597 v.AuxInt = 0 1598 return true 1599 } 1600 // match: (ANDQconst [-1] x) 1601 // cond: 1602 // result: x 1603 for { 1604 if v.AuxInt != -1 { 1605 break 1606 } 1607 x := v.Args[0] 1608 v.reset(OpCopy) 1609 v.Type = x.Type 1610 v.AddArg(x) 1611 return true 1612 } 1613 // match: (ANDQconst [c] (MOVQconst [d])) 1614 // cond: 1615 // result: (MOVQconst [c&d]) 1616 for { 1617 c := v.AuxInt 1618 v_0 := v.Args[0] 1619 if v_0.Op != OpAMD64MOVQconst { 1620 break 1621 } 1622 d := v_0.AuxInt 1623 v.reset(OpAMD64MOVQconst) 1624 v.AuxInt = c & d 1625 return true 1626 } 1627 return false 1628 } 1629 func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { 1630 b := v.Block 1631 _ = b 1632 // match: (CMPB x (MOVLconst [c])) 1633 // cond: 1634 // result: (CMPBconst x [int64(int8(c))]) 1635 for { 1636 x := v.Args[0] 1637 v_1 := v.Args[1] 1638 if v_1.Op != OpAMD64MOVLconst { 1639 break 1640 } 1641 c := v_1.AuxInt 1642 v.reset(OpAMD64CMPBconst) 1643 v.AuxInt = int64(int8(c)) 1644 v.AddArg(x) 1645 return true 1646 } 1647 // match: (CMPB (MOVLconst [c]) x) 1648 // cond: 1649 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 1650 for { 1651 v_0 := v.Args[0] 1652 if v_0.Op != OpAMD64MOVLconst { 1653 break 1654 } 1655 c := v_0.AuxInt 1656 x := v.Args[1] 1657 v.reset(OpAMD64InvertFlags) 1658 v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 1659 v0.AuxInt = int64(int8(c)) 1660 v0.AddArg(x) 1661 v.AddArg(v0) 1662 return true 1663 } 1664 return false 1665 } 1666 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { 1667 b := v.Block 1668 _ = b 1669 // match: (CMPBconst (MOVLconst [x]) [y]) 1670 // cond: int8(x)==int8(y) 1671 // result: (FlagEQ) 1672 for { 1673 y := v.AuxInt 1674 v_0 := v.Args[0] 1675 if v_0.Op != OpAMD64MOVLconst { 1676 break 1677 } 1678 x := v_0.AuxInt 1679 if !(int8(x) == int8(y)) { 1680 break 1681 } 1682 v.reset(OpAMD64FlagEQ) 1683 return true 1684 } 1685 // match: (CMPBconst (MOVLconst [x]) [y]) 1686 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 1687 // result: (FlagLT_ULT) 1688 for { 1689 y := v.AuxInt 1690 v_0 := v.Args[0] 1691 if v_0.Op != OpAMD64MOVLconst { 1692 break 1693 } 1694 x := v_0.AuxInt 1695 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 1696 break 1697 } 1698 v.reset(OpAMD64FlagLT_ULT) 1699 return true 1700 } 1701 // match: (CMPBconst (MOVLconst [x]) [y]) 1702 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 1703 // result: (FlagLT_UGT) 1704 for { 1705 y := v.AuxInt 1706 v_0 := v.Args[0] 1707 if v_0.Op != OpAMD64MOVLconst { 1708 break 1709 } 1710 x := v_0.AuxInt 1711 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 1712 break 1713 } 1714 v.reset(OpAMD64FlagLT_UGT) 1715 return true 1716 } 1717 // match: (CMPBconst (MOVLconst [x]) [y]) 1718 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 1719 // result: (FlagGT_ULT) 1720 for { 1721 y := v.AuxInt 1722 v_0 := v.Args[0] 1723 if v_0.Op != OpAMD64MOVLconst { 1724 break 1725 } 1726 x := v_0.AuxInt 1727 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 1728 break 1729 } 1730 v.reset(OpAMD64FlagGT_ULT) 1731 return true 1732 } 1733 // match: (CMPBconst (MOVLconst [x]) [y]) 1734 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 1735 // result: (FlagGT_UGT) 1736 for { 1737 y := v.AuxInt 1738 v_0 := v.Args[0] 1739 if v_0.Op != OpAMD64MOVLconst { 1740 break 1741 } 1742 x := v_0.AuxInt 1743 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 1744 break 1745 } 1746 v.reset(OpAMD64FlagGT_UGT) 1747 return true 1748 } 1749 // match: (CMPBconst (ANDLconst _ [m]) [n]) 1750 // cond: 0 <= int8(m) && int8(m) < int8(n) 1751 // result: (FlagLT_ULT) 1752 for { 1753 n := v.AuxInt 1754 v_0 := v.Args[0] 1755 if v_0.Op != OpAMD64ANDLconst { 1756 break 1757 } 1758 m := v_0.AuxInt 1759 if !(0 <= int8(m) && int8(m) < int8(n)) { 1760 break 1761 } 1762 v.reset(OpAMD64FlagLT_ULT) 1763 return true 1764 } 1765 // match: (CMPBconst (ANDL x y) [0]) 1766 // cond: 1767 // result: (TESTB x y) 1768 for { 1769 if v.AuxInt != 0 { 1770 break 1771 } 1772 v_0 := v.Args[0] 1773 if v_0.Op != OpAMD64ANDL { 1774 break 1775 } 1776 x := v_0.Args[0] 1777 y := v_0.Args[1] 1778 v.reset(OpAMD64TESTB) 1779 v.AddArg(x) 1780 v.AddArg(y) 1781 return true 1782 } 1783 // match: (CMPBconst (ANDLconst [c] x) [0]) 1784 // cond: 1785 // result: (TESTBconst [int64(int8(c))] x) 1786 for { 1787 if v.AuxInt != 0 { 1788 break 1789 } 1790 v_0 := v.Args[0] 1791 if v_0.Op != OpAMD64ANDLconst { 1792 break 1793 } 1794 c := v_0.AuxInt 1795 x := v_0.Args[0] 1796 v.reset(OpAMD64TESTBconst) 1797 v.AuxInt = int64(int8(c)) 1798 v.AddArg(x) 1799 return true 1800 } 1801 // match: (CMPBconst x [0]) 1802 // cond: 1803 // result: (TESTB x x) 1804 for { 1805 if v.AuxInt != 0 { 1806 break 1807 } 1808 x := v.Args[0] 1809 v.reset(OpAMD64TESTB) 1810 v.AddArg(x) 1811 v.AddArg(x) 1812 return true 1813 } 1814 return false 1815 } 1816 func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool { 1817 b := v.Block 1818 _ = b 1819 // match: (CMPL x (MOVLconst [c])) 1820 // cond: 1821 // result: (CMPLconst x [c]) 1822 for { 1823 x := v.Args[0] 1824 v_1 := v.Args[1] 1825 if v_1.Op != OpAMD64MOVLconst { 1826 break 1827 } 1828 c := v_1.AuxInt 1829 v.reset(OpAMD64CMPLconst) 1830 v.AuxInt = c 1831 v.AddArg(x) 1832 return true 1833 } 1834 // match: (CMPL (MOVLconst [c]) x) 1835 // cond: 1836 // result: (InvertFlags (CMPLconst x [c])) 1837 for { 1838 v_0 := v.Args[0] 1839 if v_0.Op != OpAMD64MOVLconst { 1840 break 1841 } 1842 c := v_0.AuxInt 1843 x := v.Args[1] 1844 v.reset(OpAMD64InvertFlags) 1845 v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 1846 v0.AuxInt = c 1847 v0.AddArg(x) 1848 v.AddArg(v0) 1849 return true 1850 } 1851 return false 1852 } 1853 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { 1854 b := v.Block 1855 _ = b 1856 // match: (CMPLconst (MOVLconst [x]) [y]) 1857 // cond: int32(x)==int32(y) 1858 // result: (FlagEQ) 1859 for { 1860 y := v.AuxInt 1861 v_0 := v.Args[0] 1862 if v_0.Op != OpAMD64MOVLconst { 1863 break 1864 } 1865 x := v_0.AuxInt 1866 if !(int32(x) == int32(y)) { 1867 break 1868 } 1869 v.reset(OpAMD64FlagEQ) 1870 return true 1871 } 1872 // match: (CMPLconst (MOVLconst [x]) [y]) 1873 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 1874 // result: (FlagLT_ULT) 1875 for { 1876 y := v.AuxInt 1877 v_0 := v.Args[0] 1878 if v_0.Op != OpAMD64MOVLconst { 1879 break 1880 } 1881 x := v_0.AuxInt 1882 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 1883 break 1884 } 1885 v.reset(OpAMD64FlagLT_ULT) 1886 return true 1887 } 1888 // match: (CMPLconst (MOVLconst [x]) [y]) 1889 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 1890 // result: (FlagLT_UGT) 1891 for { 1892 y := v.AuxInt 1893 v_0 := v.Args[0] 1894 if v_0.Op != OpAMD64MOVLconst { 1895 break 1896 } 1897 x := v_0.AuxInt 1898 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 1899 break 1900 } 1901 v.reset(OpAMD64FlagLT_UGT) 1902 return true 1903 } 1904 // match: (CMPLconst (MOVLconst [x]) [y]) 1905 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 1906 // result: (FlagGT_ULT) 1907 for { 1908 y := v.AuxInt 1909 v_0 := v.Args[0] 1910 if v_0.Op != OpAMD64MOVLconst { 1911 break 1912 } 1913 x := v_0.AuxInt 1914 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 1915 break 1916 } 1917 v.reset(OpAMD64FlagGT_ULT) 1918 return true 1919 } 1920 // match: (CMPLconst (MOVLconst [x]) [y]) 1921 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 1922 // result: (FlagGT_UGT) 1923 for { 1924 y := v.AuxInt 1925 v_0 := v.Args[0] 1926 if v_0.Op != OpAMD64MOVLconst { 1927 break 1928 } 1929 x := v_0.AuxInt 1930 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 1931 break 1932 } 1933 v.reset(OpAMD64FlagGT_UGT) 1934 return true 1935 } 1936 // match: (CMPLconst (SHRLconst _ [c]) [n]) 1937 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 1938 // result: (FlagLT_ULT) 1939 for { 1940 n := v.AuxInt 1941 v_0 := v.Args[0] 1942 if v_0.Op != OpAMD64SHRLconst { 1943 break 1944 } 1945 c := v_0.AuxInt 1946 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 1947 break 1948 } 1949 v.reset(OpAMD64FlagLT_ULT) 1950 return true 1951 } 1952 // match: (CMPLconst (ANDLconst _ [m]) [n]) 1953 // cond: 0 <= int32(m) && int32(m) < int32(n) 1954 // result: (FlagLT_ULT) 1955 for { 1956 n := v.AuxInt 1957 v_0 := v.Args[0] 1958 if v_0.Op != OpAMD64ANDLconst { 1959 break 1960 } 1961 m := v_0.AuxInt 1962 if !(0 <= int32(m) && int32(m) < int32(n)) { 1963 break 1964 } 1965 v.reset(OpAMD64FlagLT_ULT) 1966 return true 1967 } 1968 // match: (CMPLconst (ANDL x y) [0]) 1969 // cond: 1970 // result: (TESTL x y) 1971 for { 1972 if v.AuxInt != 0 { 1973 break 1974 } 1975 v_0 := v.Args[0] 1976 if v_0.Op != OpAMD64ANDL { 1977 break 1978 } 1979 x := v_0.Args[0] 1980 y := v_0.Args[1] 1981 v.reset(OpAMD64TESTL) 1982 v.AddArg(x) 1983 v.AddArg(y) 1984 return true 1985 } 1986 // match: (CMPLconst (ANDLconst [c] x) [0]) 1987 // cond: 1988 // result: (TESTLconst [c] x) 1989 for { 1990 if v.AuxInt != 0 { 1991 break 1992 } 1993 v_0 := v.Args[0] 1994 if v_0.Op != OpAMD64ANDLconst { 1995 break 1996 } 1997 c := v_0.AuxInt 1998 x := v_0.Args[0] 1999 v.reset(OpAMD64TESTLconst) 2000 v.AuxInt = c 2001 v.AddArg(x) 2002 return true 2003 } 2004 // match: (CMPLconst x [0]) 2005 // cond: 2006 // result: (TESTL x x) 2007 for { 2008 if v.AuxInt != 0 { 2009 break 2010 } 2011 x := v.Args[0] 2012 v.reset(OpAMD64TESTL) 2013 v.AddArg(x) 2014 v.AddArg(x) 2015 return true 2016 } 2017 return false 2018 } 2019 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool { 2020 b := v.Block 2021 _ = b 2022 // match: (CMPQ x (MOVQconst [c])) 2023 // cond: is32Bit(c) 2024 // result: (CMPQconst x [c]) 2025 for { 2026 x := v.Args[0] 2027 v_1 := v.Args[1] 2028 if v_1.Op != OpAMD64MOVQconst { 2029 break 2030 } 2031 c := v_1.AuxInt 2032 if !(is32Bit(c)) { 2033 break 2034 } 2035 v.reset(OpAMD64CMPQconst) 2036 v.AuxInt = c 2037 v.AddArg(x) 2038 return true 2039 } 2040 // match: (CMPQ (MOVQconst [c]) x) 2041 // cond: is32Bit(c) 2042 // result: (InvertFlags (CMPQconst x [c])) 2043 for { 2044 v_0 := v.Args[0] 2045 if v_0.Op != OpAMD64MOVQconst { 2046 break 2047 } 2048 c := v_0.AuxInt 2049 x := v.Args[1] 2050 if !(is32Bit(c)) { 2051 break 2052 } 2053 v.reset(OpAMD64InvertFlags) 2054 v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 2055 v0.AuxInt = c 2056 v0.AddArg(x) 2057 v.AddArg(v0) 2058 return true 2059 } 2060 return false 2061 } 2062 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { 2063 b := v.Block 2064 _ = b 2065 // match: (CMPQconst (MOVQconst [x]) [y]) 2066 // cond: x==y 2067 // result: (FlagEQ) 2068 for { 2069 y := v.AuxInt 2070 v_0 := v.Args[0] 2071 if v_0.Op != OpAMD64MOVQconst { 2072 break 2073 } 2074 x := v_0.AuxInt 2075 if !(x == y) { 2076 break 2077 } 2078 v.reset(OpAMD64FlagEQ) 2079 return true 2080 } 2081 // match: (CMPQconst (MOVQconst [x]) [y]) 2082 // cond: x<y && uint64(x)<uint64(y) 2083 // result: (FlagLT_ULT) 2084 for { 2085 y := v.AuxInt 2086 v_0 := v.Args[0] 2087 if v_0.Op != OpAMD64MOVQconst { 2088 break 2089 } 2090 x := v_0.AuxInt 2091 if !(x < y && uint64(x) < uint64(y)) { 2092 break 2093 } 2094 v.reset(OpAMD64FlagLT_ULT) 2095 return true 2096 } 2097 // match: (CMPQconst (MOVQconst [x]) [y]) 2098 // cond: x<y && uint64(x)>uint64(y) 2099 // result: (FlagLT_UGT) 2100 for { 2101 y := v.AuxInt 2102 v_0 := v.Args[0] 2103 if v_0.Op != OpAMD64MOVQconst { 2104 break 2105 } 2106 x := v_0.AuxInt 2107 if !(x < y && uint64(x) > uint64(y)) { 2108 break 2109 } 2110 v.reset(OpAMD64FlagLT_UGT) 2111 return true 2112 } 2113 // match: (CMPQconst (MOVQconst [x]) [y]) 2114 // cond: x>y && uint64(x)<uint64(y) 2115 // result: (FlagGT_ULT) 2116 for { 2117 y := v.AuxInt 2118 v_0 := v.Args[0] 2119 if v_0.Op != OpAMD64MOVQconst { 2120 break 2121 } 2122 x := v_0.AuxInt 2123 if !(x > y && uint64(x) < uint64(y)) { 2124 break 2125 } 2126 v.reset(OpAMD64FlagGT_ULT) 2127 return true 2128 } 2129 // match: (CMPQconst (MOVQconst [x]) [y]) 2130 // cond: x>y && uint64(x)>uint64(y) 2131 // result: (FlagGT_UGT) 2132 for { 2133 y := v.AuxInt 2134 v_0 := v.Args[0] 2135 if v_0.Op != OpAMD64MOVQconst { 2136 break 2137 } 2138 x := v_0.AuxInt 2139 if !(x > y && uint64(x) > uint64(y)) { 2140 break 2141 } 2142 v.reset(OpAMD64FlagGT_UGT) 2143 return true 2144 } 2145 // match: (CMPQconst (MOVBQZX _) [c]) 2146 // cond: 0xFF < c 2147 // result: (FlagLT_ULT) 2148 for { 2149 c := v.AuxInt 2150 v_0 := v.Args[0] 2151 if v_0.Op != OpAMD64MOVBQZX { 2152 break 2153 } 2154 if !(0xFF < c) { 2155 break 2156 } 2157 v.reset(OpAMD64FlagLT_ULT) 2158 return true 2159 } 2160 // match: (CMPQconst (MOVWQZX _) [c]) 2161 // cond: 0xFFFF < c 2162 // result: (FlagLT_ULT) 2163 for { 2164 c := v.AuxInt 2165 v_0 := v.Args[0] 2166 if v_0.Op != OpAMD64MOVWQZX { 2167 break 2168 } 2169 if !(0xFFFF < c) { 2170 break 2171 } 2172 v.reset(OpAMD64FlagLT_ULT) 2173 return true 2174 } 2175 // match: (CMPQconst (MOVLQZX _) [c]) 2176 // cond: 0xFFFFFFFF < c 2177 // result: (FlagLT_ULT) 2178 for { 2179 c := v.AuxInt 2180 v_0 := v.Args[0] 2181 if v_0.Op != OpAMD64MOVLQZX { 2182 break 2183 } 2184 if !(0xFFFFFFFF < c) { 2185 break 2186 } 2187 v.reset(OpAMD64FlagLT_ULT) 2188 return true 2189 } 2190 // match: (CMPQconst (SHRQconst _ [c]) [n]) 2191 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 2192 // result: (FlagLT_ULT) 2193 for { 2194 n := v.AuxInt 2195 v_0 := v.Args[0] 2196 if v_0.Op != OpAMD64SHRQconst { 2197 break 2198 } 2199 c := v_0.AuxInt 2200 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 2201 break 2202 } 2203 v.reset(OpAMD64FlagLT_ULT) 2204 return true 2205 } 2206 // match: (CMPQconst (ANDQconst _ [m]) [n]) 2207 // cond: 0 <= m && m < n 2208 // result: (FlagLT_ULT) 2209 for { 2210 n := v.AuxInt 2211 v_0 := v.Args[0] 2212 if v_0.Op != OpAMD64ANDQconst { 2213 break 2214 } 2215 m := v_0.AuxInt 2216 if !(0 <= m && m < n) { 2217 break 2218 } 2219 v.reset(OpAMD64FlagLT_ULT) 2220 return true 2221 } 2222 // match: (CMPQconst (ANDQ x y) [0]) 2223 // cond: 2224 // result: (TESTQ x y) 2225 for { 2226 if v.AuxInt != 0 { 2227 break 2228 } 2229 v_0 := v.Args[0] 2230 if v_0.Op != OpAMD64ANDQ { 2231 break 2232 } 2233 x := v_0.Args[0] 2234 y := v_0.Args[1] 2235 v.reset(OpAMD64TESTQ) 2236 v.AddArg(x) 2237 v.AddArg(y) 2238 return true 2239 } 2240 // match: (CMPQconst (ANDQconst [c] x) [0]) 2241 // cond: 2242 // result: (TESTQconst [c] x) 2243 for { 2244 if v.AuxInt != 0 { 2245 break 2246 } 2247 v_0 := v.Args[0] 2248 if v_0.Op != OpAMD64ANDQconst { 2249 break 2250 } 2251 c := v_0.AuxInt 2252 x := v_0.Args[0] 2253 v.reset(OpAMD64TESTQconst) 2254 v.AuxInt = c 2255 v.AddArg(x) 2256 return true 2257 } 2258 // match: (CMPQconst x [0]) 2259 // cond: 2260 // result: (TESTQ x x) 2261 for { 2262 if v.AuxInt != 0 { 2263 break 2264 } 2265 x := v.Args[0] 2266 v.reset(OpAMD64TESTQ) 2267 v.AddArg(x) 2268 v.AddArg(x) 2269 return true 2270 } 2271 return false 2272 } 2273 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { 2274 b := v.Block 2275 _ = b 2276 // match: (CMPW x (MOVLconst [c])) 2277 // cond: 2278 // result: (CMPWconst x [int64(int16(c))]) 2279 for { 2280 x := v.Args[0] 2281 v_1 := v.Args[1] 2282 if v_1.Op != OpAMD64MOVLconst { 2283 break 2284 } 2285 c := v_1.AuxInt 2286 v.reset(OpAMD64CMPWconst) 2287 v.AuxInt = int64(int16(c)) 2288 v.AddArg(x) 2289 return true 2290 } 2291 // match: (CMPW (MOVLconst [c]) x) 2292 // cond: 2293 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 2294 for { 2295 v_0 := v.Args[0] 2296 if v_0.Op != OpAMD64MOVLconst { 2297 break 2298 } 2299 c := v_0.AuxInt 2300 x := v.Args[1] 2301 v.reset(OpAMD64InvertFlags) 2302 v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 2303 v0.AuxInt = int64(int16(c)) 2304 v0.AddArg(x) 2305 v.AddArg(v0) 2306 return true 2307 } 2308 return false 2309 } 2310 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { 2311 b := v.Block 2312 _ = b 2313 // match: (CMPWconst (MOVLconst [x]) [y]) 2314 // cond: int16(x)==int16(y) 2315 // result: (FlagEQ) 2316 for { 2317 y := v.AuxInt 2318 v_0 := v.Args[0] 2319 if v_0.Op != OpAMD64MOVLconst { 2320 break 2321 } 2322 x := v_0.AuxInt 2323 if !(int16(x) == int16(y)) { 2324 break 2325 } 2326 v.reset(OpAMD64FlagEQ) 2327 return true 2328 } 2329 // match: (CMPWconst (MOVLconst [x]) [y]) 2330 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 2331 // result: (FlagLT_ULT) 2332 for { 2333 y := v.AuxInt 2334 v_0 := v.Args[0] 2335 if v_0.Op != OpAMD64MOVLconst { 2336 break 2337 } 2338 x := v_0.AuxInt 2339 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 2340 break 2341 } 2342 v.reset(OpAMD64FlagLT_ULT) 2343 return true 2344 } 2345 // match: (CMPWconst (MOVLconst [x]) [y]) 2346 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 2347 // result: (FlagLT_UGT) 2348 for { 2349 y := v.AuxInt 2350 v_0 := v.Args[0] 2351 if v_0.Op != OpAMD64MOVLconst { 2352 break 2353 } 2354 x := v_0.AuxInt 2355 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 2356 break 2357 } 2358 v.reset(OpAMD64FlagLT_UGT) 2359 return true 2360 } 2361 // match: (CMPWconst (MOVLconst [x]) [y]) 2362 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 2363 // result: (FlagGT_ULT) 2364 for { 2365 y := v.AuxInt 2366 v_0 := v.Args[0] 2367 if v_0.Op != OpAMD64MOVLconst { 2368 break 2369 } 2370 x := v_0.AuxInt 2371 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 2372 break 2373 } 2374 v.reset(OpAMD64FlagGT_ULT) 2375 return true 2376 } 2377 // match: (CMPWconst (MOVLconst [x]) [y]) 2378 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 2379 // result: (FlagGT_UGT) 2380 for { 2381 y := v.AuxInt 2382 v_0 := v.Args[0] 2383 if v_0.Op != OpAMD64MOVLconst { 2384 break 2385 } 2386 x := v_0.AuxInt 2387 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 2388 break 2389 } 2390 v.reset(OpAMD64FlagGT_UGT) 2391 return true 2392 } 2393 // match: (CMPWconst (ANDLconst _ [m]) [n]) 2394 // cond: 0 <= int16(m) && int16(m) < int16(n) 2395 // result: (FlagLT_ULT) 2396 for { 2397 n := v.AuxInt 2398 v_0 := v.Args[0] 2399 if v_0.Op != OpAMD64ANDLconst { 2400 break 2401 } 2402 m := v_0.AuxInt 2403 if !(0 <= int16(m) && int16(m) < int16(n)) { 2404 break 2405 } 2406 v.reset(OpAMD64FlagLT_ULT) 2407 return true 2408 } 2409 // match: (CMPWconst (ANDL x y) [0]) 2410 // cond: 2411 // result: (TESTW x y) 2412 for { 2413 if v.AuxInt != 0 { 2414 break 2415 } 2416 v_0 := v.Args[0] 2417 if v_0.Op != OpAMD64ANDL { 2418 break 2419 } 2420 x := v_0.Args[0] 2421 y := v_0.Args[1] 2422 v.reset(OpAMD64TESTW) 2423 v.AddArg(x) 2424 v.AddArg(y) 2425 return true 2426 } 2427 // match: (CMPWconst (ANDLconst [c] x) [0]) 2428 // cond: 2429 // result: (TESTWconst [int64(int16(c))] x) 2430 for { 2431 if v.AuxInt != 0 { 2432 break 2433 } 2434 v_0 := v.Args[0] 2435 if v_0.Op != OpAMD64ANDLconst { 2436 break 2437 } 2438 c := v_0.AuxInt 2439 x := v_0.Args[0] 2440 v.reset(OpAMD64TESTWconst) 2441 v.AuxInt = int64(int16(c)) 2442 v.AddArg(x) 2443 return true 2444 } 2445 // match: (CMPWconst x [0]) 2446 // cond: 2447 // result: (TESTW x x) 2448 for { 2449 if v.AuxInt != 0 { 2450 break 2451 } 2452 x := v.Args[0] 2453 v.reset(OpAMD64TESTW) 2454 v.AddArg(x) 2455 v.AddArg(x) 2456 return true 2457 } 2458 return false 2459 } 2460 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool { 2461 b := v.Block 2462 _ = b 2463 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2464 // cond: is32Bit(off1+off2) 2465 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 2466 for { 2467 off1 := v.AuxInt 2468 sym := v.Aux 2469 v_0 := v.Args[0] 2470 if v_0.Op != OpAMD64ADDQconst { 2471 break 2472 } 2473 off2 := v_0.AuxInt 2474 ptr := v_0.Args[0] 2475 old := v.Args[1] 2476 new_ := v.Args[2] 2477 mem := v.Args[3] 2478 if !(is32Bit(off1 + off2)) { 2479 break 2480 } 2481 v.reset(OpAMD64CMPXCHGLlock) 2482 v.AuxInt = off1 + off2 2483 v.Aux = sym 2484 v.AddArg(ptr) 2485 v.AddArg(old) 2486 v.AddArg(new_) 2487 v.AddArg(mem) 2488 return true 2489 } 2490 return false 2491 } 2492 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool { 2493 b := v.Block 2494 _ = b 2495 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2496 // cond: is32Bit(off1+off2) 2497 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 2498 for { 2499 off1 := v.AuxInt 2500 sym := v.Aux 2501 v_0 := v.Args[0] 2502 if v_0.Op != OpAMD64ADDQconst { 2503 break 2504 } 2505 off2 := v_0.AuxInt 2506 ptr := v_0.Args[0] 2507 old := v.Args[1] 2508 new_ := v.Args[2] 2509 mem := v.Args[3] 2510 if !(is32Bit(off1 + off2)) { 2511 break 2512 } 2513 v.reset(OpAMD64CMPXCHGQlock) 2514 v.AuxInt = off1 + off2 2515 v.Aux = sym 2516 v.AddArg(ptr) 2517 v.AddArg(old) 2518 v.AddArg(new_) 2519 v.AddArg(mem) 2520 return true 2521 } 2522 return false 2523 } 2524 func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool { 2525 b := v.Block 2526 _ = b 2527 // match: (LEAL [c] {s} (ADDLconst [d] x)) 2528 // cond: is32Bit(c+d) 2529 // result: (LEAL [c+d] {s} x) 2530 for { 2531 c := v.AuxInt 2532 s := v.Aux 2533 v_0 := v.Args[0] 2534 if v_0.Op != OpAMD64ADDLconst { 2535 break 2536 } 2537 d := v_0.AuxInt 2538 x := v_0.Args[0] 2539 if !(is32Bit(c + d)) { 2540 break 2541 } 2542 v.reset(OpAMD64LEAL) 2543 v.AuxInt = c + d 2544 v.Aux = s 2545 v.AddArg(x) 2546 return true 2547 } 2548 return false 2549 } 2550 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { 2551 b := v.Block 2552 _ = b 2553 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 2554 // cond: is32Bit(c+d) 2555 // result: (LEAQ [c+d] {s} x) 2556 for { 2557 c := v.AuxInt 2558 s := v.Aux 2559 v_0 := v.Args[0] 2560 if v_0.Op != OpAMD64ADDQconst { 2561 break 2562 } 2563 d := v_0.AuxInt 2564 x := v_0.Args[0] 2565 if !(is32Bit(c + d)) { 2566 break 2567 } 2568 v.reset(OpAMD64LEAQ) 2569 v.AuxInt = c + d 2570 v.Aux = s 2571 v.AddArg(x) 2572 return true 2573 } 2574 // match: (LEAQ [c] {s} (ADDQ x y)) 2575 // cond: x.Op != OpSB && y.Op != OpSB 2576 // result: (LEAQ1 [c] {s} x y) 2577 for { 2578 c := v.AuxInt 2579 s := v.Aux 2580 v_0 := v.Args[0] 2581 if v_0.Op != OpAMD64ADDQ { 2582 break 2583 } 2584 x := v_0.Args[0] 2585 y := v_0.Args[1] 2586 if !(x.Op != OpSB && y.Op != OpSB) { 2587 break 2588 } 2589 v.reset(OpAMD64LEAQ1) 2590 v.AuxInt = c 2591 v.Aux = s 2592 v.AddArg(x) 2593 v.AddArg(y) 2594 return true 2595 } 2596 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 2597 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2598 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 2599 for { 2600 off1 := v.AuxInt 2601 sym1 := v.Aux 2602 v_0 := v.Args[0] 2603 if v_0.Op != OpAMD64LEAQ { 2604 break 2605 } 2606 off2 := v_0.AuxInt 2607 sym2 := v_0.Aux 2608 x := v_0.Args[0] 2609 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2610 break 2611 } 2612 v.reset(OpAMD64LEAQ) 2613 v.AuxInt = off1 + off2 2614 v.Aux = mergeSym(sym1, sym2) 2615 v.AddArg(x) 2616 return true 2617 } 2618 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 2619 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2620 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2621 for { 2622 off1 := v.AuxInt 2623 sym1 := v.Aux 2624 v_0 := v.Args[0] 2625 if v_0.Op != OpAMD64LEAQ1 { 2626 break 2627 } 2628 off2 := v_0.AuxInt 2629 sym2 := v_0.Aux 2630 x := v_0.Args[0] 2631 y := v_0.Args[1] 2632 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2633 break 2634 } 2635 v.reset(OpAMD64LEAQ1) 2636 v.AuxInt = off1 + off2 2637 v.Aux = mergeSym(sym1, sym2) 2638 v.AddArg(x) 2639 v.AddArg(y) 2640 return true 2641 } 2642 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 2643 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2644 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 2645 for { 2646 off1 := v.AuxInt 2647 sym1 := v.Aux 2648 v_0 := v.Args[0] 2649 if v_0.Op != OpAMD64LEAQ2 { 2650 break 2651 } 2652 off2 := v_0.AuxInt 2653 sym2 := v_0.Aux 2654 x := v_0.Args[0] 2655 y := v_0.Args[1] 2656 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2657 break 2658 } 2659 v.reset(OpAMD64LEAQ2) 2660 v.AuxInt = off1 + off2 2661 v.Aux = mergeSym(sym1, sym2) 2662 v.AddArg(x) 2663 v.AddArg(y) 2664 return true 2665 } 2666 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 2667 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2668 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 2669 for { 2670 off1 := v.AuxInt 2671 sym1 := v.Aux 2672 v_0 := v.Args[0] 2673 if v_0.Op != OpAMD64LEAQ4 { 2674 break 2675 } 2676 off2 := v_0.AuxInt 2677 sym2 := v_0.Aux 2678 x := v_0.Args[0] 2679 y := v_0.Args[1] 2680 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2681 break 2682 } 2683 v.reset(OpAMD64LEAQ4) 2684 v.AuxInt = off1 + off2 2685 v.Aux = mergeSym(sym1, sym2) 2686 v.AddArg(x) 2687 v.AddArg(y) 2688 return true 2689 } 2690 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 2691 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2692 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 2693 for { 2694 off1 := v.AuxInt 2695 sym1 := v.Aux 2696 v_0 := v.Args[0] 2697 if v_0.Op != OpAMD64LEAQ8 { 2698 break 2699 } 2700 off2 := v_0.AuxInt 2701 sym2 := v_0.Aux 2702 x := v_0.Args[0] 2703 y := v_0.Args[1] 2704 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2705 break 2706 } 2707 v.reset(OpAMD64LEAQ8) 2708 v.AuxInt = off1 + off2 2709 v.Aux = mergeSym(sym1, sym2) 2710 v.AddArg(x) 2711 v.AddArg(y) 2712 return true 2713 } 2714 return false 2715 } 2716 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { 2717 b := v.Block 2718 _ = b 2719 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 2720 // cond: is32Bit(c+d) && x.Op != OpSB 2721 // result: (LEAQ1 [c+d] {s} x y) 2722 for { 2723 c := v.AuxInt 2724 s := v.Aux 2725 v_0 := v.Args[0] 2726 if v_0.Op != OpAMD64ADDQconst { 2727 break 2728 } 2729 d := v_0.AuxInt 2730 x := v_0.Args[0] 2731 y := v.Args[1] 2732 if !(is32Bit(c+d) && x.Op != OpSB) { 2733 break 2734 } 2735 v.reset(OpAMD64LEAQ1) 2736 v.AuxInt = c + d 2737 v.Aux = s 2738 v.AddArg(x) 2739 v.AddArg(y) 2740 return true 2741 } 2742 // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) 2743 // cond: is32Bit(c+d) && y.Op != OpSB 2744 // result: (LEAQ1 [c+d] {s} x y) 2745 for { 2746 c := v.AuxInt 2747 s := v.Aux 2748 x := v.Args[0] 2749 v_1 := v.Args[1] 2750 if v_1.Op != OpAMD64ADDQconst { 2751 break 2752 } 2753 d := v_1.AuxInt 2754 y := v_1.Args[0] 2755 if !(is32Bit(c+d) && y.Op != OpSB) { 2756 break 2757 } 2758 v.reset(OpAMD64LEAQ1) 2759 v.AuxInt = c + d 2760 v.Aux = s 2761 v.AddArg(x) 2762 v.AddArg(y) 2763 return true 2764 } 2765 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 2766 // cond: 2767 // result: (LEAQ2 [c] {s} x y) 2768 for { 2769 c := v.AuxInt 2770 s := v.Aux 2771 x := v.Args[0] 2772 v_1 := v.Args[1] 2773 if v_1.Op != OpAMD64SHLQconst { 2774 break 2775 } 2776 if v_1.AuxInt != 1 { 2777 break 2778 } 2779 y := v_1.Args[0] 2780 v.reset(OpAMD64LEAQ2) 2781 v.AuxInt = c 2782 v.Aux = s 2783 v.AddArg(x) 2784 v.AddArg(y) 2785 return true 2786 } 2787 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 2788 // cond: 2789 // result: (LEAQ2 [c] {s} y x) 2790 for { 2791 c := v.AuxInt 2792 s := v.Aux 2793 v_0 := v.Args[0] 2794 if v_0.Op != OpAMD64SHLQconst { 2795 break 2796 } 2797 if v_0.AuxInt != 1 { 2798 break 2799 } 2800 x := v_0.Args[0] 2801 y := v.Args[1] 2802 v.reset(OpAMD64LEAQ2) 2803 v.AuxInt = c 2804 v.Aux = s 2805 v.AddArg(y) 2806 v.AddArg(x) 2807 return true 2808 } 2809 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 2810 // cond: 2811 // result: (LEAQ4 [c] {s} x y) 2812 for { 2813 c := v.AuxInt 2814 s := v.Aux 2815 x := v.Args[0] 2816 v_1 := v.Args[1] 2817 if v_1.Op != OpAMD64SHLQconst { 2818 break 2819 } 2820 if v_1.AuxInt != 2 { 2821 break 2822 } 2823 y := v_1.Args[0] 2824 v.reset(OpAMD64LEAQ4) 2825 v.AuxInt = c 2826 v.Aux = s 2827 v.AddArg(x) 2828 v.AddArg(y) 2829 return true 2830 } 2831 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 2832 // cond: 2833 // result: (LEAQ4 [c] {s} y x) 2834 for { 2835 c := v.AuxInt 2836 s := v.Aux 2837 v_0 := v.Args[0] 2838 if v_0.Op != OpAMD64SHLQconst { 2839 break 2840 } 2841 if v_0.AuxInt != 2 { 2842 break 2843 } 2844 x := v_0.Args[0] 2845 y := v.Args[1] 2846 v.reset(OpAMD64LEAQ4) 2847 v.AuxInt = c 2848 v.Aux = s 2849 v.AddArg(y) 2850 v.AddArg(x) 2851 return true 2852 } 2853 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 2854 // cond: 2855 // result: (LEAQ8 [c] {s} x y) 2856 for { 2857 c := v.AuxInt 2858 s := v.Aux 2859 x := v.Args[0] 2860 v_1 := v.Args[1] 2861 if v_1.Op != OpAMD64SHLQconst { 2862 break 2863 } 2864 if v_1.AuxInt != 3 { 2865 break 2866 } 2867 y := v_1.Args[0] 2868 v.reset(OpAMD64LEAQ8) 2869 v.AuxInt = c 2870 v.Aux = s 2871 v.AddArg(x) 2872 v.AddArg(y) 2873 return true 2874 } 2875 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) 2876 // cond: 2877 // result: (LEAQ8 [c] {s} y x) 2878 for { 2879 c := v.AuxInt 2880 s := v.Aux 2881 v_0 := v.Args[0] 2882 if v_0.Op != OpAMD64SHLQconst { 2883 break 2884 } 2885 if v_0.AuxInt != 3 { 2886 break 2887 } 2888 x := v_0.Args[0] 2889 y := v.Args[1] 2890 v.reset(OpAMD64LEAQ8) 2891 v.AuxInt = c 2892 v.Aux = s 2893 v.AddArg(y) 2894 v.AddArg(x) 2895 return true 2896 } 2897 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 2898 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 2899 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2900 for { 2901 off1 := v.AuxInt 2902 sym1 := v.Aux 2903 v_0 := v.Args[0] 2904 if v_0.Op != OpAMD64LEAQ { 2905 break 2906 } 2907 off2 := v_0.AuxInt 2908 sym2 := v_0.Aux 2909 x := v_0.Args[0] 2910 y := v.Args[1] 2911 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 2912 break 2913 } 2914 v.reset(OpAMD64LEAQ1) 2915 v.AuxInt = off1 + off2 2916 v.Aux = mergeSym(sym1, sym2) 2917 v.AddArg(x) 2918 v.AddArg(y) 2919 return true 2920 } 2921 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 2922 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 2923 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2924 for { 2925 off1 := v.AuxInt 2926 sym1 := v.Aux 2927 x := v.Args[0] 2928 v_1 := v.Args[1] 2929 if v_1.Op != OpAMD64LEAQ { 2930 break 2931 } 2932 off2 := v_1.AuxInt 2933 sym2 := v_1.Aux 2934 y := v_1.Args[0] 2935 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 2936 break 2937 } 2938 v.reset(OpAMD64LEAQ1) 2939 v.AuxInt = off1 + off2 2940 v.Aux = mergeSym(sym1, sym2) 2941 v.AddArg(x) 2942 v.AddArg(y) 2943 return true 2944 } 2945 return false 2946 } 2947 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 2948 b := v.Block 2949 _ = b 2950 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 2951 // cond: is32Bit(c+d) && x.Op != OpSB 2952 // result: (LEAQ2 [c+d] {s} x y) 2953 for { 2954 c := v.AuxInt 2955 s := v.Aux 2956 v_0 := v.Args[0] 2957 if v_0.Op != OpAMD64ADDQconst { 2958 break 2959 } 2960 d := v_0.AuxInt 2961 x := v_0.Args[0] 2962 y := v.Args[1] 2963 if !(is32Bit(c+d) && x.Op != OpSB) { 2964 break 2965 } 2966 v.reset(OpAMD64LEAQ2) 2967 v.AuxInt = c + d 2968 v.Aux = s 2969 v.AddArg(x) 2970 v.AddArg(y) 2971 return true 2972 } 2973 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 2974 // cond: is32Bit(c+2*d) && y.Op != OpSB 2975 // result: (LEAQ2 [c+2*d] {s} x y) 2976 for { 2977 c := v.AuxInt 2978 s := v.Aux 2979 x := v.Args[0] 2980 v_1 := v.Args[1] 2981 if v_1.Op != OpAMD64ADDQconst { 2982 break 2983 } 2984 d := v_1.AuxInt 2985 y := v_1.Args[0] 2986 if !(is32Bit(c+2*d) && y.Op != OpSB) { 2987 break 2988 } 2989 v.reset(OpAMD64LEAQ2) 2990 v.AuxInt = c + 2*d 2991 v.Aux = s 2992 v.AddArg(x) 2993 v.AddArg(y) 2994 return true 2995 } 2996 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 2997 // cond: 2998 // result: (LEAQ4 [c] {s} x y) 2999 for { 3000 c := v.AuxInt 3001 s := v.Aux 3002 x := v.Args[0] 3003 v_1 := v.Args[1] 3004 if v_1.Op != OpAMD64SHLQconst { 3005 break 3006 } 3007 if v_1.AuxInt != 1 { 3008 break 3009 } 3010 y := v_1.Args[0] 3011 v.reset(OpAMD64LEAQ4) 3012 v.AuxInt = c 3013 v.Aux = s 3014 v.AddArg(x) 3015 v.AddArg(y) 3016 return true 3017 } 3018 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 3019 // cond: 3020 // result: (LEAQ8 [c] {s} x y) 3021 for { 3022 c := v.AuxInt 3023 s := v.Aux 3024 x := v.Args[0] 3025 v_1 := v.Args[1] 3026 if v_1.Op != OpAMD64SHLQconst { 3027 break 3028 } 3029 if v_1.AuxInt != 2 { 3030 break 3031 } 3032 y := v_1.Args[0] 3033 v.reset(OpAMD64LEAQ8) 3034 v.AuxInt = c 3035 v.Aux = s 3036 v.AddArg(x) 3037 v.AddArg(y) 3038 return true 3039 } 3040 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3041 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3042 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3043 for { 3044 off1 := v.AuxInt 3045 sym1 := v.Aux 3046 v_0 := v.Args[0] 3047 if v_0.Op != OpAMD64LEAQ { 3048 break 3049 } 3050 off2 := v_0.AuxInt 3051 sym2 := v_0.Aux 3052 x := v_0.Args[0] 3053 y := v.Args[1] 3054 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3055 break 3056 } 3057 v.reset(OpAMD64LEAQ2) 3058 v.AuxInt = off1 + off2 3059 v.Aux = mergeSym(sym1, sym2) 3060 v.AddArg(x) 3061 v.AddArg(y) 3062 return true 3063 } 3064 return false 3065 } 3066 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 3067 b := v.Block 3068 _ = b 3069 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 3070 // cond: is32Bit(c+d) && x.Op != OpSB 3071 // result: (LEAQ4 [c+d] {s} x y) 3072 for { 3073 c := v.AuxInt 3074 s := v.Aux 3075 v_0 := v.Args[0] 3076 if v_0.Op != OpAMD64ADDQconst { 3077 break 3078 } 3079 d := v_0.AuxInt 3080 x := v_0.Args[0] 3081 y := v.Args[1] 3082 if !(is32Bit(c+d) && x.Op != OpSB) { 3083 break 3084 } 3085 v.reset(OpAMD64LEAQ4) 3086 v.AuxInt = c + d 3087 v.Aux = s 3088 v.AddArg(x) 3089 v.AddArg(y) 3090 return true 3091 } 3092 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 3093 // cond: is32Bit(c+4*d) && y.Op != OpSB 3094 // result: (LEAQ4 [c+4*d] {s} x y) 3095 for { 3096 c := v.AuxInt 3097 s := v.Aux 3098 x := v.Args[0] 3099 v_1 := v.Args[1] 3100 if v_1.Op != OpAMD64ADDQconst { 3101 break 3102 } 3103 d := v_1.AuxInt 3104 y := v_1.Args[0] 3105 if !(is32Bit(c+4*d) && y.Op != OpSB) { 3106 break 3107 } 3108 v.reset(OpAMD64LEAQ4) 3109 v.AuxInt = c + 4*d 3110 v.Aux = s 3111 v.AddArg(x) 3112 v.AddArg(y) 3113 return true 3114 } 3115 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 3116 // cond: 3117 // result: (LEAQ8 [c] {s} x y) 3118 for { 3119 c := v.AuxInt 3120 s := v.Aux 3121 x := v.Args[0] 3122 v_1 := v.Args[1] 3123 if v_1.Op != OpAMD64SHLQconst { 3124 break 3125 } 3126 if v_1.AuxInt != 1 { 3127 break 3128 } 3129 y := v_1.Args[0] 3130 v.reset(OpAMD64LEAQ8) 3131 v.AuxInt = c 3132 v.Aux = s 3133 v.AddArg(x) 3134 v.AddArg(y) 3135 return true 3136 } 3137 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3138 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3139 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3140 for { 3141 off1 := v.AuxInt 3142 sym1 := v.Aux 3143 v_0 := v.Args[0] 3144 if v_0.Op != OpAMD64LEAQ { 3145 break 3146 } 3147 off2 := v_0.AuxInt 3148 sym2 := v_0.Aux 3149 x := v_0.Args[0] 3150 y := v.Args[1] 3151 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3152 break 3153 } 3154 v.reset(OpAMD64LEAQ4) 3155 v.AuxInt = off1 + off2 3156 v.Aux = mergeSym(sym1, sym2) 3157 v.AddArg(x) 3158 v.AddArg(y) 3159 return true 3160 } 3161 return false 3162 } 3163 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 3164 b := v.Block 3165 _ = b 3166 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 3167 // cond: is32Bit(c+d) && x.Op != OpSB 3168 // result: (LEAQ8 [c+d] {s} x y) 3169 for { 3170 c := v.AuxInt 3171 s := v.Aux 3172 v_0 := v.Args[0] 3173 if v_0.Op != OpAMD64ADDQconst { 3174 break 3175 } 3176 d := v_0.AuxInt 3177 x := v_0.Args[0] 3178 y := v.Args[1] 3179 if !(is32Bit(c+d) && x.Op != OpSB) { 3180 break 3181 } 3182 v.reset(OpAMD64LEAQ8) 3183 v.AuxInt = c + d 3184 v.Aux = s 3185 v.AddArg(x) 3186 v.AddArg(y) 3187 return true 3188 } 3189 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 3190 // cond: is32Bit(c+8*d) && y.Op != OpSB 3191 // result: (LEAQ8 [c+8*d] {s} x y) 3192 for { 3193 c := v.AuxInt 3194 s := v.Aux 3195 x := v.Args[0] 3196 v_1 := v.Args[1] 3197 if v_1.Op != OpAMD64ADDQconst { 3198 break 3199 } 3200 d := v_1.AuxInt 3201 y := v_1.Args[0] 3202 if !(is32Bit(c+8*d) && y.Op != OpSB) { 3203 break 3204 } 3205 v.reset(OpAMD64LEAQ8) 3206 v.AuxInt = c + 8*d 3207 v.Aux = s 3208 v.AddArg(x) 3209 v.AddArg(y) 3210 return true 3211 } 3212 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3213 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3214 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3215 for { 3216 off1 := v.AuxInt 3217 sym1 := v.Aux 3218 v_0 := v.Args[0] 3219 if v_0.Op != OpAMD64LEAQ { 3220 break 3221 } 3222 off2 := v_0.AuxInt 3223 sym2 := v_0.Aux 3224 x := v_0.Args[0] 3225 y := v.Args[1] 3226 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3227 break 3228 } 3229 v.reset(OpAMD64LEAQ8) 3230 v.AuxInt = off1 + off2 3231 v.Aux = mergeSym(sym1, sym2) 3232 v.AddArg(x) 3233 v.AddArg(y) 3234 return true 3235 } 3236 return false 3237 } 3238 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 3239 b := v.Block 3240 _ = b 3241 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 3242 // cond: x.Uses == 1 && clobber(x) 3243 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3244 for { 3245 x := v.Args[0] 3246 if x.Op != OpAMD64MOVBload { 3247 break 3248 } 3249 off := x.AuxInt 3250 sym := x.Aux 3251 ptr := x.Args[0] 3252 mem := x.Args[1] 3253 if !(x.Uses == 1 && clobber(x)) { 3254 break 3255 } 3256 b = x.Block 3257 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3258 v.reset(OpCopy) 3259 v.AddArg(v0) 3260 v0.AuxInt = off 3261 v0.Aux = sym 3262 v0.AddArg(ptr) 3263 v0.AddArg(mem) 3264 return true 3265 } 3266 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 3267 // cond: x.Uses == 1 && clobber(x) 3268 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3269 for { 3270 x := v.Args[0] 3271 if x.Op != OpAMD64MOVWload { 3272 break 3273 } 3274 off := x.AuxInt 3275 sym := x.Aux 3276 ptr := x.Args[0] 3277 mem := x.Args[1] 3278 if !(x.Uses == 1 && clobber(x)) { 3279 break 3280 } 3281 b = x.Block 3282 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3283 v.reset(OpCopy) 3284 v.AddArg(v0) 3285 v0.AuxInt = off 3286 v0.Aux = sym 3287 v0.AddArg(ptr) 3288 v0.AddArg(mem) 3289 return true 3290 } 3291 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 3292 // cond: x.Uses == 1 && clobber(x) 3293 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3294 for { 3295 x := v.Args[0] 3296 if x.Op != OpAMD64MOVLload { 3297 break 3298 } 3299 off := x.AuxInt 3300 sym := x.Aux 3301 ptr := x.Args[0] 3302 mem := x.Args[1] 3303 if !(x.Uses == 1 && clobber(x)) { 3304 break 3305 } 3306 b = x.Block 3307 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3308 v.reset(OpCopy) 3309 v.AddArg(v0) 3310 v0.AuxInt = off 3311 v0.Aux = sym 3312 v0.AddArg(ptr) 3313 v0.AddArg(mem) 3314 return true 3315 } 3316 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 3317 // cond: x.Uses == 1 && clobber(x) 3318 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3319 for { 3320 x := v.Args[0] 3321 if x.Op != OpAMD64MOVQload { 3322 break 3323 } 3324 off := x.AuxInt 3325 sym := x.Aux 3326 ptr := x.Args[0] 3327 mem := x.Args[1] 3328 if !(x.Uses == 1 && clobber(x)) { 3329 break 3330 } 3331 b = x.Block 3332 v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) 3333 v.reset(OpCopy) 3334 v.AddArg(v0) 3335 v0.AuxInt = off 3336 v0.Aux = sym 3337 v0.AddArg(ptr) 3338 v0.AddArg(mem) 3339 return true 3340 } 3341 // match: (MOVBQSX (ANDLconst [c] x)) 3342 // cond: c & 0x80 == 0 3343 // result: (ANDLconst [c & 0x7f] x) 3344 for { 3345 v_0 := v.Args[0] 3346 if v_0.Op != OpAMD64ANDLconst { 3347 break 3348 } 3349 c := v_0.AuxInt 3350 x := v_0.Args[0] 3351 if !(c&0x80 == 0) { 3352 break 3353 } 3354 v.reset(OpAMD64ANDLconst) 3355 v.AuxInt = c & 0x7f 3356 v.AddArg(x) 3357 return true 3358 } 3359 return false 3360 } 3361 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 3362 b := v.Block 3363 _ = b 3364 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3365 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3366 // result: (MOVBQSX x) 3367 for { 3368 off := v.AuxInt 3369 sym := v.Aux 3370 ptr := v.Args[0] 3371 v_1 := v.Args[1] 3372 if v_1.Op != OpAMD64MOVBstore { 3373 break 3374 } 3375 off2 := v_1.AuxInt 3376 sym2 := v_1.Aux 3377 ptr2 := v_1.Args[0] 3378 x := v_1.Args[1] 3379 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3380 break 3381 } 3382 v.reset(OpAMD64MOVBQSX) 3383 v.AddArg(x) 3384 return true 3385 } 3386 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3387 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3388 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3389 for { 3390 off1 := v.AuxInt 3391 sym1 := v.Aux 3392 v_0 := v.Args[0] 3393 if v_0.Op != OpAMD64LEAQ { 3394 break 3395 } 3396 off2 := v_0.AuxInt 3397 sym2 := v_0.Aux 3398 base := v_0.Args[0] 3399 mem := v.Args[1] 3400 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3401 break 3402 } 3403 v.reset(OpAMD64MOVBQSXload) 3404 v.AuxInt = off1 + off2 3405 v.Aux = mergeSym(sym1, sym2) 3406 v.AddArg(base) 3407 v.AddArg(mem) 3408 return true 3409 } 3410 return false 3411 } 3412 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 3413 b := v.Block 3414 _ = b 3415 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 3416 // cond: x.Uses == 1 && clobber(x) 3417 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3418 for { 3419 x := v.Args[0] 3420 if x.Op != OpAMD64MOVBload { 3421 break 3422 } 3423 off := x.AuxInt 3424 sym := x.Aux 3425 ptr := x.Args[0] 3426 mem := x.Args[1] 3427 if !(x.Uses == 1 && clobber(x)) { 3428 break 3429 } 3430 b = x.Block 3431 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3432 v.reset(OpCopy) 3433 v.AddArg(v0) 3434 v0.AuxInt = off 3435 v0.Aux = sym 3436 v0.AddArg(ptr) 3437 v0.AddArg(mem) 3438 return true 3439 } 3440 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 3441 // cond: x.Uses == 1 && clobber(x) 3442 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3443 for { 3444 x := v.Args[0] 3445 if x.Op != OpAMD64MOVWload { 3446 break 3447 } 3448 off := x.AuxInt 3449 sym := x.Aux 3450 ptr := x.Args[0] 3451 mem := x.Args[1] 3452 if !(x.Uses == 1 && clobber(x)) { 3453 break 3454 } 3455 b = x.Block 3456 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3457 v.reset(OpCopy) 3458 v.AddArg(v0) 3459 v0.AuxInt = off 3460 v0.Aux = sym 3461 v0.AddArg(ptr) 3462 v0.AddArg(mem) 3463 return true 3464 } 3465 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 3466 // cond: x.Uses == 1 && clobber(x) 3467 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3468 for { 3469 x := v.Args[0] 3470 if x.Op != OpAMD64MOVLload { 3471 break 3472 } 3473 off := x.AuxInt 3474 sym := x.Aux 3475 ptr := x.Args[0] 3476 mem := x.Args[1] 3477 if !(x.Uses == 1 && clobber(x)) { 3478 break 3479 } 3480 b = x.Block 3481 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3482 v.reset(OpCopy) 3483 v.AddArg(v0) 3484 v0.AuxInt = off 3485 v0.Aux = sym 3486 v0.AddArg(ptr) 3487 v0.AddArg(mem) 3488 return true 3489 } 3490 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 3491 // cond: x.Uses == 1 && clobber(x) 3492 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3493 for { 3494 x := v.Args[0] 3495 if x.Op != OpAMD64MOVQload { 3496 break 3497 } 3498 off := x.AuxInt 3499 sym := x.Aux 3500 ptr := x.Args[0] 3501 mem := x.Args[1] 3502 if !(x.Uses == 1 && clobber(x)) { 3503 break 3504 } 3505 b = x.Block 3506 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) 3507 v.reset(OpCopy) 3508 v.AddArg(v0) 3509 v0.AuxInt = off 3510 v0.Aux = sym 3511 v0.AddArg(ptr) 3512 v0.AddArg(mem) 3513 return true 3514 } 3515 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 3516 // cond: x.Uses == 1 && clobber(x) 3517 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 3518 for { 3519 x := v.Args[0] 3520 if x.Op != OpAMD64MOVBloadidx1 { 3521 break 3522 } 3523 off := x.AuxInt 3524 sym := x.Aux 3525 ptr := x.Args[0] 3526 idx := x.Args[1] 3527 mem := x.Args[2] 3528 if !(x.Uses == 1 && clobber(x)) { 3529 break 3530 } 3531 b = x.Block 3532 v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type) 3533 v.reset(OpCopy) 3534 v.AddArg(v0) 3535 v0.AuxInt = off 3536 v0.Aux = sym 3537 v0.AddArg(ptr) 3538 v0.AddArg(idx) 3539 v0.AddArg(mem) 3540 return true 3541 } 3542 // match: (MOVBQZX (ANDLconst [c] x)) 3543 // cond: 3544 // result: (ANDLconst [c & 0xff] x) 3545 for { 3546 v_0 := v.Args[0] 3547 if v_0.Op != OpAMD64ANDLconst { 3548 break 3549 } 3550 c := v_0.AuxInt 3551 x := v_0.Args[0] 3552 v.reset(OpAMD64ANDLconst) 3553 v.AuxInt = c & 0xff 3554 v.AddArg(x) 3555 return true 3556 } 3557 return false 3558 } 3559 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { 3560 b := v.Block 3561 _ = b 3562 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3563 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3564 // result: (MOVBQZX x) 3565 for { 3566 off := v.AuxInt 3567 sym := v.Aux 3568 ptr := v.Args[0] 3569 v_1 := v.Args[1] 3570 if v_1.Op != OpAMD64MOVBstore { 3571 break 3572 } 3573 off2 := v_1.AuxInt 3574 sym2 := v_1.Aux 3575 ptr2 := v_1.Args[0] 3576 x := v_1.Args[1] 3577 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3578 break 3579 } 3580 v.reset(OpAMD64MOVBQZX) 3581 v.AddArg(x) 3582 return true 3583 } 3584 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 3585 // cond: is32Bit(off1+off2) 3586 // result: (MOVBload [off1+off2] {sym} ptr mem) 3587 for { 3588 off1 := v.AuxInt 3589 sym := v.Aux 3590 v_0 := v.Args[0] 3591 if v_0.Op != OpAMD64ADDQconst { 3592 break 3593 } 3594 off2 := v_0.AuxInt 3595 ptr := v_0.Args[0] 3596 mem := v.Args[1] 3597 if !(is32Bit(off1 + off2)) { 3598 break 3599 } 3600 v.reset(OpAMD64MOVBload) 3601 v.AuxInt = off1 + off2 3602 v.Aux = sym 3603 v.AddArg(ptr) 3604 v.AddArg(mem) 3605 return true 3606 } 3607 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3608 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3609 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3610 for { 3611 off1 := v.AuxInt 3612 sym1 := v.Aux 3613 v_0 := v.Args[0] 3614 if v_0.Op != OpAMD64LEAQ { 3615 break 3616 } 3617 off2 := v_0.AuxInt 3618 sym2 := v_0.Aux 3619 base := v_0.Args[0] 3620 mem := v.Args[1] 3621 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3622 break 3623 } 3624 v.reset(OpAMD64MOVBload) 3625 v.AuxInt = off1 + off2 3626 v.Aux = mergeSym(sym1, sym2) 3627 v.AddArg(base) 3628 v.AddArg(mem) 3629 return true 3630 } 3631 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 3632 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3633 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 3634 for { 3635 off1 := v.AuxInt 3636 sym1 := v.Aux 3637 v_0 := v.Args[0] 3638 if v_0.Op != OpAMD64LEAQ1 { 3639 break 3640 } 3641 off2 := v_0.AuxInt 3642 sym2 := v_0.Aux 3643 ptr := v_0.Args[0] 3644 idx := v_0.Args[1] 3645 mem := v.Args[1] 3646 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3647 break 3648 } 3649 v.reset(OpAMD64MOVBloadidx1) 3650 v.AuxInt = off1 + off2 3651 v.Aux = mergeSym(sym1, sym2) 3652 v.AddArg(ptr) 3653 v.AddArg(idx) 3654 v.AddArg(mem) 3655 return true 3656 } 3657 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 3658 // cond: ptr.Op != OpSB 3659 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 3660 for { 3661 off := v.AuxInt 3662 sym := v.Aux 3663 v_0 := v.Args[0] 3664 if v_0.Op != OpAMD64ADDQ { 3665 break 3666 } 3667 ptr := v_0.Args[0] 3668 idx := v_0.Args[1] 3669 mem := v.Args[1] 3670 if !(ptr.Op != OpSB) { 3671 break 3672 } 3673 v.reset(OpAMD64MOVBloadidx1) 3674 v.AuxInt = off 3675 v.Aux = sym 3676 v.AddArg(ptr) 3677 v.AddArg(idx) 3678 v.AddArg(mem) 3679 return true 3680 } 3681 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 3682 // cond: canMergeSym(sym1, sym2) 3683 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3684 for { 3685 off1 := v.AuxInt 3686 sym1 := v.Aux 3687 v_0 := v.Args[0] 3688 if v_0.Op != OpAMD64LEAL { 3689 break 3690 } 3691 off2 := v_0.AuxInt 3692 sym2 := v_0.Aux 3693 base := v_0.Args[0] 3694 mem := v.Args[1] 3695 if !(canMergeSym(sym1, sym2)) { 3696 break 3697 } 3698 v.reset(OpAMD64MOVBload) 3699 v.AuxInt = off1 + off2 3700 v.Aux = mergeSym(sym1, sym2) 3701 v.AddArg(base) 3702 v.AddArg(mem) 3703 return true 3704 } 3705 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 3706 // cond: is32Bit(off1+off2) 3707 // result: (MOVBload [off1+off2] {sym} ptr mem) 3708 for { 3709 off1 := v.AuxInt 3710 sym := v.Aux 3711 v_0 := v.Args[0] 3712 if v_0.Op != OpAMD64ADDLconst { 3713 break 3714 } 3715 off2 := v_0.AuxInt 3716 ptr := v_0.Args[0] 3717 mem := v.Args[1] 3718 if !(is32Bit(off1 + off2)) { 3719 break 3720 } 3721 v.reset(OpAMD64MOVBload) 3722 v.AuxInt = off1 + off2 3723 v.Aux = sym 3724 v.AddArg(ptr) 3725 v.AddArg(mem) 3726 return true 3727 } 3728 return false 3729 } 3730 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 3731 b := v.Block 3732 _ = b 3733 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 3734 // cond: 3735 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3736 for { 3737 c := v.AuxInt 3738 sym := v.Aux 3739 v_0 := v.Args[0] 3740 if v_0.Op != OpAMD64ADDQconst { 3741 break 3742 } 3743 d := v_0.AuxInt 3744 ptr := v_0.Args[0] 3745 idx := v.Args[1] 3746 mem := v.Args[2] 3747 v.reset(OpAMD64MOVBloadidx1) 3748 v.AuxInt = c + d 3749 v.Aux = sym 3750 v.AddArg(ptr) 3751 v.AddArg(idx) 3752 v.AddArg(mem) 3753 return true 3754 } 3755 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 3756 // cond: 3757 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3758 for { 3759 c := v.AuxInt 3760 sym := v.Aux 3761 ptr := v.Args[0] 3762 v_1 := v.Args[1] 3763 if v_1.Op != OpAMD64ADDQconst { 3764 break 3765 } 3766 d := v_1.AuxInt 3767 idx := v_1.Args[0] 3768 mem := v.Args[2] 3769 v.reset(OpAMD64MOVBloadidx1) 3770 v.AuxInt = c + d 3771 v.Aux = sym 3772 v.AddArg(ptr) 3773 v.AddArg(idx) 3774 v.AddArg(mem) 3775 return true 3776 } 3777 return false 3778 } 3779 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 3780 b := v.Block 3781 _ = b 3782 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 3783 // cond: 3784 // result: (MOVBstore [off] {sym} ptr x mem) 3785 for { 3786 off := v.AuxInt 3787 sym := v.Aux 3788 ptr := v.Args[0] 3789 v_1 := v.Args[1] 3790 if v_1.Op != OpAMD64MOVBQSX { 3791 break 3792 } 3793 x := v_1.Args[0] 3794 mem := v.Args[2] 3795 v.reset(OpAMD64MOVBstore) 3796 v.AuxInt = off 3797 v.Aux = sym 3798 v.AddArg(ptr) 3799 v.AddArg(x) 3800 v.AddArg(mem) 3801 return true 3802 } 3803 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 3804 // cond: 3805 // result: (MOVBstore [off] {sym} ptr x mem) 3806 for { 3807 off := v.AuxInt 3808 sym := v.Aux 3809 ptr := v.Args[0] 3810 v_1 := v.Args[1] 3811 if v_1.Op != OpAMD64MOVBQZX { 3812 break 3813 } 3814 x := v_1.Args[0] 3815 mem := v.Args[2] 3816 v.reset(OpAMD64MOVBstore) 3817 v.AuxInt = off 3818 v.Aux = sym 3819 v.AddArg(ptr) 3820 v.AddArg(x) 3821 v.AddArg(mem) 3822 return true 3823 } 3824 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 3825 // cond: is32Bit(off1+off2) 3826 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 3827 for { 3828 off1 := v.AuxInt 3829 sym := v.Aux 3830 v_0 := v.Args[0] 3831 if v_0.Op != OpAMD64ADDQconst { 3832 break 3833 } 3834 off2 := v_0.AuxInt 3835 ptr := v_0.Args[0] 3836 val := v.Args[1] 3837 mem := v.Args[2] 3838 if !(is32Bit(off1 + off2)) { 3839 break 3840 } 3841 v.reset(OpAMD64MOVBstore) 3842 v.AuxInt = off1 + off2 3843 v.Aux = sym 3844 v.AddArg(ptr) 3845 v.AddArg(val) 3846 v.AddArg(mem) 3847 return true 3848 } 3849 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 3850 // cond: validOff(off) 3851 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 3852 for { 3853 off := v.AuxInt 3854 sym := v.Aux 3855 ptr := v.Args[0] 3856 v_1 := v.Args[1] 3857 if v_1.Op != OpAMD64MOVLconst { 3858 break 3859 } 3860 c := v_1.AuxInt 3861 mem := v.Args[2] 3862 if !(validOff(off)) { 3863 break 3864 } 3865 v.reset(OpAMD64MOVBstoreconst) 3866 v.AuxInt = makeValAndOff(int64(int8(c)), off) 3867 v.Aux = sym 3868 v.AddArg(ptr) 3869 v.AddArg(mem) 3870 return true 3871 } 3872 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 3873 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3874 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 3875 for { 3876 off1 := v.AuxInt 3877 sym1 := v.Aux 3878 v_0 := v.Args[0] 3879 if v_0.Op != OpAMD64LEAQ { 3880 break 3881 } 3882 off2 := v_0.AuxInt 3883 sym2 := v_0.Aux 3884 base := v_0.Args[0] 3885 val := v.Args[1] 3886 mem := v.Args[2] 3887 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3888 break 3889 } 3890 v.reset(OpAMD64MOVBstore) 3891 v.AuxInt = off1 + off2 3892 v.Aux = mergeSym(sym1, sym2) 3893 v.AddArg(base) 3894 v.AddArg(val) 3895 v.AddArg(mem) 3896 return true 3897 } 3898 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 3899 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3900 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 3901 for { 3902 off1 := v.AuxInt 3903 sym1 := v.Aux 3904 v_0 := v.Args[0] 3905 if v_0.Op != OpAMD64LEAQ1 { 3906 break 3907 } 3908 off2 := v_0.AuxInt 3909 sym2 := v_0.Aux 3910 ptr := v_0.Args[0] 3911 idx := v_0.Args[1] 3912 val := v.Args[1] 3913 mem := v.Args[2] 3914 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3915 break 3916 } 3917 v.reset(OpAMD64MOVBstoreidx1) 3918 v.AuxInt = off1 + off2 3919 v.Aux = mergeSym(sym1, sym2) 3920 v.AddArg(ptr) 3921 v.AddArg(idx) 3922 v.AddArg(val) 3923 v.AddArg(mem) 3924 return true 3925 } 3926 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 3927 // cond: ptr.Op != OpSB 3928 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 3929 for { 3930 off := v.AuxInt 3931 sym := v.Aux 3932 v_0 := v.Args[0] 3933 if v_0.Op != OpAMD64ADDQ { 3934 break 3935 } 3936 ptr := v_0.Args[0] 3937 idx := v_0.Args[1] 3938 val := v.Args[1] 3939 mem := v.Args[2] 3940 if !(ptr.Op != OpSB) { 3941 break 3942 } 3943 v.reset(OpAMD64MOVBstoreidx1) 3944 v.AuxInt = off 3945 v.Aux = sym 3946 v.AddArg(ptr) 3947 v.AddArg(idx) 3948 v.AddArg(val) 3949 v.AddArg(mem) 3950 return true 3951 } 3952 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 3953 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 3954 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 3955 for { 3956 i := v.AuxInt 3957 s := v.Aux 3958 p := v.Args[0] 3959 w := v.Args[1] 3960 x2 := v.Args[2] 3961 if x2.Op != OpAMD64MOVBstore { 3962 break 3963 } 3964 if x2.AuxInt != i-1 { 3965 break 3966 } 3967 if x2.Aux != s { 3968 break 3969 } 3970 if p != x2.Args[0] { 3971 break 3972 } 3973 x2_1 := x2.Args[1] 3974 if x2_1.Op != OpAMD64SHRLconst { 3975 break 3976 } 3977 if x2_1.AuxInt != 8 { 3978 break 3979 } 3980 if w != x2_1.Args[0] { 3981 break 3982 } 3983 x1 := x2.Args[2] 3984 if x1.Op != OpAMD64MOVBstore { 3985 break 3986 } 3987 if x1.AuxInt != i-2 { 3988 break 3989 } 3990 if x1.Aux != s { 3991 break 3992 } 3993 if p != x1.Args[0] { 3994 break 3995 } 3996 x1_1 := x1.Args[1] 3997 if x1_1.Op != OpAMD64SHRLconst { 3998 break 3999 } 4000 if x1_1.AuxInt != 16 { 4001 break 4002 } 4003 if w != x1_1.Args[0] { 4004 break 4005 } 4006 x0 := x1.Args[2] 4007 if x0.Op != OpAMD64MOVBstore { 4008 break 4009 } 4010 if x0.AuxInt != i-3 { 4011 break 4012 } 4013 if x0.Aux != s { 4014 break 4015 } 4016 if p != x0.Args[0] { 4017 break 4018 } 4019 x0_1 := x0.Args[1] 4020 if x0_1.Op != OpAMD64SHRLconst { 4021 break 4022 } 4023 if x0_1.AuxInt != 24 { 4024 break 4025 } 4026 if w != x0_1.Args[0] { 4027 break 4028 } 4029 mem := x0.Args[2] 4030 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 4031 break 4032 } 4033 v.reset(OpAMD64MOVLstore) 4034 v.AuxInt = i - 3 4035 v.Aux = s 4036 v.AddArg(p) 4037 v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, w.Type) 4038 v0.AddArg(w) 4039 v.AddArg(v0) 4040 v.AddArg(mem) 4041 return true 4042 } 4043 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 4044 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 4045 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 4046 for { 4047 i := v.AuxInt 4048 s := v.Aux 4049 p := v.Args[0] 4050 w := v.Args[1] 4051 x6 := v.Args[2] 4052 if x6.Op != OpAMD64MOVBstore { 4053 break 4054 } 4055 if x6.AuxInt != i-1 { 4056 break 4057 } 4058 if x6.Aux != s { 4059 break 4060 } 4061 if p != x6.Args[0] { 4062 break 4063 } 4064 x6_1 := x6.Args[1] 4065 if x6_1.Op != OpAMD64SHRQconst { 4066 break 4067 } 4068 if x6_1.AuxInt != 8 { 4069 break 4070 } 4071 if w != x6_1.Args[0] { 4072 break 4073 } 4074 x5 := x6.Args[2] 4075 if x5.Op != OpAMD64MOVBstore { 4076 break 4077 } 4078 if x5.AuxInt != i-2 { 4079 break 4080 } 4081 if x5.Aux != s { 4082 break 4083 } 4084 if p != x5.Args[0] { 4085 break 4086 } 4087 x5_1 := x5.Args[1] 4088 if x5_1.Op != OpAMD64SHRQconst { 4089 break 4090 } 4091 if x5_1.AuxInt != 16 { 4092 break 4093 } 4094 if w != x5_1.Args[0] { 4095 break 4096 } 4097 x4 := x5.Args[2] 4098 if x4.Op != OpAMD64MOVBstore { 4099 break 4100 } 4101 if x4.AuxInt != i-3 { 4102 break 4103 } 4104 if x4.Aux != s { 4105 break 4106 } 4107 if p != x4.Args[0] { 4108 break 4109 } 4110 x4_1 := x4.Args[1] 4111 if x4_1.Op != OpAMD64SHRQconst { 4112 break 4113 } 4114 if x4_1.AuxInt != 24 { 4115 break 4116 } 4117 if w != x4_1.Args[0] { 4118 break 4119 } 4120 x3 := x4.Args[2] 4121 if x3.Op != OpAMD64MOVBstore { 4122 break 4123 } 4124 if x3.AuxInt != i-4 { 4125 break 4126 } 4127 if x3.Aux != s { 4128 break 4129 } 4130 if p != x3.Args[0] { 4131 break 4132 } 4133 x3_1 := x3.Args[1] 4134 if x3_1.Op != OpAMD64SHRQconst { 4135 break 4136 } 4137 if x3_1.AuxInt != 32 { 4138 break 4139 } 4140 if w != x3_1.Args[0] { 4141 break 4142 } 4143 x2 := x3.Args[2] 4144 if x2.Op != OpAMD64MOVBstore { 4145 break 4146 } 4147 if x2.AuxInt != i-5 { 4148 break 4149 } 4150 if x2.Aux != s { 4151 break 4152 } 4153 if p != x2.Args[0] { 4154 break 4155 } 4156 x2_1 := x2.Args[1] 4157 if x2_1.Op != OpAMD64SHRQconst { 4158 break 4159 } 4160 if x2_1.AuxInt != 40 { 4161 break 4162 } 4163 if w != x2_1.Args[0] { 4164 break 4165 } 4166 x1 := x2.Args[2] 4167 if x1.Op != OpAMD64MOVBstore { 4168 break 4169 } 4170 if x1.AuxInt != i-6 { 4171 break 4172 } 4173 if x1.Aux != s { 4174 break 4175 } 4176 if p != x1.Args[0] { 4177 break 4178 } 4179 x1_1 := x1.Args[1] 4180 if x1_1.Op != OpAMD64SHRQconst { 4181 break 4182 } 4183 if x1_1.AuxInt != 48 { 4184 break 4185 } 4186 if w != x1_1.Args[0] { 4187 break 4188 } 4189 x0 := x1.Args[2] 4190 if x0.Op != OpAMD64MOVBstore { 4191 break 4192 } 4193 if x0.AuxInt != i-7 { 4194 break 4195 } 4196 if x0.Aux != s { 4197 break 4198 } 4199 if p != x0.Args[0] { 4200 break 4201 } 4202 x0_1 := x0.Args[1] 4203 if x0_1.Op != OpAMD64SHRQconst { 4204 break 4205 } 4206 if x0_1.AuxInt != 56 { 4207 break 4208 } 4209 if w != x0_1.Args[0] { 4210 break 4211 } 4212 mem := x0.Args[2] 4213 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 4214 break 4215 } 4216 v.reset(OpAMD64MOVQstore) 4217 v.AuxInt = i - 7 4218 v.Aux = s 4219 v.AddArg(p) 4220 v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, w.Type) 4221 v0.AddArg(w) 4222 v.AddArg(v0) 4223 v.AddArg(mem) 4224 return true 4225 } 4226 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 4227 // cond: x.Uses == 1 && clobber(x) 4228 // result: (MOVWstore [i-1] {s} p w mem) 4229 for { 4230 i := v.AuxInt 4231 s := v.Aux 4232 p := v.Args[0] 4233 v_1 := v.Args[1] 4234 if v_1.Op != OpAMD64SHRQconst { 4235 break 4236 } 4237 if v_1.AuxInt != 8 { 4238 break 4239 } 4240 w := v_1.Args[0] 4241 x := v.Args[2] 4242 if x.Op != OpAMD64MOVBstore { 4243 break 4244 } 4245 if x.AuxInt != i-1 { 4246 break 4247 } 4248 if x.Aux != s { 4249 break 4250 } 4251 if p != x.Args[0] { 4252 break 4253 } 4254 if w != x.Args[1] { 4255 break 4256 } 4257 mem := x.Args[2] 4258 if !(x.Uses == 1 && clobber(x)) { 4259 break 4260 } 4261 v.reset(OpAMD64MOVWstore) 4262 v.AuxInt = i - 1 4263 v.Aux = s 4264 v.AddArg(p) 4265 v.AddArg(w) 4266 v.AddArg(mem) 4267 return true 4268 } 4269 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 4270 // cond: x.Uses == 1 && clobber(x) 4271 // result: (MOVWstore [i-1] {s} p w0 mem) 4272 for { 4273 i := v.AuxInt 4274 s := v.Aux 4275 p := v.Args[0] 4276 v_1 := v.Args[1] 4277 if v_1.Op != OpAMD64SHRQconst { 4278 break 4279 } 4280 j := v_1.AuxInt 4281 w := v_1.Args[0] 4282 x := v.Args[2] 4283 if x.Op != OpAMD64MOVBstore { 4284 break 4285 } 4286 if x.AuxInt != i-1 { 4287 break 4288 } 4289 if x.Aux != s { 4290 break 4291 } 4292 if p != x.Args[0] { 4293 break 4294 } 4295 w0 := x.Args[1] 4296 if w0.Op != OpAMD64SHRQconst { 4297 break 4298 } 4299 if w0.AuxInt != j-8 { 4300 break 4301 } 4302 if w != w0.Args[0] { 4303 break 4304 } 4305 mem := x.Args[2] 4306 if !(x.Uses == 1 && clobber(x)) { 4307 break 4308 } 4309 v.reset(OpAMD64MOVWstore) 4310 v.AuxInt = i - 1 4311 v.Aux = s 4312 v.AddArg(p) 4313 v.AddArg(w0) 4314 v.AddArg(mem) 4315 return true 4316 } 4317 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 4318 // cond: canMergeSym(sym1, sym2) 4319 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4320 for { 4321 off1 := v.AuxInt 4322 sym1 := v.Aux 4323 v_0 := v.Args[0] 4324 if v_0.Op != OpAMD64LEAL { 4325 break 4326 } 4327 off2 := v_0.AuxInt 4328 sym2 := v_0.Aux 4329 base := v_0.Args[0] 4330 val := v.Args[1] 4331 mem := v.Args[2] 4332 if !(canMergeSym(sym1, sym2)) { 4333 break 4334 } 4335 v.reset(OpAMD64MOVBstore) 4336 v.AuxInt = off1 + off2 4337 v.Aux = mergeSym(sym1, sym2) 4338 v.AddArg(base) 4339 v.AddArg(val) 4340 v.AddArg(mem) 4341 return true 4342 } 4343 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 4344 // cond: is32Bit(off1+off2) 4345 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4346 for { 4347 off1 := v.AuxInt 4348 sym := v.Aux 4349 v_0 := v.Args[0] 4350 if v_0.Op != OpAMD64ADDLconst { 4351 break 4352 } 4353 off2 := v_0.AuxInt 4354 ptr := v_0.Args[0] 4355 val := v.Args[1] 4356 mem := v.Args[2] 4357 if !(is32Bit(off1 + off2)) { 4358 break 4359 } 4360 v.reset(OpAMD64MOVBstore) 4361 v.AuxInt = off1 + off2 4362 v.Aux = sym 4363 v.AddArg(ptr) 4364 v.AddArg(val) 4365 v.AddArg(mem) 4366 return true 4367 } 4368 return false 4369 } 4370 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 4371 b := v.Block 4372 _ = b 4373 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 4374 // cond: ValAndOff(sc).canAdd(off) 4375 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4376 for { 4377 sc := v.AuxInt 4378 s := v.Aux 4379 v_0 := v.Args[0] 4380 if v_0.Op != OpAMD64ADDQconst { 4381 break 4382 } 4383 off := v_0.AuxInt 4384 ptr := v_0.Args[0] 4385 mem := v.Args[1] 4386 if !(ValAndOff(sc).canAdd(off)) { 4387 break 4388 } 4389 v.reset(OpAMD64MOVBstoreconst) 4390 v.AuxInt = ValAndOff(sc).add(off) 4391 v.Aux = s 4392 v.AddArg(ptr) 4393 v.AddArg(mem) 4394 return true 4395 } 4396 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 4397 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4398 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4399 for { 4400 sc := v.AuxInt 4401 sym1 := v.Aux 4402 v_0 := v.Args[0] 4403 if v_0.Op != OpAMD64LEAQ { 4404 break 4405 } 4406 off := v_0.AuxInt 4407 sym2 := v_0.Aux 4408 ptr := v_0.Args[0] 4409 mem := v.Args[1] 4410 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4411 break 4412 } 4413 v.reset(OpAMD64MOVBstoreconst) 4414 v.AuxInt = ValAndOff(sc).add(off) 4415 v.Aux = mergeSym(sym1, sym2) 4416 v.AddArg(ptr) 4417 v.AddArg(mem) 4418 return true 4419 } 4420 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 4421 // cond: canMergeSym(sym1, sym2) 4422 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 4423 for { 4424 x := v.AuxInt 4425 sym1 := v.Aux 4426 v_0 := v.Args[0] 4427 if v_0.Op != OpAMD64LEAQ1 { 4428 break 4429 } 4430 off := v_0.AuxInt 4431 sym2 := v_0.Aux 4432 ptr := v_0.Args[0] 4433 idx := v_0.Args[1] 4434 mem := v.Args[1] 4435 if !(canMergeSym(sym1, sym2)) { 4436 break 4437 } 4438 v.reset(OpAMD64MOVBstoreconstidx1) 4439 v.AuxInt = ValAndOff(x).add(off) 4440 v.Aux = mergeSym(sym1, sym2) 4441 v.AddArg(ptr) 4442 v.AddArg(idx) 4443 v.AddArg(mem) 4444 return true 4445 } 4446 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 4447 // cond: 4448 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 4449 for { 4450 x := v.AuxInt 4451 sym := v.Aux 4452 v_0 := v.Args[0] 4453 if v_0.Op != OpAMD64ADDQ { 4454 break 4455 } 4456 ptr := v_0.Args[0] 4457 idx := v_0.Args[1] 4458 mem := v.Args[1] 4459 v.reset(OpAMD64MOVBstoreconstidx1) 4460 v.AuxInt = x 4461 v.Aux = sym 4462 v.AddArg(ptr) 4463 v.AddArg(idx) 4464 v.AddArg(mem) 4465 return true 4466 } 4467 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 4468 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4469 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 4470 for { 4471 c := v.AuxInt 4472 s := v.Aux 4473 p := v.Args[0] 4474 x := v.Args[1] 4475 if x.Op != OpAMD64MOVBstoreconst { 4476 break 4477 } 4478 a := x.AuxInt 4479 if x.Aux != s { 4480 break 4481 } 4482 if p != x.Args[0] { 4483 break 4484 } 4485 mem := x.Args[1] 4486 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4487 break 4488 } 4489 v.reset(OpAMD64MOVWstoreconst) 4490 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4491 v.Aux = s 4492 v.AddArg(p) 4493 v.AddArg(mem) 4494 return true 4495 } 4496 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 4497 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4498 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4499 for { 4500 sc := v.AuxInt 4501 sym1 := v.Aux 4502 v_0 := v.Args[0] 4503 if v_0.Op != OpAMD64LEAL { 4504 break 4505 } 4506 off := v_0.AuxInt 4507 sym2 := v_0.Aux 4508 ptr := v_0.Args[0] 4509 mem := v.Args[1] 4510 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4511 break 4512 } 4513 v.reset(OpAMD64MOVBstoreconst) 4514 v.AuxInt = ValAndOff(sc).add(off) 4515 v.Aux = mergeSym(sym1, sym2) 4516 v.AddArg(ptr) 4517 v.AddArg(mem) 4518 return true 4519 } 4520 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 4521 // cond: ValAndOff(sc).canAdd(off) 4522 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4523 for { 4524 sc := v.AuxInt 4525 s := v.Aux 4526 v_0 := v.Args[0] 4527 if v_0.Op != OpAMD64ADDLconst { 4528 break 4529 } 4530 off := v_0.AuxInt 4531 ptr := v_0.Args[0] 4532 mem := v.Args[1] 4533 if !(ValAndOff(sc).canAdd(off)) { 4534 break 4535 } 4536 v.reset(OpAMD64MOVBstoreconst) 4537 v.AuxInt = ValAndOff(sc).add(off) 4538 v.Aux = s 4539 v.AddArg(ptr) 4540 v.AddArg(mem) 4541 return true 4542 } 4543 return false 4544 } 4545 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 4546 b := v.Block 4547 _ = b 4548 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 4549 // cond: 4550 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4551 for { 4552 x := v.AuxInt 4553 sym := v.Aux 4554 v_0 := v.Args[0] 4555 if v_0.Op != OpAMD64ADDQconst { 4556 break 4557 } 4558 c := v_0.AuxInt 4559 ptr := v_0.Args[0] 4560 idx := v.Args[1] 4561 mem := v.Args[2] 4562 v.reset(OpAMD64MOVBstoreconstidx1) 4563 v.AuxInt = ValAndOff(x).add(c) 4564 v.Aux = sym 4565 v.AddArg(ptr) 4566 v.AddArg(idx) 4567 v.AddArg(mem) 4568 return true 4569 } 4570 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 4571 // cond: 4572 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4573 for { 4574 x := v.AuxInt 4575 sym := v.Aux 4576 ptr := v.Args[0] 4577 v_1 := v.Args[1] 4578 if v_1.Op != OpAMD64ADDQconst { 4579 break 4580 } 4581 c := v_1.AuxInt 4582 idx := v_1.Args[0] 4583 mem := v.Args[2] 4584 v.reset(OpAMD64MOVBstoreconstidx1) 4585 v.AuxInt = ValAndOff(x).add(c) 4586 v.Aux = sym 4587 v.AddArg(ptr) 4588 v.AddArg(idx) 4589 v.AddArg(mem) 4590 return true 4591 } 4592 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 4593 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4594 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 4595 for { 4596 c := v.AuxInt 4597 s := v.Aux 4598 p := v.Args[0] 4599 i := v.Args[1] 4600 x := v.Args[2] 4601 if x.Op != OpAMD64MOVBstoreconstidx1 { 4602 break 4603 } 4604 a := x.AuxInt 4605 if x.Aux != s { 4606 break 4607 } 4608 if p != x.Args[0] { 4609 break 4610 } 4611 if i != x.Args[1] { 4612 break 4613 } 4614 mem := x.Args[2] 4615 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4616 break 4617 } 4618 v.reset(OpAMD64MOVWstoreconstidx1) 4619 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4620 v.Aux = s 4621 v.AddArg(p) 4622 v.AddArg(i) 4623 v.AddArg(mem) 4624 return true 4625 } 4626 return false 4627 } 4628 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 4629 b := v.Block 4630 _ = b 4631 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 4632 // cond: 4633 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4634 for { 4635 c := v.AuxInt 4636 sym := v.Aux 4637 v_0 := v.Args[0] 4638 if v_0.Op != OpAMD64ADDQconst { 4639 break 4640 } 4641 d := v_0.AuxInt 4642 ptr := v_0.Args[0] 4643 idx := v.Args[1] 4644 val := v.Args[2] 4645 mem := v.Args[3] 4646 v.reset(OpAMD64MOVBstoreidx1) 4647 v.AuxInt = c + d 4648 v.Aux = sym 4649 v.AddArg(ptr) 4650 v.AddArg(idx) 4651 v.AddArg(val) 4652 v.AddArg(mem) 4653 return true 4654 } 4655 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 4656 // cond: 4657 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4658 for { 4659 c := v.AuxInt 4660 sym := v.Aux 4661 ptr := v.Args[0] 4662 v_1 := v.Args[1] 4663 if v_1.Op != OpAMD64ADDQconst { 4664 break 4665 } 4666 d := v_1.AuxInt 4667 idx := v_1.Args[0] 4668 val := v.Args[2] 4669 mem := v.Args[3] 4670 v.reset(OpAMD64MOVBstoreidx1) 4671 v.AuxInt = c + d 4672 v.Aux = sym 4673 v.AddArg(ptr) 4674 v.AddArg(idx) 4675 v.AddArg(val) 4676 v.AddArg(mem) 4677 return true 4678 } 4679 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 4680 // cond: x.Uses == 1 && clobber(x) 4681 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 4682 for { 4683 i := v.AuxInt 4684 s := v.Aux 4685 p := v.Args[0] 4686 idx := v.Args[1] 4687 v_2 := v.Args[2] 4688 if v_2.Op != OpAMD64SHRQconst { 4689 break 4690 } 4691 if v_2.AuxInt != 8 { 4692 break 4693 } 4694 w := v_2.Args[0] 4695 x := v.Args[3] 4696 if x.Op != OpAMD64MOVBstoreidx1 { 4697 break 4698 } 4699 if x.AuxInt != i-1 { 4700 break 4701 } 4702 if x.Aux != s { 4703 break 4704 } 4705 if p != x.Args[0] { 4706 break 4707 } 4708 if idx != x.Args[1] { 4709 break 4710 } 4711 if w != x.Args[2] { 4712 break 4713 } 4714 mem := x.Args[3] 4715 if !(x.Uses == 1 && clobber(x)) { 4716 break 4717 } 4718 v.reset(OpAMD64MOVWstoreidx1) 4719 v.AuxInt = i - 1 4720 v.Aux = s 4721 v.AddArg(p) 4722 v.AddArg(idx) 4723 v.AddArg(w) 4724 v.AddArg(mem) 4725 return true 4726 } 4727 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 4728 // cond: x.Uses == 1 && clobber(x) 4729 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 4730 for { 4731 i := v.AuxInt 4732 s := v.Aux 4733 p := v.Args[0] 4734 idx := v.Args[1] 4735 v_2 := v.Args[2] 4736 if v_2.Op != OpAMD64SHRQconst { 4737 break 4738 } 4739 j := v_2.AuxInt 4740 w := v_2.Args[0] 4741 x := v.Args[3] 4742 if x.Op != OpAMD64MOVBstoreidx1 { 4743 break 4744 } 4745 if x.AuxInt != i-1 { 4746 break 4747 } 4748 if x.Aux != s { 4749 break 4750 } 4751 if p != x.Args[0] { 4752 break 4753 } 4754 if idx != x.Args[1] { 4755 break 4756 } 4757 w0 := x.Args[2] 4758 if w0.Op != OpAMD64SHRQconst { 4759 break 4760 } 4761 if w0.AuxInt != j-8 { 4762 break 4763 } 4764 if w != w0.Args[0] { 4765 break 4766 } 4767 mem := x.Args[3] 4768 if !(x.Uses == 1 && clobber(x)) { 4769 break 4770 } 4771 v.reset(OpAMD64MOVWstoreidx1) 4772 v.AuxInt = i - 1 4773 v.Aux = s 4774 v.AddArg(p) 4775 v.AddArg(idx) 4776 v.AddArg(w0) 4777 v.AddArg(mem) 4778 return true 4779 } 4780 return false 4781 } 4782 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 4783 b := v.Block 4784 _ = b 4785 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 4786 // cond: x.Uses == 1 && clobber(x) 4787 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4788 for { 4789 x := v.Args[0] 4790 if x.Op != OpAMD64MOVLload { 4791 break 4792 } 4793 off := x.AuxInt 4794 sym := x.Aux 4795 ptr := x.Args[0] 4796 mem := x.Args[1] 4797 if !(x.Uses == 1 && clobber(x)) { 4798 break 4799 } 4800 b = x.Block 4801 v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 4802 v.reset(OpCopy) 4803 v.AddArg(v0) 4804 v0.AuxInt = off 4805 v0.Aux = sym 4806 v0.AddArg(ptr) 4807 v0.AddArg(mem) 4808 return true 4809 } 4810 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 4811 // cond: x.Uses == 1 && clobber(x) 4812 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4813 for { 4814 x := v.Args[0] 4815 if x.Op != OpAMD64MOVQload { 4816 break 4817 } 4818 off := x.AuxInt 4819 sym := x.Aux 4820 ptr := x.Args[0] 4821 mem := x.Args[1] 4822 if !(x.Uses == 1 && clobber(x)) { 4823 break 4824 } 4825 b = x.Block 4826 v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) 4827 v.reset(OpCopy) 4828 v.AddArg(v0) 4829 v0.AuxInt = off 4830 v0.Aux = sym 4831 v0.AddArg(ptr) 4832 v0.AddArg(mem) 4833 return true 4834 } 4835 // match: (MOVLQSX (ANDLconst [c] x)) 4836 // cond: c & 0x80000000 == 0 4837 // result: (ANDLconst [c & 0x7fffffff] x) 4838 for { 4839 v_0 := v.Args[0] 4840 if v_0.Op != OpAMD64ANDLconst { 4841 break 4842 } 4843 c := v_0.AuxInt 4844 x := v_0.Args[0] 4845 if !(c&0x80000000 == 0) { 4846 break 4847 } 4848 v.reset(OpAMD64ANDLconst) 4849 v.AuxInt = c & 0x7fffffff 4850 v.AddArg(x) 4851 return true 4852 } 4853 return false 4854 } 4855 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 4856 b := v.Block 4857 _ = b 4858 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 4859 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4860 // result: (MOVLQSX x) 4861 for { 4862 off := v.AuxInt 4863 sym := v.Aux 4864 ptr := v.Args[0] 4865 v_1 := v.Args[1] 4866 if v_1.Op != OpAMD64MOVLstore { 4867 break 4868 } 4869 off2 := v_1.AuxInt 4870 sym2 := v_1.Aux 4871 ptr2 := v_1.Args[0] 4872 x := v_1.Args[1] 4873 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4874 break 4875 } 4876 v.reset(OpAMD64MOVLQSX) 4877 v.AddArg(x) 4878 return true 4879 } 4880 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4881 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4882 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4883 for { 4884 off1 := v.AuxInt 4885 sym1 := v.Aux 4886 v_0 := v.Args[0] 4887 if v_0.Op != OpAMD64LEAQ { 4888 break 4889 } 4890 off2 := v_0.AuxInt 4891 sym2 := v_0.Aux 4892 base := v_0.Args[0] 4893 mem := v.Args[1] 4894 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4895 break 4896 } 4897 v.reset(OpAMD64MOVLQSXload) 4898 v.AuxInt = off1 + off2 4899 v.Aux = mergeSym(sym1, sym2) 4900 v.AddArg(base) 4901 v.AddArg(mem) 4902 return true 4903 } 4904 return false 4905 } 4906 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 4907 b := v.Block 4908 _ = b 4909 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 4910 // cond: x.Uses == 1 && clobber(x) 4911 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 4912 for { 4913 x := v.Args[0] 4914 if x.Op != OpAMD64MOVLload { 4915 break 4916 } 4917 off := x.AuxInt 4918 sym := x.Aux 4919 ptr := x.Args[0] 4920 mem := x.Args[1] 4921 if !(x.Uses == 1 && clobber(x)) { 4922 break 4923 } 4924 b = x.Block 4925 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 4926 v.reset(OpCopy) 4927 v.AddArg(v0) 4928 v0.AuxInt = off 4929 v0.Aux = sym 4930 v0.AddArg(ptr) 4931 v0.AddArg(mem) 4932 return true 4933 } 4934 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 4935 // cond: x.Uses == 1 && clobber(x) 4936 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 4937 for { 4938 x := v.Args[0] 4939 if x.Op != OpAMD64MOVQload { 4940 break 4941 } 4942 off := x.AuxInt 4943 sym := x.Aux 4944 ptr := x.Args[0] 4945 mem := x.Args[1] 4946 if !(x.Uses == 1 && clobber(x)) { 4947 break 4948 } 4949 b = x.Block 4950 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type) 4951 v.reset(OpCopy) 4952 v.AddArg(v0) 4953 v0.AuxInt = off 4954 v0.Aux = sym 4955 v0.AddArg(ptr) 4956 v0.AddArg(mem) 4957 return true 4958 } 4959 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 4960 // cond: x.Uses == 1 && clobber(x) 4961 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 4962 for { 4963 x := v.Args[0] 4964 if x.Op != OpAMD64MOVLloadidx1 { 4965 break 4966 } 4967 off := x.AuxInt 4968 sym := x.Aux 4969 ptr := x.Args[0] 4970 idx := x.Args[1] 4971 mem := x.Args[2] 4972 if !(x.Uses == 1 && clobber(x)) { 4973 break 4974 } 4975 b = x.Block 4976 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 4977 v.reset(OpCopy) 4978 v.AddArg(v0) 4979 v0.AuxInt = off 4980 v0.Aux = sym 4981 v0.AddArg(ptr) 4982 v0.AddArg(idx) 4983 v0.AddArg(mem) 4984 return true 4985 } 4986 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 4987 // cond: x.Uses == 1 && clobber(x) 4988 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 4989 for { 4990 x := v.Args[0] 4991 if x.Op != OpAMD64MOVLloadidx4 { 4992 break 4993 } 4994 off := x.AuxInt 4995 sym := x.Aux 4996 ptr := x.Args[0] 4997 idx := x.Args[1] 4998 mem := x.Args[2] 4999 if !(x.Uses == 1 && clobber(x)) { 5000 break 5001 } 5002 b = x.Block 5003 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type) 5004 v.reset(OpCopy) 5005 v.AddArg(v0) 5006 v0.AuxInt = off 5007 v0.Aux = sym 5008 v0.AddArg(ptr) 5009 v0.AddArg(idx) 5010 v0.AddArg(mem) 5011 return true 5012 } 5013 // match: (MOVLQZX (ANDLconst [c] x)) 5014 // cond: 5015 // result: (ANDLconst [c] x) 5016 for { 5017 v_0 := v.Args[0] 5018 if v_0.Op != OpAMD64ANDLconst { 5019 break 5020 } 5021 c := v_0.AuxInt 5022 x := v_0.Args[0] 5023 v.reset(OpAMD64ANDLconst) 5024 v.AuxInt = c 5025 v.AddArg(x) 5026 return true 5027 } 5028 return false 5029 } 5030 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool { 5031 b := v.Block 5032 _ = b 5033 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 5034 // cond: is32Bit(off1+off2) 5035 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 5036 for { 5037 off1 := v.AuxInt 5038 sym := v.Aux 5039 v_0 := v.Args[0] 5040 if v_0.Op != OpAMD64ADDQconst { 5041 break 5042 } 5043 off2 := v_0.AuxInt 5044 ptr := v_0.Args[0] 5045 mem := v.Args[1] 5046 if !(is32Bit(off1 + off2)) { 5047 break 5048 } 5049 v.reset(OpAMD64MOVLatomicload) 5050 v.AuxInt = off1 + off2 5051 v.Aux = sym 5052 v.AddArg(ptr) 5053 v.AddArg(mem) 5054 return true 5055 } 5056 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 5057 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5058 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 5059 for { 5060 off1 := v.AuxInt 5061 sym1 := v.Aux 5062 v_0 := v.Args[0] 5063 if v_0.Op != OpAMD64LEAQ { 5064 break 5065 } 5066 off2 := v_0.AuxInt 5067 sym2 := v_0.Aux 5068 ptr := v_0.Args[0] 5069 mem := v.Args[1] 5070 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5071 break 5072 } 5073 v.reset(OpAMD64MOVLatomicload) 5074 v.AuxInt = off1 + off2 5075 v.Aux = mergeSym(sym1, sym2) 5076 v.AddArg(ptr) 5077 v.AddArg(mem) 5078 return true 5079 } 5080 return false 5081 } 5082 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 5083 b := v.Block 5084 _ = b 5085 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 5086 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 5087 // result: (MOVLQZX x) 5088 for { 5089 off := v.AuxInt 5090 sym := v.Aux 5091 ptr := v.Args[0] 5092 v_1 := v.Args[1] 5093 if v_1.Op != OpAMD64MOVLstore { 5094 break 5095 } 5096 off2 := v_1.AuxInt 5097 sym2 := v_1.Aux 5098 ptr2 := v_1.Args[0] 5099 x := v_1.Args[1] 5100 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 5101 break 5102 } 5103 v.reset(OpAMD64MOVLQZX) 5104 v.AddArg(x) 5105 return true 5106 } 5107 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 5108 // cond: is32Bit(off1+off2) 5109 // result: (MOVLload [off1+off2] {sym} ptr mem) 5110 for { 5111 off1 := v.AuxInt 5112 sym := v.Aux 5113 v_0 := v.Args[0] 5114 if v_0.Op != OpAMD64ADDQconst { 5115 break 5116 } 5117 off2 := v_0.AuxInt 5118 ptr := v_0.Args[0] 5119 mem := v.Args[1] 5120 if !(is32Bit(off1 + off2)) { 5121 break 5122 } 5123 v.reset(OpAMD64MOVLload) 5124 v.AuxInt = off1 + off2 5125 v.Aux = sym 5126 v.AddArg(ptr) 5127 v.AddArg(mem) 5128 return true 5129 } 5130 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 5131 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5132 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5133 for { 5134 off1 := v.AuxInt 5135 sym1 := v.Aux 5136 v_0 := v.Args[0] 5137 if v_0.Op != OpAMD64LEAQ { 5138 break 5139 } 5140 off2 := v_0.AuxInt 5141 sym2 := v_0.Aux 5142 base := v_0.Args[0] 5143 mem := v.Args[1] 5144 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5145 break 5146 } 5147 v.reset(OpAMD64MOVLload) 5148 v.AuxInt = off1 + off2 5149 v.Aux = mergeSym(sym1, sym2) 5150 v.AddArg(base) 5151 v.AddArg(mem) 5152 return true 5153 } 5154 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 5155 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5156 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5157 for { 5158 off1 := v.AuxInt 5159 sym1 := v.Aux 5160 v_0 := v.Args[0] 5161 if v_0.Op != OpAMD64LEAQ1 { 5162 break 5163 } 5164 off2 := v_0.AuxInt 5165 sym2 := v_0.Aux 5166 ptr := v_0.Args[0] 5167 idx := v_0.Args[1] 5168 mem := v.Args[1] 5169 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5170 break 5171 } 5172 v.reset(OpAMD64MOVLloadidx1) 5173 v.AuxInt = off1 + off2 5174 v.Aux = mergeSym(sym1, sym2) 5175 v.AddArg(ptr) 5176 v.AddArg(idx) 5177 v.AddArg(mem) 5178 return true 5179 } 5180 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 5181 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5182 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5183 for { 5184 off1 := v.AuxInt 5185 sym1 := v.Aux 5186 v_0 := v.Args[0] 5187 if v_0.Op != OpAMD64LEAQ4 { 5188 break 5189 } 5190 off2 := v_0.AuxInt 5191 sym2 := v_0.Aux 5192 ptr := v_0.Args[0] 5193 idx := v_0.Args[1] 5194 mem := v.Args[1] 5195 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5196 break 5197 } 5198 v.reset(OpAMD64MOVLloadidx4) 5199 v.AuxInt = off1 + off2 5200 v.Aux = mergeSym(sym1, sym2) 5201 v.AddArg(ptr) 5202 v.AddArg(idx) 5203 v.AddArg(mem) 5204 return true 5205 } 5206 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 5207 // cond: ptr.Op != OpSB 5208 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 5209 for { 5210 off := v.AuxInt 5211 sym := v.Aux 5212 v_0 := v.Args[0] 5213 if v_0.Op != OpAMD64ADDQ { 5214 break 5215 } 5216 ptr := v_0.Args[0] 5217 idx := v_0.Args[1] 5218 mem := v.Args[1] 5219 if !(ptr.Op != OpSB) { 5220 break 5221 } 5222 v.reset(OpAMD64MOVLloadidx1) 5223 v.AuxInt = off 5224 v.Aux = sym 5225 v.AddArg(ptr) 5226 v.AddArg(idx) 5227 v.AddArg(mem) 5228 return true 5229 } 5230 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 5231 // cond: canMergeSym(sym1, sym2) 5232 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5233 for { 5234 off1 := v.AuxInt 5235 sym1 := v.Aux 5236 v_0 := v.Args[0] 5237 if v_0.Op != OpAMD64LEAL { 5238 break 5239 } 5240 off2 := v_0.AuxInt 5241 sym2 := v_0.Aux 5242 base := v_0.Args[0] 5243 mem := v.Args[1] 5244 if !(canMergeSym(sym1, sym2)) { 5245 break 5246 } 5247 v.reset(OpAMD64MOVLload) 5248 v.AuxInt = off1 + off2 5249 v.Aux = mergeSym(sym1, sym2) 5250 v.AddArg(base) 5251 v.AddArg(mem) 5252 return true 5253 } 5254 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 5255 // cond: is32Bit(off1+off2) 5256 // result: (MOVLload [off1+off2] {sym} ptr mem) 5257 for { 5258 off1 := v.AuxInt 5259 sym := v.Aux 5260 v_0 := v.Args[0] 5261 if v_0.Op != OpAMD64ADDLconst { 5262 break 5263 } 5264 off2 := v_0.AuxInt 5265 ptr := v_0.Args[0] 5266 mem := v.Args[1] 5267 if !(is32Bit(off1 + off2)) { 5268 break 5269 } 5270 v.reset(OpAMD64MOVLload) 5271 v.AuxInt = off1 + off2 5272 v.Aux = sym 5273 v.AddArg(ptr) 5274 v.AddArg(mem) 5275 return true 5276 } 5277 return false 5278 } 5279 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 5280 b := v.Block 5281 _ = b 5282 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 5283 // cond: 5284 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 5285 for { 5286 c := v.AuxInt 5287 sym := v.Aux 5288 ptr := v.Args[0] 5289 v_1 := v.Args[1] 5290 if v_1.Op != OpAMD64SHLQconst { 5291 break 5292 } 5293 if v_1.AuxInt != 2 { 5294 break 5295 } 5296 idx := v_1.Args[0] 5297 mem := v.Args[2] 5298 v.reset(OpAMD64MOVLloadidx4) 5299 v.AuxInt = c 5300 v.Aux = sym 5301 v.AddArg(ptr) 5302 v.AddArg(idx) 5303 v.AddArg(mem) 5304 return true 5305 } 5306 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 5307 // cond: 5308 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5309 for { 5310 c := v.AuxInt 5311 sym := v.Aux 5312 v_0 := v.Args[0] 5313 if v_0.Op != OpAMD64ADDQconst { 5314 break 5315 } 5316 d := v_0.AuxInt 5317 ptr := v_0.Args[0] 5318 idx := v.Args[1] 5319 mem := v.Args[2] 5320 v.reset(OpAMD64MOVLloadidx1) 5321 v.AuxInt = c + d 5322 v.Aux = sym 5323 v.AddArg(ptr) 5324 v.AddArg(idx) 5325 v.AddArg(mem) 5326 return true 5327 } 5328 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 5329 // cond: 5330 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5331 for { 5332 c := v.AuxInt 5333 sym := v.Aux 5334 ptr := v.Args[0] 5335 v_1 := v.Args[1] 5336 if v_1.Op != OpAMD64ADDQconst { 5337 break 5338 } 5339 d := v_1.AuxInt 5340 idx := v_1.Args[0] 5341 mem := v.Args[2] 5342 v.reset(OpAMD64MOVLloadidx1) 5343 v.AuxInt = c + d 5344 v.Aux = sym 5345 v.AddArg(ptr) 5346 v.AddArg(idx) 5347 v.AddArg(mem) 5348 return true 5349 } 5350 return false 5351 } 5352 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { 5353 b := v.Block 5354 _ = b 5355 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 5356 // cond: 5357 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 5358 for { 5359 c := v.AuxInt 5360 sym := v.Aux 5361 v_0 := v.Args[0] 5362 if v_0.Op != OpAMD64ADDQconst { 5363 break 5364 } 5365 d := v_0.AuxInt 5366 ptr := v_0.Args[0] 5367 idx := v.Args[1] 5368 mem := v.Args[2] 5369 v.reset(OpAMD64MOVLloadidx4) 5370 v.AuxInt = c + d 5371 v.Aux = sym 5372 v.AddArg(ptr) 5373 v.AddArg(idx) 5374 v.AddArg(mem) 5375 return true 5376 } 5377 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 5378 // cond: 5379 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 5380 for { 5381 c := v.AuxInt 5382 sym := v.Aux 5383 ptr := v.Args[0] 5384 v_1 := v.Args[1] 5385 if v_1.Op != OpAMD64ADDQconst { 5386 break 5387 } 5388 d := v_1.AuxInt 5389 idx := v_1.Args[0] 5390 mem := v.Args[2] 5391 v.reset(OpAMD64MOVLloadidx4) 5392 v.AuxInt = c + 4*d 5393 v.Aux = sym 5394 v.AddArg(ptr) 5395 v.AddArg(idx) 5396 v.AddArg(mem) 5397 return true 5398 } 5399 return false 5400 } 5401 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { 5402 b := v.Block 5403 _ = b 5404 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 5405 // cond: 5406 // result: (MOVLstore [off] {sym} ptr x mem) 5407 for { 5408 off := v.AuxInt 5409 sym := v.Aux 5410 ptr := v.Args[0] 5411 v_1 := v.Args[1] 5412 if v_1.Op != OpAMD64MOVLQSX { 5413 break 5414 } 5415 x := v_1.Args[0] 5416 mem := v.Args[2] 5417 v.reset(OpAMD64MOVLstore) 5418 v.AuxInt = off 5419 v.Aux = sym 5420 v.AddArg(ptr) 5421 v.AddArg(x) 5422 v.AddArg(mem) 5423 return true 5424 } 5425 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 5426 // cond: 5427 // result: (MOVLstore [off] {sym} ptr x mem) 5428 for { 5429 off := v.AuxInt 5430 sym := v.Aux 5431 ptr := v.Args[0] 5432 v_1 := v.Args[1] 5433 if v_1.Op != OpAMD64MOVLQZX { 5434 break 5435 } 5436 x := v_1.Args[0] 5437 mem := v.Args[2] 5438 v.reset(OpAMD64MOVLstore) 5439 v.AuxInt = off 5440 v.Aux = sym 5441 v.AddArg(ptr) 5442 v.AddArg(x) 5443 v.AddArg(mem) 5444 return true 5445 } 5446 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 5447 // cond: is32Bit(off1+off2) 5448 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5449 for { 5450 off1 := v.AuxInt 5451 sym := v.Aux 5452 v_0 := v.Args[0] 5453 if v_0.Op != OpAMD64ADDQconst { 5454 break 5455 } 5456 off2 := v_0.AuxInt 5457 ptr := v_0.Args[0] 5458 val := v.Args[1] 5459 mem := v.Args[2] 5460 if !(is32Bit(off1 + off2)) { 5461 break 5462 } 5463 v.reset(OpAMD64MOVLstore) 5464 v.AuxInt = off1 + off2 5465 v.Aux = sym 5466 v.AddArg(ptr) 5467 v.AddArg(val) 5468 v.AddArg(mem) 5469 return true 5470 } 5471 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 5472 // cond: validOff(off) 5473 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 5474 for { 5475 off := v.AuxInt 5476 sym := v.Aux 5477 ptr := v.Args[0] 5478 v_1 := v.Args[1] 5479 if v_1.Op != OpAMD64MOVLconst { 5480 break 5481 } 5482 c := v_1.AuxInt 5483 mem := v.Args[2] 5484 if !(validOff(off)) { 5485 break 5486 } 5487 v.reset(OpAMD64MOVLstoreconst) 5488 v.AuxInt = makeValAndOff(int64(int32(c)), off) 5489 v.Aux = sym 5490 v.AddArg(ptr) 5491 v.AddArg(mem) 5492 return true 5493 } 5494 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5495 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5496 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5497 for { 5498 off1 := v.AuxInt 5499 sym1 := v.Aux 5500 v_0 := v.Args[0] 5501 if v_0.Op != OpAMD64LEAQ { 5502 break 5503 } 5504 off2 := v_0.AuxInt 5505 sym2 := v_0.Aux 5506 base := v_0.Args[0] 5507 val := v.Args[1] 5508 mem := v.Args[2] 5509 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5510 break 5511 } 5512 v.reset(OpAMD64MOVLstore) 5513 v.AuxInt = off1 + off2 5514 v.Aux = mergeSym(sym1, sym2) 5515 v.AddArg(base) 5516 v.AddArg(val) 5517 v.AddArg(mem) 5518 return true 5519 } 5520 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 5521 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5522 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5523 for { 5524 off1 := v.AuxInt 5525 sym1 := v.Aux 5526 v_0 := v.Args[0] 5527 if v_0.Op != OpAMD64LEAQ1 { 5528 break 5529 } 5530 off2 := v_0.AuxInt 5531 sym2 := v_0.Aux 5532 ptr := v_0.Args[0] 5533 idx := v_0.Args[1] 5534 val := v.Args[1] 5535 mem := v.Args[2] 5536 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5537 break 5538 } 5539 v.reset(OpAMD64MOVLstoreidx1) 5540 v.AuxInt = off1 + off2 5541 v.Aux = mergeSym(sym1, sym2) 5542 v.AddArg(ptr) 5543 v.AddArg(idx) 5544 v.AddArg(val) 5545 v.AddArg(mem) 5546 return true 5547 } 5548 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 5549 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5550 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5551 for { 5552 off1 := v.AuxInt 5553 sym1 := v.Aux 5554 v_0 := v.Args[0] 5555 if v_0.Op != OpAMD64LEAQ4 { 5556 break 5557 } 5558 off2 := v_0.AuxInt 5559 sym2 := v_0.Aux 5560 ptr := v_0.Args[0] 5561 idx := v_0.Args[1] 5562 val := v.Args[1] 5563 mem := v.Args[2] 5564 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5565 break 5566 } 5567 v.reset(OpAMD64MOVLstoreidx4) 5568 v.AuxInt = off1 + off2 5569 v.Aux = mergeSym(sym1, sym2) 5570 v.AddArg(ptr) 5571 v.AddArg(idx) 5572 v.AddArg(val) 5573 v.AddArg(mem) 5574 return true 5575 } 5576 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 5577 // cond: ptr.Op != OpSB 5578 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 5579 for { 5580 off := v.AuxInt 5581 sym := v.Aux 5582 v_0 := v.Args[0] 5583 if v_0.Op != OpAMD64ADDQ { 5584 break 5585 } 5586 ptr := v_0.Args[0] 5587 idx := v_0.Args[1] 5588 val := v.Args[1] 5589 mem := v.Args[2] 5590 if !(ptr.Op != OpSB) { 5591 break 5592 } 5593 v.reset(OpAMD64MOVLstoreidx1) 5594 v.AuxInt = off 5595 v.Aux = sym 5596 v.AddArg(ptr) 5597 v.AddArg(idx) 5598 v.AddArg(val) 5599 v.AddArg(mem) 5600 return true 5601 } 5602 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 5603 // cond: x.Uses == 1 && clobber(x) 5604 // result: (MOVQstore [i-4] {s} p w mem) 5605 for { 5606 i := v.AuxInt 5607 s := v.Aux 5608 p := v.Args[0] 5609 v_1 := v.Args[1] 5610 if v_1.Op != OpAMD64SHRQconst { 5611 break 5612 } 5613 if v_1.AuxInt != 32 { 5614 break 5615 } 5616 w := v_1.Args[0] 5617 x := v.Args[2] 5618 if x.Op != OpAMD64MOVLstore { 5619 break 5620 } 5621 if x.AuxInt != i-4 { 5622 break 5623 } 5624 if x.Aux != s { 5625 break 5626 } 5627 if p != x.Args[0] { 5628 break 5629 } 5630 if w != x.Args[1] { 5631 break 5632 } 5633 mem := x.Args[2] 5634 if !(x.Uses == 1 && clobber(x)) { 5635 break 5636 } 5637 v.reset(OpAMD64MOVQstore) 5638 v.AuxInt = i - 4 5639 v.Aux = s 5640 v.AddArg(p) 5641 v.AddArg(w) 5642 v.AddArg(mem) 5643 return true 5644 } 5645 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 5646 // cond: x.Uses == 1 && clobber(x) 5647 // result: (MOVQstore [i-4] {s} p w0 mem) 5648 for { 5649 i := v.AuxInt 5650 s := v.Aux 5651 p := v.Args[0] 5652 v_1 := v.Args[1] 5653 if v_1.Op != OpAMD64SHRQconst { 5654 break 5655 } 5656 j := v_1.AuxInt 5657 w := v_1.Args[0] 5658 x := v.Args[2] 5659 if x.Op != OpAMD64MOVLstore { 5660 break 5661 } 5662 if x.AuxInt != i-4 { 5663 break 5664 } 5665 if x.Aux != s { 5666 break 5667 } 5668 if p != x.Args[0] { 5669 break 5670 } 5671 w0 := x.Args[1] 5672 if w0.Op != OpAMD64SHRQconst { 5673 break 5674 } 5675 if w0.AuxInt != j-32 { 5676 break 5677 } 5678 if w != w0.Args[0] { 5679 break 5680 } 5681 mem := x.Args[2] 5682 if !(x.Uses == 1 && clobber(x)) { 5683 break 5684 } 5685 v.reset(OpAMD64MOVQstore) 5686 v.AuxInt = i - 4 5687 v.Aux = s 5688 v.AddArg(p) 5689 v.AddArg(w0) 5690 v.AddArg(mem) 5691 return true 5692 } 5693 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5694 // cond: canMergeSym(sym1, sym2) 5695 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5696 for { 5697 off1 := v.AuxInt 5698 sym1 := v.Aux 5699 v_0 := v.Args[0] 5700 if v_0.Op != OpAMD64LEAL { 5701 break 5702 } 5703 off2 := v_0.AuxInt 5704 sym2 := v_0.Aux 5705 base := v_0.Args[0] 5706 val := v.Args[1] 5707 mem := v.Args[2] 5708 if !(canMergeSym(sym1, sym2)) { 5709 break 5710 } 5711 v.reset(OpAMD64MOVLstore) 5712 v.AuxInt = off1 + off2 5713 v.Aux = mergeSym(sym1, sym2) 5714 v.AddArg(base) 5715 v.AddArg(val) 5716 v.AddArg(mem) 5717 return true 5718 } 5719 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5720 // cond: is32Bit(off1+off2) 5721 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5722 for { 5723 off1 := v.AuxInt 5724 sym := v.Aux 5725 v_0 := v.Args[0] 5726 if v_0.Op != OpAMD64ADDLconst { 5727 break 5728 } 5729 off2 := v_0.AuxInt 5730 ptr := v_0.Args[0] 5731 val := v.Args[1] 5732 mem := v.Args[2] 5733 if !(is32Bit(off1 + off2)) { 5734 break 5735 } 5736 v.reset(OpAMD64MOVLstore) 5737 v.AuxInt = off1 + off2 5738 v.Aux = sym 5739 v.AddArg(ptr) 5740 v.AddArg(val) 5741 v.AddArg(mem) 5742 return true 5743 } 5744 return false 5745 } 5746 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { 5747 b := v.Block 5748 _ = b 5749 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5750 // cond: ValAndOff(sc).canAdd(off) 5751 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5752 for { 5753 sc := v.AuxInt 5754 s := v.Aux 5755 v_0 := v.Args[0] 5756 if v_0.Op != OpAMD64ADDQconst { 5757 break 5758 } 5759 off := v_0.AuxInt 5760 ptr := v_0.Args[0] 5761 mem := v.Args[1] 5762 if !(ValAndOff(sc).canAdd(off)) { 5763 break 5764 } 5765 v.reset(OpAMD64MOVLstoreconst) 5766 v.AuxInt = ValAndOff(sc).add(off) 5767 v.Aux = s 5768 v.AddArg(ptr) 5769 v.AddArg(mem) 5770 return true 5771 } 5772 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5773 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5774 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5775 for { 5776 sc := v.AuxInt 5777 sym1 := v.Aux 5778 v_0 := v.Args[0] 5779 if v_0.Op != OpAMD64LEAQ { 5780 break 5781 } 5782 off := v_0.AuxInt 5783 sym2 := v_0.Aux 5784 ptr := v_0.Args[0] 5785 mem := v.Args[1] 5786 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5787 break 5788 } 5789 v.reset(OpAMD64MOVLstoreconst) 5790 v.AuxInt = ValAndOff(sc).add(off) 5791 v.Aux = mergeSym(sym1, sym2) 5792 v.AddArg(ptr) 5793 v.AddArg(mem) 5794 return true 5795 } 5796 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5797 // cond: canMergeSym(sym1, sym2) 5798 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5799 for { 5800 x := v.AuxInt 5801 sym1 := v.Aux 5802 v_0 := v.Args[0] 5803 if v_0.Op != OpAMD64LEAQ1 { 5804 break 5805 } 5806 off := v_0.AuxInt 5807 sym2 := v_0.Aux 5808 ptr := v_0.Args[0] 5809 idx := v_0.Args[1] 5810 mem := v.Args[1] 5811 if !(canMergeSym(sym1, sym2)) { 5812 break 5813 } 5814 v.reset(OpAMD64MOVLstoreconstidx1) 5815 v.AuxInt = ValAndOff(x).add(off) 5816 v.Aux = mergeSym(sym1, sym2) 5817 v.AddArg(ptr) 5818 v.AddArg(idx) 5819 v.AddArg(mem) 5820 return true 5821 } 5822 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 5823 // cond: canMergeSym(sym1, sym2) 5824 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5825 for { 5826 x := v.AuxInt 5827 sym1 := v.Aux 5828 v_0 := v.Args[0] 5829 if v_0.Op != OpAMD64LEAQ4 { 5830 break 5831 } 5832 off := v_0.AuxInt 5833 sym2 := v_0.Aux 5834 ptr := v_0.Args[0] 5835 idx := v_0.Args[1] 5836 mem := v.Args[1] 5837 if !(canMergeSym(sym1, sym2)) { 5838 break 5839 } 5840 v.reset(OpAMD64MOVLstoreconstidx4) 5841 v.AuxInt = ValAndOff(x).add(off) 5842 v.Aux = mergeSym(sym1, sym2) 5843 v.AddArg(ptr) 5844 v.AddArg(idx) 5845 v.AddArg(mem) 5846 return true 5847 } 5848 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 5849 // cond: 5850 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 5851 for { 5852 x := v.AuxInt 5853 sym := v.Aux 5854 v_0 := v.Args[0] 5855 if v_0.Op != OpAMD64ADDQ { 5856 break 5857 } 5858 ptr := v_0.Args[0] 5859 idx := v_0.Args[1] 5860 mem := v.Args[1] 5861 v.reset(OpAMD64MOVLstoreconstidx1) 5862 v.AuxInt = x 5863 v.Aux = sym 5864 v.AddArg(ptr) 5865 v.AddArg(idx) 5866 v.AddArg(mem) 5867 return true 5868 } 5869 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 5870 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 5871 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 5872 for { 5873 c := v.AuxInt 5874 s := v.Aux 5875 p := v.Args[0] 5876 x := v.Args[1] 5877 if x.Op != OpAMD64MOVLstoreconst { 5878 break 5879 } 5880 a := x.AuxInt 5881 if x.Aux != s { 5882 break 5883 } 5884 if p != x.Args[0] { 5885 break 5886 } 5887 mem := x.Args[1] 5888 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 5889 break 5890 } 5891 v.reset(OpAMD64MOVQstore) 5892 v.AuxInt = ValAndOff(a).Off() 5893 v.Aux = s 5894 v.AddArg(p) 5895 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 5896 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 5897 v.AddArg(v0) 5898 v.AddArg(mem) 5899 return true 5900 } 5901 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 5902 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5903 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5904 for { 5905 sc := v.AuxInt 5906 sym1 := v.Aux 5907 v_0 := v.Args[0] 5908 if v_0.Op != OpAMD64LEAL { 5909 break 5910 } 5911 off := v_0.AuxInt 5912 sym2 := v_0.Aux 5913 ptr := v_0.Args[0] 5914 mem := v.Args[1] 5915 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5916 break 5917 } 5918 v.reset(OpAMD64MOVLstoreconst) 5919 v.AuxInt = ValAndOff(sc).add(off) 5920 v.Aux = mergeSym(sym1, sym2) 5921 v.AddArg(ptr) 5922 v.AddArg(mem) 5923 return true 5924 } 5925 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 5926 // cond: ValAndOff(sc).canAdd(off) 5927 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5928 for { 5929 sc := v.AuxInt 5930 s := v.Aux 5931 v_0 := v.Args[0] 5932 if v_0.Op != OpAMD64ADDLconst { 5933 break 5934 } 5935 off := v_0.AuxInt 5936 ptr := v_0.Args[0] 5937 mem := v.Args[1] 5938 if !(ValAndOff(sc).canAdd(off)) { 5939 break 5940 } 5941 v.reset(OpAMD64MOVLstoreconst) 5942 v.AuxInt = ValAndOff(sc).add(off) 5943 v.Aux = s 5944 v.AddArg(ptr) 5945 v.AddArg(mem) 5946 return true 5947 } 5948 return false 5949 } 5950 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { 5951 b := v.Block 5952 _ = b 5953 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 5954 // cond: 5955 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 5956 for { 5957 c := v.AuxInt 5958 sym := v.Aux 5959 ptr := v.Args[0] 5960 v_1 := v.Args[1] 5961 if v_1.Op != OpAMD64SHLQconst { 5962 break 5963 } 5964 if v_1.AuxInt != 2 { 5965 break 5966 } 5967 idx := v_1.Args[0] 5968 mem := v.Args[2] 5969 v.reset(OpAMD64MOVLstoreconstidx4) 5970 v.AuxInt = c 5971 v.Aux = sym 5972 v.AddArg(ptr) 5973 v.AddArg(idx) 5974 v.AddArg(mem) 5975 return true 5976 } 5977 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 5978 // cond: 5979 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5980 for { 5981 x := v.AuxInt 5982 sym := v.Aux 5983 v_0 := v.Args[0] 5984 if v_0.Op != OpAMD64ADDQconst { 5985 break 5986 } 5987 c := v_0.AuxInt 5988 ptr := v_0.Args[0] 5989 idx := v.Args[1] 5990 mem := v.Args[2] 5991 v.reset(OpAMD64MOVLstoreconstidx1) 5992 v.AuxInt = ValAndOff(x).add(c) 5993 v.Aux = sym 5994 v.AddArg(ptr) 5995 v.AddArg(idx) 5996 v.AddArg(mem) 5997 return true 5998 } 5999 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 6000 // cond: 6001 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6002 for { 6003 x := v.AuxInt 6004 sym := v.Aux 6005 ptr := v.Args[0] 6006 v_1 := v.Args[1] 6007 if v_1.Op != OpAMD64ADDQconst { 6008 break 6009 } 6010 c := v_1.AuxInt 6011 idx := v_1.Args[0] 6012 mem := v.Args[2] 6013 v.reset(OpAMD64MOVLstoreconstidx1) 6014 v.AuxInt = ValAndOff(x).add(c) 6015 v.Aux = sym 6016 v.AddArg(ptr) 6017 v.AddArg(idx) 6018 v.AddArg(mem) 6019 return true 6020 } 6021 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 6022 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6023 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6024 for { 6025 c := v.AuxInt 6026 s := v.Aux 6027 p := v.Args[0] 6028 i := v.Args[1] 6029 x := v.Args[2] 6030 if x.Op != OpAMD64MOVLstoreconstidx1 { 6031 break 6032 } 6033 a := x.AuxInt 6034 if x.Aux != s { 6035 break 6036 } 6037 if p != x.Args[0] { 6038 break 6039 } 6040 if i != x.Args[1] { 6041 break 6042 } 6043 mem := x.Args[2] 6044 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6045 break 6046 } 6047 v.reset(OpAMD64MOVQstoreidx1) 6048 v.AuxInt = ValAndOff(a).Off() 6049 v.Aux = s 6050 v.AddArg(p) 6051 v.AddArg(i) 6052 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6053 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6054 v.AddArg(v0) 6055 v.AddArg(mem) 6056 return true 6057 } 6058 return false 6059 } 6060 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { 6061 b := v.Block 6062 _ = b 6063 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 6064 // cond: 6065 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6066 for { 6067 x := v.AuxInt 6068 sym := v.Aux 6069 v_0 := v.Args[0] 6070 if v_0.Op != OpAMD64ADDQconst { 6071 break 6072 } 6073 c := v_0.AuxInt 6074 ptr := v_0.Args[0] 6075 idx := v.Args[1] 6076 mem := v.Args[2] 6077 v.reset(OpAMD64MOVLstoreconstidx4) 6078 v.AuxInt = ValAndOff(x).add(c) 6079 v.Aux = sym 6080 v.AddArg(ptr) 6081 v.AddArg(idx) 6082 v.AddArg(mem) 6083 return true 6084 } 6085 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 6086 // cond: 6087 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 6088 for { 6089 x := v.AuxInt 6090 sym := v.Aux 6091 ptr := v.Args[0] 6092 v_1 := v.Args[1] 6093 if v_1.Op != OpAMD64ADDQconst { 6094 break 6095 } 6096 c := v_1.AuxInt 6097 idx := v_1.Args[0] 6098 mem := v.Args[2] 6099 v.reset(OpAMD64MOVLstoreconstidx4) 6100 v.AuxInt = ValAndOff(x).add(4 * c) 6101 v.Aux = sym 6102 v.AddArg(ptr) 6103 v.AddArg(idx) 6104 v.AddArg(mem) 6105 return true 6106 } 6107 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 6108 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6109 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6110 for { 6111 c := v.AuxInt 6112 s := v.Aux 6113 p := v.Args[0] 6114 i := v.Args[1] 6115 x := v.Args[2] 6116 if x.Op != OpAMD64MOVLstoreconstidx4 { 6117 break 6118 } 6119 a := x.AuxInt 6120 if x.Aux != s { 6121 break 6122 } 6123 if p != x.Args[0] { 6124 break 6125 } 6126 if i != x.Args[1] { 6127 break 6128 } 6129 mem := x.Args[2] 6130 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6131 break 6132 } 6133 v.reset(OpAMD64MOVQstoreidx1) 6134 v.AuxInt = ValAndOff(a).Off() 6135 v.Aux = s 6136 v.AddArg(p) 6137 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) 6138 v0.AuxInt = 2 6139 v0.AddArg(i) 6140 v.AddArg(v0) 6141 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6142 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6143 v.AddArg(v1) 6144 v.AddArg(mem) 6145 return true 6146 } 6147 return false 6148 } 6149 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { 6150 b := v.Block 6151 _ = b 6152 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 6153 // cond: 6154 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 6155 for { 6156 c := v.AuxInt 6157 sym := v.Aux 6158 ptr := v.Args[0] 6159 v_1 := v.Args[1] 6160 if v_1.Op != OpAMD64SHLQconst { 6161 break 6162 } 6163 if v_1.AuxInt != 2 { 6164 break 6165 } 6166 idx := v_1.Args[0] 6167 val := v.Args[2] 6168 mem := v.Args[3] 6169 v.reset(OpAMD64MOVLstoreidx4) 6170 v.AuxInt = c 6171 v.Aux = sym 6172 v.AddArg(ptr) 6173 v.AddArg(idx) 6174 v.AddArg(val) 6175 v.AddArg(mem) 6176 return true 6177 } 6178 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6179 // cond: 6180 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 6181 for { 6182 c := v.AuxInt 6183 sym := v.Aux 6184 v_0 := v.Args[0] 6185 if v_0.Op != OpAMD64ADDQconst { 6186 break 6187 } 6188 d := v_0.AuxInt 6189 ptr := v_0.Args[0] 6190 idx := v.Args[1] 6191 val := v.Args[2] 6192 mem := v.Args[3] 6193 v.reset(OpAMD64MOVLstoreidx1) 6194 v.AuxInt = c + d 6195 v.Aux = sym 6196 v.AddArg(ptr) 6197 v.AddArg(idx) 6198 v.AddArg(val) 6199 v.AddArg(mem) 6200 return true 6201 } 6202 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6203 // cond: 6204 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 6205 for { 6206 c := v.AuxInt 6207 sym := v.Aux 6208 ptr := v.Args[0] 6209 v_1 := v.Args[1] 6210 if v_1.Op != OpAMD64ADDQconst { 6211 break 6212 } 6213 d := v_1.AuxInt 6214 idx := v_1.Args[0] 6215 val := v.Args[2] 6216 mem := v.Args[3] 6217 v.reset(OpAMD64MOVLstoreidx1) 6218 v.AuxInt = c + d 6219 v.Aux = sym 6220 v.AddArg(ptr) 6221 v.AddArg(idx) 6222 v.AddArg(val) 6223 v.AddArg(mem) 6224 return true 6225 } 6226 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 6227 // cond: x.Uses == 1 && clobber(x) 6228 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 6229 for { 6230 i := v.AuxInt 6231 s := v.Aux 6232 p := v.Args[0] 6233 idx := v.Args[1] 6234 v_2 := v.Args[2] 6235 if v_2.Op != OpAMD64SHRQconst { 6236 break 6237 } 6238 if v_2.AuxInt != 32 { 6239 break 6240 } 6241 w := v_2.Args[0] 6242 x := v.Args[3] 6243 if x.Op != OpAMD64MOVLstoreidx1 { 6244 break 6245 } 6246 if x.AuxInt != i-4 { 6247 break 6248 } 6249 if x.Aux != s { 6250 break 6251 } 6252 if p != x.Args[0] { 6253 break 6254 } 6255 if idx != x.Args[1] { 6256 break 6257 } 6258 if w != x.Args[2] { 6259 break 6260 } 6261 mem := x.Args[3] 6262 if !(x.Uses == 1 && clobber(x)) { 6263 break 6264 } 6265 v.reset(OpAMD64MOVQstoreidx1) 6266 v.AuxInt = i - 4 6267 v.Aux = s 6268 v.AddArg(p) 6269 v.AddArg(idx) 6270 v.AddArg(w) 6271 v.AddArg(mem) 6272 return true 6273 } 6274 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 6275 // cond: x.Uses == 1 && clobber(x) 6276 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 6277 for { 6278 i := v.AuxInt 6279 s := v.Aux 6280 p := v.Args[0] 6281 idx := v.Args[1] 6282 v_2 := v.Args[2] 6283 if v_2.Op != OpAMD64SHRQconst { 6284 break 6285 } 6286 j := v_2.AuxInt 6287 w := v_2.Args[0] 6288 x := v.Args[3] 6289 if x.Op != OpAMD64MOVLstoreidx1 { 6290 break 6291 } 6292 if x.AuxInt != i-4 { 6293 break 6294 } 6295 if x.Aux != s { 6296 break 6297 } 6298 if p != x.Args[0] { 6299 break 6300 } 6301 if idx != x.Args[1] { 6302 break 6303 } 6304 w0 := x.Args[2] 6305 if w0.Op != OpAMD64SHRQconst { 6306 break 6307 } 6308 if w0.AuxInt != j-32 { 6309 break 6310 } 6311 if w != w0.Args[0] { 6312 break 6313 } 6314 mem := x.Args[3] 6315 if !(x.Uses == 1 && clobber(x)) { 6316 break 6317 } 6318 v.reset(OpAMD64MOVQstoreidx1) 6319 v.AuxInt = i - 4 6320 v.Aux = s 6321 v.AddArg(p) 6322 v.AddArg(idx) 6323 v.AddArg(w0) 6324 v.AddArg(mem) 6325 return true 6326 } 6327 return false 6328 } 6329 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { 6330 b := v.Block 6331 _ = b 6332 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6333 // cond: 6334 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 6335 for { 6336 c := v.AuxInt 6337 sym := v.Aux 6338 v_0 := v.Args[0] 6339 if v_0.Op != OpAMD64ADDQconst { 6340 break 6341 } 6342 d := v_0.AuxInt 6343 ptr := v_0.Args[0] 6344 idx := v.Args[1] 6345 val := v.Args[2] 6346 mem := v.Args[3] 6347 v.reset(OpAMD64MOVLstoreidx4) 6348 v.AuxInt = c + d 6349 v.Aux = sym 6350 v.AddArg(ptr) 6351 v.AddArg(idx) 6352 v.AddArg(val) 6353 v.AddArg(mem) 6354 return true 6355 } 6356 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6357 // cond: 6358 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 6359 for { 6360 c := v.AuxInt 6361 sym := v.Aux 6362 ptr := v.Args[0] 6363 v_1 := v.Args[1] 6364 if v_1.Op != OpAMD64ADDQconst { 6365 break 6366 } 6367 d := v_1.AuxInt 6368 idx := v_1.Args[0] 6369 val := v.Args[2] 6370 mem := v.Args[3] 6371 v.reset(OpAMD64MOVLstoreidx4) 6372 v.AuxInt = c + 4*d 6373 v.Aux = sym 6374 v.AddArg(ptr) 6375 v.AddArg(idx) 6376 v.AddArg(val) 6377 v.AddArg(mem) 6378 return true 6379 } 6380 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 6381 // cond: x.Uses == 1 && clobber(x) 6382 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 6383 for { 6384 i := v.AuxInt 6385 s := v.Aux 6386 p := v.Args[0] 6387 idx := v.Args[1] 6388 v_2 := v.Args[2] 6389 if v_2.Op != OpAMD64SHRQconst { 6390 break 6391 } 6392 if v_2.AuxInt != 32 { 6393 break 6394 } 6395 w := v_2.Args[0] 6396 x := v.Args[3] 6397 if x.Op != OpAMD64MOVLstoreidx4 { 6398 break 6399 } 6400 if x.AuxInt != i-4 { 6401 break 6402 } 6403 if x.Aux != s { 6404 break 6405 } 6406 if p != x.Args[0] { 6407 break 6408 } 6409 if idx != x.Args[1] { 6410 break 6411 } 6412 if w != x.Args[2] { 6413 break 6414 } 6415 mem := x.Args[3] 6416 if !(x.Uses == 1 && clobber(x)) { 6417 break 6418 } 6419 v.reset(OpAMD64MOVQstoreidx1) 6420 v.AuxInt = i - 4 6421 v.Aux = s 6422 v.AddArg(p) 6423 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 6424 v0.AuxInt = 2 6425 v0.AddArg(idx) 6426 v.AddArg(v0) 6427 v.AddArg(w) 6428 v.AddArg(mem) 6429 return true 6430 } 6431 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 6432 // cond: x.Uses == 1 && clobber(x) 6433 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 6434 for { 6435 i := v.AuxInt 6436 s := v.Aux 6437 p := v.Args[0] 6438 idx := v.Args[1] 6439 v_2 := v.Args[2] 6440 if v_2.Op != OpAMD64SHRQconst { 6441 break 6442 } 6443 j := v_2.AuxInt 6444 w := v_2.Args[0] 6445 x := v.Args[3] 6446 if x.Op != OpAMD64MOVLstoreidx4 { 6447 break 6448 } 6449 if x.AuxInt != i-4 { 6450 break 6451 } 6452 if x.Aux != s { 6453 break 6454 } 6455 if p != x.Args[0] { 6456 break 6457 } 6458 if idx != x.Args[1] { 6459 break 6460 } 6461 w0 := x.Args[2] 6462 if w0.Op != OpAMD64SHRQconst { 6463 break 6464 } 6465 if w0.AuxInt != j-32 { 6466 break 6467 } 6468 if w != w0.Args[0] { 6469 break 6470 } 6471 mem := x.Args[3] 6472 if !(x.Uses == 1 && clobber(x)) { 6473 break 6474 } 6475 v.reset(OpAMD64MOVQstoreidx1) 6476 v.AuxInt = i - 4 6477 v.Aux = s 6478 v.AddArg(p) 6479 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 6480 v0.AuxInt = 2 6481 v0.AddArg(idx) 6482 v.AddArg(v0) 6483 v.AddArg(w0) 6484 v.AddArg(mem) 6485 return true 6486 } 6487 return false 6488 } 6489 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { 6490 b := v.Block 6491 _ = b 6492 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 6493 // cond: is32Bit(off1+off2) 6494 // result: (MOVOload [off1+off2] {sym} ptr mem) 6495 for { 6496 off1 := v.AuxInt 6497 sym := v.Aux 6498 v_0 := v.Args[0] 6499 if v_0.Op != OpAMD64ADDQconst { 6500 break 6501 } 6502 off2 := v_0.AuxInt 6503 ptr := v_0.Args[0] 6504 mem := v.Args[1] 6505 if !(is32Bit(off1 + off2)) { 6506 break 6507 } 6508 v.reset(OpAMD64MOVOload) 6509 v.AuxInt = off1 + off2 6510 v.Aux = sym 6511 v.AddArg(ptr) 6512 v.AddArg(mem) 6513 return true 6514 } 6515 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6516 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6517 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6518 for { 6519 off1 := v.AuxInt 6520 sym1 := v.Aux 6521 v_0 := v.Args[0] 6522 if v_0.Op != OpAMD64LEAQ { 6523 break 6524 } 6525 off2 := v_0.AuxInt 6526 sym2 := v_0.Aux 6527 base := v_0.Args[0] 6528 mem := v.Args[1] 6529 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6530 break 6531 } 6532 v.reset(OpAMD64MOVOload) 6533 v.AuxInt = off1 + off2 6534 v.Aux = mergeSym(sym1, sym2) 6535 v.AddArg(base) 6536 v.AddArg(mem) 6537 return true 6538 } 6539 return false 6540 } 6541 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool { 6542 b := v.Block 6543 _ = b 6544 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6545 // cond: is32Bit(off1+off2) 6546 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 6547 for { 6548 off1 := v.AuxInt 6549 sym := v.Aux 6550 v_0 := v.Args[0] 6551 if v_0.Op != OpAMD64ADDQconst { 6552 break 6553 } 6554 off2 := v_0.AuxInt 6555 ptr := v_0.Args[0] 6556 val := v.Args[1] 6557 mem := v.Args[2] 6558 if !(is32Bit(off1 + off2)) { 6559 break 6560 } 6561 v.reset(OpAMD64MOVOstore) 6562 v.AuxInt = off1 + off2 6563 v.Aux = sym 6564 v.AddArg(ptr) 6565 v.AddArg(val) 6566 v.AddArg(mem) 6567 return true 6568 } 6569 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6570 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6571 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6572 for { 6573 off1 := v.AuxInt 6574 sym1 := v.Aux 6575 v_0 := v.Args[0] 6576 if v_0.Op != OpAMD64LEAQ { 6577 break 6578 } 6579 off2 := v_0.AuxInt 6580 sym2 := v_0.Aux 6581 base := v_0.Args[0] 6582 val := v.Args[1] 6583 mem := v.Args[2] 6584 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6585 break 6586 } 6587 v.reset(OpAMD64MOVOstore) 6588 v.AuxInt = off1 + off2 6589 v.Aux = mergeSym(sym1, sym2) 6590 v.AddArg(base) 6591 v.AddArg(val) 6592 v.AddArg(mem) 6593 return true 6594 } 6595 return false 6596 } 6597 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool { 6598 b := v.Block 6599 _ = b 6600 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 6601 // cond: is32Bit(off1+off2) 6602 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 6603 for { 6604 off1 := v.AuxInt 6605 sym := v.Aux 6606 v_0 := v.Args[0] 6607 if v_0.Op != OpAMD64ADDQconst { 6608 break 6609 } 6610 off2 := v_0.AuxInt 6611 ptr := v_0.Args[0] 6612 mem := v.Args[1] 6613 if !(is32Bit(off1 + off2)) { 6614 break 6615 } 6616 v.reset(OpAMD64MOVQatomicload) 6617 v.AuxInt = off1 + off2 6618 v.Aux = sym 6619 v.AddArg(ptr) 6620 v.AddArg(mem) 6621 return true 6622 } 6623 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 6624 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6625 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 6626 for { 6627 off1 := v.AuxInt 6628 sym1 := v.Aux 6629 v_0 := v.Args[0] 6630 if v_0.Op != OpAMD64LEAQ { 6631 break 6632 } 6633 off2 := v_0.AuxInt 6634 sym2 := v_0.Aux 6635 ptr := v_0.Args[0] 6636 mem := v.Args[1] 6637 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6638 break 6639 } 6640 v.reset(OpAMD64MOVQatomicload) 6641 v.AuxInt = off1 + off2 6642 v.Aux = mergeSym(sym1, sym2) 6643 v.AddArg(ptr) 6644 v.AddArg(mem) 6645 return true 6646 } 6647 return false 6648 } 6649 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { 6650 b := v.Block 6651 _ = b 6652 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 6653 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6654 // result: x 6655 for { 6656 off := v.AuxInt 6657 sym := v.Aux 6658 ptr := v.Args[0] 6659 v_1 := v.Args[1] 6660 if v_1.Op != OpAMD64MOVQstore { 6661 break 6662 } 6663 off2 := v_1.AuxInt 6664 sym2 := v_1.Aux 6665 ptr2 := v_1.Args[0] 6666 x := v_1.Args[1] 6667 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6668 break 6669 } 6670 v.reset(OpCopy) 6671 v.Type = x.Type 6672 v.AddArg(x) 6673 return true 6674 } 6675 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 6676 // cond: is32Bit(off1+off2) 6677 // result: (MOVQload [off1+off2] {sym} ptr mem) 6678 for { 6679 off1 := v.AuxInt 6680 sym := v.Aux 6681 v_0 := v.Args[0] 6682 if v_0.Op != OpAMD64ADDQconst { 6683 break 6684 } 6685 off2 := v_0.AuxInt 6686 ptr := v_0.Args[0] 6687 mem := v.Args[1] 6688 if !(is32Bit(off1 + off2)) { 6689 break 6690 } 6691 v.reset(OpAMD64MOVQload) 6692 v.AuxInt = off1 + off2 6693 v.Aux = sym 6694 v.AddArg(ptr) 6695 v.AddArg(mem) 6696 return true 6697 } 6698 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6699 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6700 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6701 for { 6702 off1 := v.AuxInt 6703 sym1 := v.Aux 6704 v_0 := v.Args[0] 6705 if v_0.Op != OpAMD64LEAQ { 6706 break 6707 } 6708 off2 := v_0.AuxInt 6709 sym2 := v_0.Aux 6710 base := v_0.Args[0] 6711 mem := v.Args[1] 6712 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6713 break 6714 } 6715 v.reset(OpAMD64MOVQload) 6716 v.AuxInt = off1 + off2 6717 v.Aux = mergeSym(sym1, sym2) 6718 v.AddArg(base) 6719 v.AddArg(mem) 6720 return true 6721 } 6722 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6723 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6724 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6725 for { 6726 off1 := v.AuxInt 6727 sym1 := v.Aux 6728 v_0 := v.Args[0] 6729 if v_0.Op != OpAMD64LEAQ1 { 6730 break 6731 } 6732 off2 := v_0.AuxInt 6733 sym2 := v_0.Aux 6734 ptr := v_0.Args[0] 6735 idx := v_0.Args[1] 6736 mem := v.Args[1] 6737 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6738 break 6739 } 6740 v.reset(OpAMD64MOVQloadidx1) 6741 v.AuxInt = off1 + off2 6742 v.Aux = mergeSym(sym1, sym2) 6743 v.AddArg(ptr) 6744 v.AddArg(idx) 6745 v.AddArg(mem) 6746 return true 6747 } 6748 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 6749 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6750 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6751 for { 6752 off1 := v.AuxInt 6753 sym1 := v.Aux 6754 v_0 := v.Args[0] 6755 if v_0.Op != OpAMD64LEAQ8 { 6756 break 6757 } 6758 off2 := v_0.AuxInt 6759 sym2 := v_0.Aux 6760 ptr := v_0.Args[0] 6761 idx := v_0.Args[1] 6762 mem := v.Args[1] 6763 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6764 break 6765 } 6766 v.reset(OpAMD64MOVQloadidx8) 6767 v.AuxInt = off1 + off2 6768 v.Aux = mergeSym(sym1, sym2) 6769 v.AddArg(ptr) 6770 v.AddArg(idx) 6771 v.AddArg(mem) 6772 return true 6773 } 6774 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 6775 // cond: ptr.Op != OpSB 6776 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 6777 for { 6778 off := v.AuxInt 6779 sym := v.Aux 6780 v_0 := v.Args[0] 6781 if v_0.Op != OpAMD64ADDQ { 6782 break 6783 } 6784 ptr := v_0.Args[0] 6785 idx := v_0.Args[1] 6786 mem := v.Args[1] 6787 if !(ptr.Op != OpSB) { 6788 break 6789 } 6790 v.reset(OpAMD64MOVQloadidx1) 6791 v.AuxInt = off 6792 v.Aux = sym 6793 v.AddArg(ptr) 6794 v.AddArg(idx) 6795 v.AddArg(mem) 6796 return true 6797 } 6798 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 6799 // cond: canMergeSym(sym1, sym2) 6800 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6801 for { 6802 off1 := v.AuxInt 6803 sym1 := v.Aux 6804 v_0 := v.Args[0] 6805 if v_0.Op != OpAMD64LEAL { 6806 break 6807 } 6808 off2 := v_0.AuxInt 6809 sym2 := v_0.Aux 6810 base := v_0.Args[0] 6811 mem := v.Args[1] 6812 if !(canMergeSym(sym1, sym2)) { 6813 break 6814 } 6815 v.reset(OpAMD64MOVQload) 6816 v.AuxInt = off1 + off2 6817 v.Aux = mergeSym(sym1, sym2) 6818 v.AddArg(base) 6819 v.AddArg(mem) 6820 return true 6821 } 6822 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 6823 // cond: is32Bit(off1+off2) 6824 // result: (MOVQload [off1+off2] {sym} ptr mem) 6825 for { 6826 off1 := v.AuxInt 6827 sym := v.Aux 6828 v_0 := v.Args[0] 6829 if v_0.Op != OpAMD64ADDLconst { 6830 break 6831 } 6832 off2 := v_0.AuxInt 6833 ptr := v_0.Args[0] 6834 mem := v.Args[1] 6835 if !(is32Bit(off1 + off2)) { 6836 break 6837 } 6838 v.reset(OpAMD64MOVQload) 6839 v.AuxInt = off1 + off2 6840 v.Aux = sym 6841 v.AddArg(ptr) 6842 v.AddArg(mem) 6843 return true 6844 } 6845 return false 6846 } 6847 func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool { 6848 b := v.Block 6849 _ = b 6850 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 6851 // cond: 6852 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 6853 for { 6854 c := v.AuxInt 6855 sym := v.Aux 6856 ptr := v.Args[0] 6857 v_1 := v.Args[1] 6858 if v_1.Op != OpAMD64SHLQconst { 6859 break 6860 } 6861 if v_1.AuxInt != 3 { 6862 break 6863 } 6864 idx := v_1.Args[0] 6865 mem := v.Args[2] 6866 v.reset(OpAMD64MOVQloadidx8) 6867 v.AuxInt = c 6868 v.Aux = sym 6869 v.AddArg(ptr) 6870 v.AddArg(idx) 6871 v.AddArg(mem) 6872 return true 6873 } 6874 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 6875 // cond: 6876 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 6877 for { 6878 c := v.AuxInt 6879 sym := v.Aux 6880 v_0 := v.Args[0] 6881 if v_0.Op != OpAMD64ADDQconst { 6882 break 6883 } 6884 d := v_0.AuxInt 6885 ptr := v_0.Args[0] 6886 idx := v.Args[1] 6887 mem := v.Args[2] 6888 v.reset(OpAMD64MOVQloadidx1) 6889 v.AuxInt = c + d 6890 v.Aux = sym 6891 v.AddArg(ptr) 6892 v.AddArg(idx) 6893 v.AddArg(mem) 6894 return true 6895 } 6896 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 6897 // cond: 6898 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 6899 for { 6900 c := v.AuxInt 6901 sym := v.Aux 6902 ptr := v.Args[0] 6903 v_1 := v.Args[1] 6904 if v_1.Op != OpAMD64ADDQconst { 6905 break 6906 } 6907 d := v_1.AuxInt 6908 idx := v_1.Args[0] 6909 mem := v.Args[2] 6910 v.reset(OpAMD64MOVQloadidx1) 6911 v.AuxInt = c + d 6912 v.Aux = sym 6913 v.AddArg(ptr) 6914 v.AddArg(idx) 6915 v.AddArg(mem) 6916 return true 6917 } 6918 return false 6919 } 6920 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { 6921 b := v.Block 6922 _ = b 6923 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 6924 // cond: 6925 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 6926 for { 6927 c := v.AuxInt 6928 sym := v.Aux 6929 v_0 := v.Args[0] 6930 if v_0.Op != OpAMD64ADDQconst { 6931 break 6932 } 6933 d := v_0.AuxInt 6934 ptr := v_0.Args[0] 6935 idx := v.Args[1] 6936 mem := v.Args[2] 6937 v.reset(OpAMD64MOVQloadidx8) 6938 v.AuxInt = c + d 6939 v.Aux = sym 6940 v.AddArg(ptr) 6941 v.AddArg(idx) 6942 v.AddArg(mem) 6943 return true 6944 } 6945 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 6946 // cond: 6947 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 6948 for { 6949 c := v.AuxInt 6950 sym := v.Aux 6951 ptr := v.Args[0] 6952 v_1 := v.Args[1] 6953 if v_1.Op != OpAMD64ADDQconst { 6954 break 6955 } 6956 d := v_1.AuxInt 6957 idx := v_1.Args[0] 6958 mem := v.Args[2] 6959 v.reset(OpAMD64MOVQloadidx8) 6960 v.AuxInt = c + 8*d 6961 v.Aux = sym 6962 v.AddArg(ptr) 6963 v.AddArg(idx) 6964 v.AddArg(mem) 6965 return true 6966 } 6967 return false 6968 } 6969 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { 6970 b := v.Block 6971 _ = b 6972 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6973 // cond: is32Bit(off1+off2) 6974 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 6975 for { 6976 off1 := v.AuxInt 6977 sym := v.Aux 6978 v_0 := v.Args[0] 6979 if v_0.Op != OpAMD64ADDQconst { 6980 break 6981 } 6982 off2 := v_0.AuxInt 6983 ptr := v_0.Args[0] 6984 val := v.Args[1] 6985 mem := v.Args[2] 6986 if !(is32Bit(off1 + off2)) { 6987 break 6988 } 6989 v.reset(OpAMD64MOVQstore) 6990 v.AuxInt = off1 + off2 6991 v.Aux = sym 6992 v.AddArg(ptr) 6993 v.AddArg(val) 6994 v.AddArg(mem) 6995 return true 6996 } 6997 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 6998 // cond: validValAndOff(c,off) 6999 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 7000 for { 7001 off := v.AuxInt 7002 sym := v.Aux 7003 ptr := v.Args[0] 7004 v_1 := v.Args[1] 7005 if v_1.Op != OpAMD64MOVQconst { 7006 break 7007 } 7008 c := v_1.AuxInt 7009 mem := v.Args[2] 7010 if !(validValAndOff(c, off)) { 7011 break 7012 } 7013 v.reset(OpAMD64MOVQstoreconst) 7014 v.AuxInt = makeValAndOff(c, off) 7015 v.Aux = sym 7016 v.AddArg(ptr) 7017 v.AddArg(mem) 7018 return true 7019 } 7020 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7021 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7022 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7023 for { 7024 off1 := v.AuxInt 7025 sym1 := v.Aux 7026 v_0 := v.Args[0] 7027 if v_0.Op != OpAMD64LEAQ { 7028 break 7029 } 7030 off2 := v_0.AuxInt 7031 sym2 := v_0.Aux 7032 base := v_0.Args[0] 7033 val := v.Args[1] 7034 mem := v.Args[2] 7035 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7036 break 7037 } 7038 v.reset(OpAMD64MOVQstore) 7039 v.AuxInt = off1 + off2 7040 v.Aux = mergeSym(sym1, sym2) 7041 v.AddArg(base) 7042 v.AddArg(val) 7043 v.AddArg(mem) 7044 return true 7045 } 7046 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7047 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7048 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7049 for { 7050 off1 := v.AuxInt 7051 sym1 := v.Aux 7052 v_0 := v.Args[0] 7053 if v_0.Op != OpAMD64LEAQ1 { 7054 break 7055 } 7056 off2 := v_0.AuxInt 7057 sym2 := v_0.Aux 7058 ptr := v_0.Args[0] 7059 idx := v_0.Args[1] 7060 val := v.Args[1] 7061 mem := v.Args[2] 7062 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7063 break 7064 } 7065 v.reset(OpAMD64MOVQstoreidx1) 7066 v.AuxInt = off1 + off2 7067 v.Aux = mergeSym(sym1, sym2) 7068 v.AddArg(ptr) 7069 v.AddArg(idx) 7070 v.AddArg(val) 7071 v.AddArg(mem) 7072 return true 7073 } 7074 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 7075 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7076 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7077 for { 7078 off1 := v.AuxInt 7079 sym1 := v.Aux 7080 v_0 := v.Args[0] 7081 if v_0.Op != OpAMD64LEAQ8 { 7082 break 7083 } 7084 off2 := v_0.AuxInt 7085 sym2 := v_0.Aux 7086 ptr := v_0.Args[0] 7087 idx := v_0.Args[1] 7088 val := v.Args[1] 7089 mem := v.Args[2] 7090 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7091 break 7092 } 7093 v.reset(OpAMD64MOVQstoreidx8) 7094 v.AuxInt = off1 + off2 7095 v.Aux = mergeSym(sym1, sym2) 7096 v.AddArg(ptr) 7097 v.AddArg(idx) 7098 v.AddArg(val) 7099 v.AddArg(mem) 7100 return true 7101 } 7102 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 7103 // cond: ptr.Op != OpSB 7104 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 7105 for { 7106 off := v.AuxInt 7107 sym := v.Aux 7108 v_0 := v.Args[0] 7109 if v_0.Op != OpAMD64ADDQ { 7110 break 7111 } 7112 ptr := v_0.Args[0] 7113 idx := v_0.Args[1] 7114 val := v.Args[1] 7115 mem := v.Args[2] 7116 if !(ptr.Op != OpSB) { 7117 break 7118 } 7119 v.reset(OpAMD64MOVQstoreidx1) 7120 v.AuxInt = off 7121 v.Aux = sym 7122 v.AddArg(ptr) 7123 v.AddArg(idx) 7124 v.AddArg(val) 7125 v.AddArg(mem) 7126 return true 7127 } 7128 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 7129 // cond: canMergeSym(sym1, sym2) 7130 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7131 for { 7132 off1 := v.AuxInt 7133 sym1 := v.Aux 7134 v_0 := v.Args[0] 7135 if v_0.Op != OpAMD64LEAL { 7136 break 7137 } 7138 off2 := v_0.AuxInt 7139 sym2 := v_0.Aux 7140 base := v_0.Args[0] 7141 val := v.Args[1] 7142 mem := v.Args[2] 7143 if !(canMergeSym(sym1, sym2)) { 7144 break 7145 } 7146 v.reset(OpAMD64MOVQstore) 7147 v.AuxInt = off1 + off2 7148 v.Aux = mergeSym(sym1, sym2) 7149 v.AddArg(base) 7150 v.AddArg(val) 7151 v.AddArg(mem) 7152 return true 7153 } 7154 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 7155 // cond: is32Bit(off1+off2) 7156 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 7157 for { 7158 off1 := v.AuxInt 7159 sym := v.Aux 7160 v_0 := v.Args[0] 7161 if v_0.Op != OpAMD64ADDLconst { 7162 break 7163 } 7164 off2 := v_0.AuxInt 7165 ptr := v_0.Args[0] 7166 val := v.Args[1] 7167 mem := v.Args[2] 7168 if !(is32Bit(off1 + off2)) { 7169 break 7170 } 7171 v.reset(OpAMD64MOVQstore) 7172 v.AuxInt = off1 + off2 7173 v.Aux = sym 7174 v.AddArg(ptr) 7175 v.AddArg(val) 7176 v.AddArg(mem) 7177 return true 7178 } 7179 return false 7180 } 7181 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { 7182 b := v.Block 7183 _ = b 7184 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 7185 // cond: ValAndOff(sc).canAdd(off) 7186 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7187 for { 7188 sc := v.AuxInt 7189 s := v.Aux 7190 v_0 := v.Args[0] 7191 if v_0.Op != OpAMD64ADDQconst { 7192 break 7193 } 7194 off := v_0.AuxInt 7195 ptr := v_0.Args[0] 7196 mem := v.Args[1] 7197 if !(ValAndOff(sc).canAdd(off)) { 7198 break 7199 } 7200 v.reset(OpAMD64MOVQstoreconst) 7201 v.AuxInt = ValAndOff(sc).add(off) 7202 v.Aux = s 7203 v.AddArg(ptr) 7204 v.AddArg(mem) 7205 return true 7206 } 7207 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 7208 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7209 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7210 for { 7211 sc := v.AuxInt 7212 sym1 := v.Aux 7213 v_0 := v.Args[0] 7214 if v_0.Op != OpAMD64LEAQ { 7215 break 7216 } 7217 off := v_0.AuxInt 7218 sym2 := v_0.Aux 7219 ptr := v_0.Args[0] 7220 mem := v.Args[1] 7221 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7222 break 7223 } 7224 v.reset(OpAMD64MOVQstoreconst) 7225 v.AuxInt = ValAndOff(sc).add(off) 7226 v.Aux = mergeSym(sym1, sym2) 7227 v.AddArg(ptr) 7228 v.AddArg(mem) 7229 return true 7230 } 7231 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 7232 // cond: canMergeSym(sym1, sym2) 7233 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7234 for { 7235 x := v.AuxInt 7236 sym1 := v.Aux 7237 v_0 := v.Args[0] 7238 if v_0.Op != OpAMD64LEAQ1 { 7239 break 7240 } 7241 off := v_0.AuxInt 7242 sym2 := v_0.Aux 7243 ptr := v_0.Args[0] 7244 idx := v_0.Args[1] 7245 mem := v.Args[1] 7246 if !(canMergeSym(sym1, sym2)) { 7247 break 7248 } 7249 v.reset(OpAMD64MOVQstoreconstidx1) 7250 v.AuxInt = ValAndOff(x).add(off) 7251 v.Aux = mergeSym(sym1, sym2) 7252 v.AddArg(ptr) 7253 v.AddArg(idx) 7254 v.AddArg(mem) 7255 return true 7256 } 7257 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 7258 // cond: canMergeSym(sym1, sym2) 7259 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7260 for { 7261 x := v.AuxInt 7262 sym1 := v.Aux 7263 v_0 := v.Args[0] 7264 if v_0.Op != OpAMD64LEAQ8 { 7265 break 7266 } 7267 off := v_0.AuxInt 7268 sym2 := v_0.Aux 7269 ptr := v_0.Args[0] 7270 idx := v_0.Args[1] 7271 mem := v.Args[1] 7272 if !(canMergeSym(sym1, sym2)) { 7273 break 7274 } 7275 v.reset(OpAMD64MOVQstoreconstidx8) 7276 v.AuxInt = ValAndOff(x).add(off) 7277 v.Aux = mergeSym(sym1, sym2) 7278 v.AddArg(ptr) 7279 v.AddArg(idx) 7280 v.AddArg(mem) 7281 return true 7282 } 7283 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 7284 // cond: 7285 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 7286 for { 7287 x := v.AuxInt 7288 sym := v.Aux 7289 v_0 := v.Args[0] 7290 if v_0.Op != OpAMD64ADDQ { 7291 break 7292 } 7293 ptr := v_0.Args[0] 7294 idx := v_0.Args[1] 7295 mem := v.Args[1] 7296 v.reset(OpAMD64MOVQstoreconstidx1) 7297 v.AuxInt = x 7298 v.Aux = sym 7299 v.AddArg(ptr) 7300 v.AddArg(idx) 7301 v.AddArg(mem) 7302 return true 7303 } 7304 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 7305 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7306 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7307 for { 7308 sc := v.AuxInt 7309 sym1 := v.Aux 7310 v_0 := v.Args[0] 7311 if v_0.Op != OpAMD64LEAL { 7312 break 7313 } 7314 off := v_0.AuxInt 7315 sym2 := v_0.Aux 7316 ptr := v_0.Args[0] 7317 mem := v.Args[1] 7318 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7319 break 7320 } 7321 v.reset(OpAMD64MOVQstoreconst) 7322 v.AuxInt = ValAndOff(sc).add(off) 7323 v.Aux = mergeSym(sym1, sym2) 7324 v.AddArg(ptr) 7325 v.AddArg(mem) 7326 return true 7327 } 7328 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 7329 // cond: ValAndOff(sc).canAdd(off) 7330 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7331 for { 7332 sc := v.AuxInt 7333 s := v.Aux 7334 v_0 := v.Args[0] 7335 if v_0.Op != OpAMD64ADDLconst { 7336 break 7337 } 7338 off := v_0.AuxInt 7339 ptr := v_0.Args[0] 7340 mem := v.Args[1] 7341 if !(ValAndOff(sc).canAdd(off)) { 7342 break 7343 } 7344 v.reset(OpAMD64MOVQstoreconst) 7345 v.AuxInt = ValAndOff(sc).add(off) 7346 v.Aux = s 7347 v.AddArg(ptr) 7348 v.AddArg(mem) 7349 return true 7350 } 7351 return false 7352 } 7353 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool { 7354 b := v.Block 7355 _ = b 7356 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7357 // cond: 7358 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 7359 for { 7360 c := v.AuxInt 7361 sym := v.Aux 7362 ptr := v.Args[0] 7363 v_1 := v.Args[1] 7364 if v_1.Op != OpAMD64SHLQconst { 7365 break 7366 } 7367 if v_1.AuxInt != 3 { 7368 break 7369 } 7370 idx := v_1.Args[0] 7371 mem := v.Args[2] 7372 v.reset(OpAMD64MOVQstoreconstidx8) 7373 v.AuxInt = c 7374 v.Aux = sym 7375 v.AddArg(ptr) 7376 v.AddArg(idx) 7377 v.AddArg(mem) 7378 return true 7379 } 7380 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7381 // cond: 7382 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7383 for { 7384 x := v.AuxInt 7385 sym := v.Aux 7386 v_0 := v.Args[0] 7387 if v_0.Op != OpAMD64ADDQconst { 7388 break 7389 } 7390 c := v_0.AuxInt 7391 ptr := v_0.Args[0] 7392 idx := v.Args[1] 7393 mem := v.Args[2] 7394 v.reset(OpAMD64MOVQstoreconstidx1) 7395 v.AuxInt = ValAndOff(x).add(c) 7396 v.Aux = sym 7397 v.AddArg(ptr) 7398 v.AddArg(idx) 7399 v.AddArg(mem) 7400 return true 7401 } 7402 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7403 // cond: 7404 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7405 for { 7406 x := v.AuxInt 7407 sym := v.Aux 7408 ptr := v.Args[0] 7409 v_1 := v.Args[1] 7410 if v_1.Op != OpAMD64ADDQconst { 7411 break 7412 } 7413 c := v_1.AuxInt 7414 idx := v_1.Args[0] 7415 mem := v.Args[2] 7416 v.reset(OpAMD64MOVQstoreconstidx1) 7417 v.AuxInt = ValAndOff(x).add(c) 7418 v.Aux = sym 7419 v.AddArg(ptr) 7420 v.AddArg(idx) 7421 v.AddArg(mem) 7422 return true 7423 } 7424 return false 7425 } 7426 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool { 7427 b := v.Block 7428 _ = b 7429 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 7430 // cond: 7431 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7432 for { 7433 x := v.AuxInt 7434 sym := v.Aux 7435 v_0 := v.Args[0] 7436 if v_0.Op != OpAMD64ADDQconst { 7437 break 7438 } 7439 c := v_0.AuxInt 7440 ptr := v_0.Args[0] 7441 idx := v.Args[1] 7442 mem := v.Args[2] 7443 v.reset(OpAMD64MOVQstoreconstidx8) 7444 v.AuxInt = ValAndOff(x).add(c) 7445 v.Aux = sym 7446 v.AddArg(ptr) 7447 v.AddArg(idx) 7448 v.AddArg(mem) 7449 return true 7450 } 7451 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 7452 // cond: 7453 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 7454 for { 7455 x := v.AuxInt 7456 sym := v.Aux 7457 ptr := v.Args[0] 7458 v_1 := v.Args[1] 7459 if v_1.Op != OpAMD64ADDQconst { 7460 break 7461 } 7462 c := v_1.AuxInt 7463 idx := v_1.Args[0] 7464 mem := v.Args[2] 7465 v.reset(OpAMD64MOVQstoreconstidx8) 7466 v.AuxInt = ValAndOff(x).add(8 * c) 7467 v.Aux = sym 7468 v.AddArg(ptr) 7469 v.AddArg(idx) 7470 v.AddArg(mem) 7471 return true 7472 } 7473 return false 7474 } 7475 func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool { 7476 b := v.Block 7477 _ = b 7478 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 7479 // cond: 7480 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 7481 for { 7482 c := v.AuxInt 7483 sym := v.Aux 7484 ptr := v.Args[0] 7485 v_1 := v.Args[1] 7486 if v_1.Op != OpAMD64SHLQconst { 7487 break 7488 } 7489 if v_1.AuxInt != 3 { 7490 break 7491 } 7492 idx := v_1.Args[0] 7493 val := v.Args[2] 7494 mem := v.Args[3] 7495 v.reset(OpAMD64MOVQstoreidx8) 7496 v.AuxInt = c 7497 v.Aux = sym 7498 v.AddArg(ptr) 7499 v.AddArg(idx) 7500 v.AddArg(val) 7501 v.AddArg(mem) 7502 return true 7503 } 7504 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7505 // cond: 7506 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 7507 for { 7508 c := v.AuxInt 7509 sym := v.Aux 7510 v_0 := v.Args[0] 7511 if v_0.Op != OpAMD64ADDQconst { 7512 break 7513 } 7514 d := v_0.AuxInt 7515 ptr := v_0.Args[0] 7516 idx := v.Args[1] 7517 val := v.Args[2] 7518 mem := v.Args[3] 7519 v.reset(OpAMD64MOVQstoreidx1) 7520 v.AuxInt = c + d 7521 v.Aux = sym 7522 v.AddArg(ptr) 7523 v.AddArg(idx) 7524 v.AddArg(val) 7525 v.AddArg(mem) 7526 return true 7527 } 7528 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7529 // cond: 7530 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 7531 for { 7532 c := v.AuxInt 7533 sym := v.Aux 7534 ptr := v.Args[0] 7535 v_1 := v.Args[1] 7536 if v_1.Op != OpAMD64ADDQconst { 7537 break 7538 } 7539 d := v_1.AuxInt 7540 idx := v_1.Args[0] 7541 val := v.Args[2] 7542 mem := v.Args[3] 7543 v.reset(OpAMD64MOVQstoreidx1) 7544 v.AuxInt = c + d 7545 v.Aux = sym 7546 v.AddArg(ptr) 7547 v.AddArg(idx) 7548 v.AddArg(val) 7549 v.AddArg(mem) 7550 return true 7551 } 7552 return false 7553 } 7554 func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool { 7555 b := v.Block 7556 _ = b 7557 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7558 // cond: 7559 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 7560 for { 7561 c := v.AuxInt 7562 sym := v.Aux 7563 v_0 := v.Args[0] 7564 if v_0.Op != OpAMD64ADDQconst { 7565 break 7566 } 7567 d := v_0.AuxInt 7568 ptr := v_0.Args[0] 7569 idx := v.Args[1] 7570 val := v.Args[2] 7571 mem := v.Args[3] 7572 v.reset(OpAMD64MOVQstoreidx8) 7573 v.AuxInt = c + d 7574 v.Aux = sym 7575 v.AddArg(ptr) 7576 v.AddArg(idx) 7577 v.AddArg(val) 7578 v.AddArg(mem) 7579 return true 7580 } 7581 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7582 // cond: 7583 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 7584 for { 7585 c := v.AuxInt 7586 sym := v.Aux 7587 ptr := v.Args[0] 7588 v_1 := v.Args[1] 7589 if v_1.Op != OpAMD64ADDQconst { 7590 break 7591 } 7592 d := v_1.AuxInt 7593 idx := v_1.Args[0] 7594 val := v.Args[2] 7595 mem := v.Args[3] 7596 v.reset(OpAMD64MOVQstoreidx8) 7597 v.AuxInt = c + 8*d 7598 v.Aux = sym 7599 v.AddArg(ptr) 7600 v.AddArg(idx) 7601 v.AddArg(val) 7602 v.AddArg(mem) 7603 return true 7604 } 7605 return false 7606 } 7607 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool { 7608 b := v.Block 7609 _ = b 7610 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 7611 // cond: is32Bit(off1+off2) 7612 // result: (MOVSDload [off1+off2] {sym} ptr mem) 7613 for { 7614 off1 := v.AuxInt 7615 sym := v.Aux 7616 v_0 := v.Args[0] 7617 if v_0.Op != OpAMD64ADDQconst { 7618 break 7619 } 7620 off2 := v_0.AuxInt 7621 ptr := v_0.Args[0] 7622 mem := v.Args[1] 7623 if !(is32Bit(off1 + off2)) { 7624 break 7625 } 7626 v.reset(OpAMD64MOVSDload) 7627 v.AuxInt = off1 + off2 7628 v.Aux = sym 7629 v.AddArg(ptr) 7630 v.AddArg(mem) 7631 return true 7632 } 7633 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7634 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7635 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7636 for { 7637 off1 := v.AuxInt 7638 sym1 := v.Aux 7639 v_0 := v.Args[0] 7640 if v_0.Op != OpAMD64LEAQ { 7641 break 7642 } 7643 off2 := v_0.AuxInt 7644 sym2 := v_0.Aux 7645 base := v_0.Args[0] 7646 mem := v.Args[1] 7647 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7648 break 7649 } 7650 v.reset(OpAMD64MOVSDload) 7651 v.AuxInt = off1 + off2 7652 v.Aux = mergeSym(sym1, sym2) 7653 v.AddArg(base) 7654 v.AddArg(mem) 7655 return true 7656 } 7657 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7658 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7659 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7660 for { 7661 off1 := v.AuxInt 7662 sym1 := v.Aux 7663 v_0 := v.Args[0] 7664 if v_0.Op != OpAMD64LEAQ1 { 7665 break 7666 } 7667 off2 := v_0.AuxInt 7668 sym2 := v_0.Aux 7669 ptr := v_0.Args[0] 7670 idx := v_0.Args[1] 7671 mem := v.Args[1] 7672 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7673 break 7674 } 7675 v.reset(OpAMD64MOVSDloadidx1) 7676 v.AuxInt = off1 + off2 7677 v.Aux = mergeSym(sym1, sym2) 7678 v.AddArg(ptr) 7679 v.AddArg(idx) 7680 v.AddArg(mem) 7681 return true 7682 } 7683 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7684 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7685 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7686 for { 7687 off1 := v.AuxInt 7688 sym1 := v.Aux 7689 v_0 := v.Args[0] 7690 if v_0.Op != OpAMD64LEAQ8 { 7691 break 7692 } 7693 off2 := v_0.AuxInt 7694 sym2 := v_0.Aux 7695 ptr := v_0.Args[0] 7696 idx := v_0.Args[1] 7697 mem := v.Args[1] 7698 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7699 break 7700 } 7701 v.reset(OpAMD64MOVSDloadidx8) 7702 v.AuxInt = off1 + off2 7703 v.Aux = mergeSym(sym1, sym2) 7704 v.AddArg(ptr) 7705 v.AddArg(idx) 7706 v.AddArg(mem) 7707 return true 7708 } 7709 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 7710 // cond: ptr.Op != OpSB 7711 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 7712 for { 7713 off := v.AuxInt 7714 sym := v.Aux 7715 v_0 := v.Args[0] 7716 if v_0.Op != OpAMD64ADDQ { 7717 break 7718 } 7719 ptr := v_0.Args[0] 7720 idx := v_0.Args[1] 7721 mem := v.Args[1] 7722 if !(ptr.Op != OpSB) { 7723 break 7724 } 7725 v.reset(OpAMD64MOVSDloadidx1) 7726 v.AuxInt = off 7727 v.Aux = sym 7728 v.AddArg(ptr) 7729 v.AddArg(idx) 7730 v.AddArg(mem) 7731 return true 7732 } 7733 return false 7734 } 7735 func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool { 7736 b := v.Block 7737 _ = b 7738 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7739 // cond: 7740 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 7741 for { 7742 c := v.AuxInt 7743 sym := v.Aux 7744 ptr := v.Args[0] 7745 v_1 := v.Args[1] 7746 if v_1.Op != OpAMD64SHLQconst { 7747 break 7748 } 7749 if v_1.AuxInt != 3 { 7750 break 7751 } 7752 idx := v_1.Args[0] 7753 mem := v.Args[2] 7754 v.reset(OpAMD64MOVSDloadidx8) 7755 v.AuxInt = c 7756 v.Aux = sym 7757 v.AddArg(ptr) 7758 v.AddArg(idx) 7759 v.AddArg(mem) 7760 return true 7761 } 7762 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7763 // cond: 7764 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 7765 for { 7766 c := v.AuxInt 7767 sym := v.Aux 7768 v_0 := v.Args[0] 7769 if v_0.Op != OpAMD64ADDQconst { 7770 break 7771 } 7772 d := v_0.AuxInt 7773 ptr := v_0.Args[0] 7774 idx := v.Args[1] 7775 mem := v.Args[2] 7776 v.reset(OpAMD64MOVSDloadidx1) 7777 v.AuxInt = c + d 7778 v.Aux = sym 7779 v.AddArg(ptr) 7780 v.AddArg(idx) 7781 v.AddArg(mem) 7782 return true 7783 } 7784 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7785 // cond: 7786 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 7787 for { 7788 c := v.AuxInt 7789 sym := v.Aux 7790 ptr := v.Args[0] 7791 v_1 := v.Args[1] 7792 if v_1.Op != OpAMD64ADDQconst { 7793 break 7794 } 7795 d := v_1.AuxInt 7796 idx := v_1.Args[0] 7797 mem := v.Args[2] 7798 v.reset(OpAMD64MOVSDloadidx1) 7799 v.AuxInt = c + d 7800 v.Aux = sym 7801 v.AddArg(ptr) 7802 v.AddArg(idx) 7803 v.AddArg(mem) 7804 return true 7805 } 7806 return false 7807 } 7808 func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool { 7809 b := v.Block 7810 _ = b 7811 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7812 // cond: 7813 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 7814 for { 7815 c := v.AuxInt 7816 sym := v.Aux 7817 v_0 := v.Args[0] 7818 if v_0.Op != OpAMD64ADDQconst { 7819 break 7820 } 7821 d := v_0.AuxInt 7822 ptr := v_0.Args[0] 7823 idx := v.Args[1] 7824 mem := v.Args[2] 7825 v.reset(OpAMD64MOVSDloadidx8) 7826 v.AuxInt = c + d 7827 v.Aux = sym 7828 v.AddArg(ptr) 7829 v.AddArg(idx) 7830 v.AddArg(mem) 7831 return true 7832 } 7833 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 7834 // cond: 7835 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 7836 for { 7837 c := v.AuxInt 7838 sym := v.Aux 7839 ptr := v.Args[0] 7840 v_1 := v.Args[1] 7841 if v_1.Op != OpAMD64ADDQconst { 7842 break 7843 } 7844 d := v_1.AuxInt 7845 idx := v_1.Args[0] 7846 mem := v.Args[2] 7847 v.reset(OpAMD64MOVSDloadidx8) 7848 v.AuxInt = c + 8*d 7849 v.Aux = sym 7850 v.AddArg(ptr) 7851 v.AddArg(idx) 7852 v.AddArg(mem) 7853 return true 7854 } 7855 return false 7856 } 7857 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool { 7858 b := v.Block 7859 _ = b 7860 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7861 // cond: is32Bit(off1+off2) 7862 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 7863 for { 7864 off1 := v.AuxInt 7865 sym := v.Aux 7866 v_0 := v.Args[0] 7867 if v_0.Op != OpAMD64ADDQconst { 7868 break 7869 } 7870 off2 := v_0.AuxInt 7871 ptr := v_0.Args[0] 7872 val := v.Args[1] 7873 mem := v.Args[2] 7874 if !(is32Bit(off1 + off2)) { 7875 break 7876 } 7877 v.reset(OpAMD64MOVSDstore) 7878 v.AuxInt = off1 + off2 7879 v.Aux = sym 7880 v.AddArg(ptr) 7881 v.AddArg(val) 7882 v.AddArg(mem) 7883 return true 7884 } 7885 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7886 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7887 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7888 for { 7889 off1 := v.AuxInt 7890 sym1 := v.Aux 7891 v_0 := v.Args[0] 7892 if v_0.Op != OpAMD64LEAQ { 7893 break 7894 } 7895 off2 := v_0.AuxInt 7896 sym2 := v_0.Aux 7897 base := v_0.Args[0] 7898 val := v.Args[1] 7899 mem := v.Args[2] 7900 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7901 break 7902 } 7903 v.reset(OpAMD64MOVSDstore) 7904 v.AuxInt = off1 + off2 7905 v.Aux = mergeSym(sym1, sym2) 7906 v.AddArg(base) 7907 v.AddArg(val) 7908 v.AddArg(mem) 7909 return true 7910 } 7911 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7912 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7913 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7914 for { 7915 off1 := v.AuxInt 7916 sym1 := v.Aux 7917 v_0 := v.Args[0] 7918 if v_0.Op != OpAMD64LEAQ1 { 7919 break 7920 } 7921 off2 := v_0.AuxInt 7922 sym2 := v_0.Aux 7923 ptr := v_0.Args[0] 7924 idx := v_0.Args[1] 7925 val := v.Args[1] 7926 mem := v.Args[2] 7927 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7928 break 7929 } 7930 v.reset(OpAMD64MOVSDstoreidx1) 7931 v.AuxInt = off1 + off2 7932 v.Aux = mergeSym(sym1, sym2) 7933 v.AddArg(ptr) 7934 v.AddArg(idx) 7935 v.AddArg(val) 7936 v.AddArg(mem) 7937 return true 7938 } 7939 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 7940 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7941 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7942 for { 7943 off1 := v.AuxInt 7944 sym1 := v.Aux 7945 v_0 := v.Args[0] 7946 if v_0.Op != OpAMD64LEAQ8 { 7947 break 7948 } 7949 off2 := v_0.AuxInt 7950 sym2 := v_0.Aux 7951 ptr := v_0.Args[0] 7952 idx := v_0.Args[1] 7953 val := v.Args[1] 7954 mem := v.Args[2] 7955 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7956 break 7957 } 7958 v.reset(OpAMD64MOVSDstoreidx8) 7959 v.AuxInt = off1 + off2 7960 v.Aux = mergeSym(sym1, sym2) 7961 v.AddArg(ptr) 7962 v.AddArg(idx) 7963 v.AddArg(val) 7964 v.AddArg(mem) 7965 return true 7966 } 7967 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 7968 // cond: ptr.Op != OpSB 7969 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 7970 for { 7971 off := v.AuxInt 7972 sym := v.Aux 7973 v_0 := v.Args[0] 7974 if v_0.Op != OpAMD64ADDQ { 7975 break 7976 } 7977 ptr := v_0.Args[0] 7978 idx := v_0.Args[1] 7979 val := v.Args[1] 7980 mem := v.Args[2] 7981 if !(ptr.Op != OpSB) { 7982 break 7983 } 7984 v.reset(OpAMD64MOVSDstoreidx1) 7985 v.AuxInt = off 7986 v.Aux = sym 7987 v.AddArg(ptr) 7988 v.AddArg(idx) 7989 v.AddArg(val) 7990 v.AddArg(mem) 7991 return true 7992 } 7993 return false 7994 } 7995 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool { 7996 b := v.Block 7997 _ = b 7998 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 7999 // cond: 8000 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 8001 for { 8002 c := v.AuxInt 8003 sym := v.Aux 8004 ptr := v.Args[0] 8005 v_1 := v.Args[1] 8006 if v_1.Op != OpAMD64SHLQconst { 8007 break 8008 } 8009 if v_1.AuxInt != 3 { 8010 break 8011 } 8012 idx := v_1.Args[0] 8013 val := v.Args[2] 8014 mem := v.Args[3] 8015 v.reset(OpAMD64MOVSDstoreidx8) 8016 v.AuxInt = c 8017 v.Aux = sym 8018 v.AddArg(ptr) 8019 v.AddArg(idx) 8020 v.AddArg(val) 8021 v.AddArg(mem) 8022 return true 8023 } 8024 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8025 // cond: 8026 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 8027 for { 8028 c := v.AuxInt 8029 sym := v.Aux 8030 v_0 := v.Args[0] 8031 if v_0.Op != OpAMD64ADDQconst { 8032 break 8033 } 8034 d := v_0.AuxInt 8035 ptr := v_0.Args[0] 8036 idx := v.Args[1] 8037 val := v.Args[2] 8038 mem := v.Args[3] 8039 v.reset(OpAMD64MOVSDstoreidx1) 8040 v.AuxInt = c + d 8041 v.Aux = sym 8042 v.AddArg(ptr) 8043 v.AddArg(idx) 8044 v.AddArg(val) 8045 v.AddArg(mem) 8046 return true 8047 } 8048 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8049 // cond: 8050 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 8051 for { 8052 c := v.AuxInt 8053 sym := v.Aux 8054 ptr := v.Args[0] 8055 v_1 := v.Args[1] 8056 if v_1.Op != OpAMD64ADDQconst { 8057 break 8058 } 8059 d := v_1.AuxInt 8060 idx := v_1.Args[0] 8061 val := v.Args[2] 8062 mem := v.Args[3] 8063 v.reset(OpAMD64MOVSDstoreidx1) 8064 v.AuxInt = c + d 8065 v.Aux = sym 8066 v.AddArg(ptr) 8067 v.AddArg(idx) 8068 v.AddArg(val) 8069 v.AddArg(mem) 8070 return true 8071 } 8072 return false 8073 } 8074 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool { 8075 b := v.Block 8076 _ = b 8077 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8078 // cond: 8079 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 8080 for { 8081 c := v.AuxInt 8082 sym := v.Aux 8083 v_0 := v.Args[0] 8084 if v_0.Op != OpAMD64ADDQconst { 8085 break 8086 } 8087 d := v_0.AuxInt 8088 ptr := v_0.Args[0] 8089 idx := v.Args[1] 8090 val := v.Args[2] 8091 mem := v.Args[3] 8092 v.reset(OpAMD64MOVSDstoreidx8) 8093 v.AuxInt = c + d 8094 v.Aux = sym 8095 v.AddArg(ptr) 8096 v.AddArg(idx) 8097 v.AddArg(val) 8098 v.AddArg(mem) 8099 return true 8100 } 8101 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8102 // cond: 8103 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 8104 for { 8105 c := v.AuxInt 8106 sym := v.Aux 8107 ptr := v.Args[0] 8108 v_1 := v.Args[1] 8109 if v_1.Op != OpAMD64ADDQconst { 8110 break 8111 } 8112 d := v_1.AuxInt 8113 idx := v_1.Args[0] 8114 val := v.Args[2] 8115 mem := v.Args[3] 8116 v.reset(OpAMD64MOVSDstoreidx8) 8117 v.AuxInt = c + 8*d 8118 v.Aux = sym 8119 v.AddArg(ptr) 8120 v.AddArg(idx) 8121 v.AddArg(val) 8122 v.AddArg(mem) 8123 return true 8124 } 8125 return false 8126 } 8127 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { 8128 b := v.Block 8129 _ = b 8130 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 8131 // cond: is32Bit(off1+off2) 8132 // result: (MOVSSload [off1+off2] {sym} ptr mem) 8133 for { 8134 off1 := v.AuxInt 8135 sym := v.Aux 8136 v_0 := v.Args[0] 8137 if v_0.Op != OpAMD64ADDQconst { 8138 break 8139 } 8140 off2 := v_0.AuxInt 8141 ptr := v_0.Args[0] 8142 mem := v.Args[1] 8143 if !(is32Bit(off1 + off2)) { 8144 break 8145 } 8146 v.reset(OpAMD64MOVSSload) 8147 v.AuxInt = off1 + off2 8148 v.Aux = sym 8149 v.AddArg(ptr) 8150 v.AddArg(mem) 8151 return true 8152 } 8153 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8154 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8155 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8156 for { 8157 off1 := v.AuxInt 8158 sym1 := v.Aux 8159 v_0 := v.Args[0] 8160 if v_0.Op != OpAMD64LEAQ { 8161 break 8162 } 8163 off2 := v_0.AuxInt 8164 sym2 := v_0.Aux 8165 base := v_0.Args[0] 8166 mem := v.Args[1] 8167 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8168 break 8169 } 8170 v.reset(OpAMD64MOVSSload) 8171 v.AuxInt = off1 + off2 8172 v.Aux = mergeSym(sym1, sym2) 8173 v.AddArg(base) 8174 v.AddArg(mem) 8175 return true 8176 } 8177 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8178 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8179 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8180 for { 8181 off1 := v.AuxInt 8182 sym1 := v.Aux 8183 v_0 := v.Args[0] 8184 if v_0.Op != OpAMD64LEAQ1 { 8185 break 8186 } 8187 off2 := v_0.AuxInt 8188 sym2 := v_0.Aux 8189 ptr := v_0.Args[0] 8190 idx := v_0.Args[1] 8191 mem := v.Args[1] 8192 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8193 break 8194 } 8195 v.reset(OpAMD64MOVSSloadidx1) 8196 v.AuxInt = off1 + off2 8197 v.Aux = mergeSym(sym1, sym2) 8198 v.AddArg(ptr) 8199 v.AddArg(idx) 8200 v.AddArg(mem) 8201 return true 8202 } 8203 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 8204 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8205 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8206 for { 8207 off1 := v.AuxInt 8208 sym1 := v.Aux 8209 v_0 := v.Args[0] 8210 if v_0.Op != OpAMD64LEAQ4 { 8211 break 8212 } 8213 off2 := v_0.AuxInt 8214 sym2 := v_0.Aux 8215 ptr := v_0.Args[0] 8216 idx := v_0.Args[1] 8217 mem := v.Args[1] 8218 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8219 break 8220 } 8221 v.reset(OpAMD64MOVSSloadidx4) 8222 v.AuxInt = off1 + off2 8223 v.Aux = mergeSym(sym1, sym2) 8224 v.AddArg(ptr) 8225 v.AddArg(idx) 8226 v.AddArg(mem) 8227 return true 8228 } 8229 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 8230 // cond: ptr.Op != OpSB 8231 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 8232 for { 8233 off := v.AuxInt 8234 sym := v.Aux 8235 v_0 := v.Args[0] 8236 if v_0.Op != OpAMD64ADDQ { 8237 break 8238 } 8239 ptr := v_0.Args[0] 8240 idx := v_0.Args[1] 8241 mem := v.Args[1] 8242 if !(ptr.Op != OpSB) { 8243 break 8244 } 8245 v.reset(OpAMD64MOVSSloadidx1) 8246 v.AuxInt = off 8247 v.Aux = sym 8248 v.AddArg(ptr) 8249 v.AddArg(idx) 8250 v.AddArg(mem) 8251 return true 8252 } 8253 return false 8254 } 8255 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool { 8256 b := v.Block 8257 _ = b 8258 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 8259 // cond: 8260 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 8261 for { 8262 c := v.AuxInt 8263 sym := v.Aux 8264 ptr := v.Args[0] 8265 v_1 := v.Args[1] 8266 if v_1.Op != OpAMD64SHLQconst { 8267 break 8268 } 8269 if v_1.AuxInt != 2 { 8270 break 8271 } 8272 idx := v_1.Args[0] 8273 mem := v.Args[2] 8274 v.reset(OpAMD64MOVSSloadidx4) 8275 v.AuxInt = c 8276 v.Aux = sym 8277 v.AddArg(ptr) 8278 v.AddArg(idx) 8279 v.AddArg(mem) 8280 return true 8281 } 8282 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8283 // cond: 8284 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 8285 for { 8286 c := v.AuxInt 8287 sym := v.Aux 8288 v_0 := v.Args[0] 8289 if v_0.Op != OpAMD64ADDQconst { 8290 break 8291 } 8292 d := v_0.AuxInt 8293 ptr := v_0.Args[0] 8294 idx := v.Args[1] 8295 mem := v.Args[2] 8296 v.reset(OpAMD64MOVSSloadidx1) 8297 v.AuxInt = c + d 8298 v.Aux = sym 8299 v.AddArg(ptr) 8300 v.AddArg(idx) 8301 v.AddArg(mem) 8302 return true 8303 } 8304 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8305 // cond: 8306 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 8307 for { 8308 c := v.AuxInt 8309 sym := v.Aux 8310 ptr := v.Args[0] 8311 v_1 := v.Args[1] 8312 if v_1.Op != OpAMD64ADDQconst { 8313 break 8314 } 8315 d := v_1.AuxInt 8316 idx := v_1.Args[0] 8317 mem := v.Args[2] 8318 v.reset(OpAMD64MOVSSloadidx1) 8319 v.AuxInt = c + d 8320 v.Aux = sym 8321 v.AddArg(ptr) 8322 v.AddArg(idx) 8323 v.AddArg(mem) 8324 return true 8325 } 8326 return false 8327 } 8328 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { 8329 b := v.Block 8330 _ = b 8331 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 8332 // cond: 8333 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 8334 for { 8335 c := v.AuxInt 8336 sym := v.Aux 8337 v_0 := v.Args[0] 8338 if v_0.Op != OpAMD64ADDQconst { 8339 break 8340 } 8341 d := v_0.AuxInt 8342 ptr := v_0.Args[0] 8343 idx := v.Args[1] 8344 mem := v.Args[2] 8345 v.reset(OpAMD64MOVSSloadidx4) 8346 v.AuxInt = c + d 8347 v.Aux = sym 8348 v.AddArg(ptr) 8349 v.AddArg(idx) 8350 v.AddArg(mem) 8351 return true 8352 } 8353 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 8354 // cond: 8355 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 8356 for { 8357 c := v.AuxInt 8358 sym := v.Aux 8359 ptr := v.Args[0] 8360 v_1 := v.Args[1] 8361 if v_1.Op != OpAMD64ADDQconst { 8362 break 8363 } 8364 d := v_1.AuxInt 8365 idx := v_1.Args[0] 8366 mem := v.Args[2] 8367 v.reset(OpAMD64MOVSSloadidx4) 8368 v.AuxInt = c + 4*d 8369 v.Aux = sym 8370 v.AddArg(ptr) 8371 v.AddArg(idx) 8372 v.AddArg(mem) 8373 return true 8374 } 8375 return false 8376 } 8377 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { 8378 b := v.Block 8379 _ = b 8380 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8381 // cond: is32Bit(off1+off2) 8382 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 8383 for { 8384 off1 := v.AuxInt 8385 sym := v.Aux 8386 v_0 := v.Args[0] 8387 if v_0.Op != OpAMD64ADDQconst { 8388 break 8389 } 8390 off2 := v_0.AuxInt 8391 ptr := v_0.Args[0] 8392 val := v.Args[1] 8393 mem := v.Args[2] 8394 if !(is32Bit(off1 + off2)) { 8395 break 8396 } 8397 v.reset(OpAMD64MOVSSstore) 8398 v.AuxInt = off1 + off2 8399 v.Aux = sym 8400 v.AddArg(ptr) 8401 v.AddArg(val) 8402 v.AddArg(mem) 8403 return true 8404 } 8405 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8406 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8407 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8408 for { 8409 off1 := v.AuxInt 8410 sym1 := v.Aux 8411 v_0 := v.Args[0] 8412 if v_0.Op != OpAMD64LEAQ { 8413 break 8414 } 8415 off2 := v_0.AuxInt 8416 sym2 := v_0.Aux 8417 base := v_0.Args[0] 8418 val := v.Args[1] 8419 mem := v.Args[2] 8420 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8421 break 8422 } 8423 v.reset(OpAMD64MOVSSstore) 8424 v.AuxInt = off1 + off2 8425 v.Aux = mergeSym(sym1, sym2) 8426 v.AddArg(base) 8427 v.AddArg(val) 8428 v.AddArg(mem) 8429 return true 8430 } 8431 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8432 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8433 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8434 for { 8435 off1 := v.AuxInt 8436 sym1 := v.Aux 8437 v_0 := v.Args[0] 8438 if v_0.Op != OpAMD64LEAQ1 { 8439 break 8440 } 8441 off2 := v_0.AuxInt 8442 sym2 := v_0.Aux 8443 ptr := v_0.Args[0] 8444 idx := v_0.Args[1] 8445 val := v.Args[1] 8446 mem := v.Args[2] 8447 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8448 break 8449 } 8450 v.reset(OpAMD64MOVSSstoreidx1) 8451 v.AuxInt = off1 + off2 8452 v.Aux = mergeSym(sym1, sym2) 8453 v.AddArg(ptr) 8454 v.AddArg(idx) 8455 v.AddArg(val) 8456 v.AddArg(mem) 8457 return true 8458 } 8459 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 8460 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8461 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8462 for { 8463 off1 := v.AuxInt 8464 sym1 := v.Aux 8465 v_0 := v.Args[0] 8466 if v_0.Op != OpAMD64LEAQ4 { 8467 break 8468 } 8469 off2 := v_0.AuxInt 8470 sym2 := v_0.Aux 8471 ptr := v_0.Args[0] 8472 idx := v_0.Args[1] 8473 val := v.Args[1] 8474 mem := v.Args[2] 8475 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8476 break 8477 } 8478 v.reset(OpAMD64MOVSSstoreidx4) 8479 v.AuxInt = off1 + off2 8480 v.Aux = mergeSym(sym1, sym2) 8481 v.AddArg(ptr) 8482 v.AddArg(idx) 8483 v.AddArg(val) 8484 v.AddArg(mem) 8485 return true 8486 } 8487 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 8488 // cond: ptr.Op != OpSB 8489 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 8490 for { 8491 off := v.AuxInt 8492 sym := v.Aux 8493 v_0 := v.Args[0] 8494 if v_0.Op != OpAMD64ADDQ { 8495 break 8496 } 8497 ptr := v_0.Args[0] 8498 idx := v_0.Args[1] 8499 val := v.Args[1] 8500 mem := v.Args[2] 8501 if !(ptr.Op != OpSB) { 8502 break 8503 } 8504 v.reset(OpAMD64MOVSSstoreidx1) 8505 v.AuxInt = off 8506 v.Aux = sym 8507 v.AddArg(ptr) 8508 v.AddArg(idx) 8509 v.AddArg(val) 8510 v.AddArg(mem) 8511 return true 8512 } 8513 return false 8514 } 8515 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool { 8516 b := v.Block 8517 _ = b 8518 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 8519 // cond: 8520 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 8521 for { 8522 c := v.AuxInt 8523 sym := v.Aux 8524 ptr := v.Args[0] 8525 v_1 := v.Args[1] 8526 if v_1.Op != OpAMD64SHLQconst { 8527 break 8528 } 8529 if v_1.AuxInt != 2 { 8530 break 8531 } 8532 idx := v_1.Args[0] 8533 val := v.Args[2] 8534 mem := v.Args[3] 8535 v.reset(OpAMD64MOVSSstoreidx4) 8536 v.AuxInt = c 8537 v.Aux = sym 8538 v.AddArg(ptr) 8539 v.AddArg(idx) 8540 v.AddArg(val) 8541 v.AddArg(mem) 8542 return true 8543 } 8544 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8545 // cond: 8546 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 8547 for { 8548 c := v.AuxInt 8549 sym := v.Aux 8550 v_0 := v.Args[0] 8551 if v_0.Op != OpAMD64ADDQconst { 8552 break 8553 } 8554 d := v_0.AuxInt 8555 ptr := v_0.Args[0] 8556 idx := v.Args[1] 8557 val := v.Args[2] 8558 mem := v.Args[3] 8559 v.reset(OpAMD64MOVSSstoreidx1) 8560 v.AuxInt = c + d 8561 v.Aux = sym 8562 v.AddArg(ptr) 8563 v.AddArg(idx) 8564 v.AddArg(val) 8565 v.AddArg(mem) 8566 return true 8567 } 8568 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8569 // cond: 8570 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 8571 for { 8572 c := v.AuxInt 8573 sym := v.Aux 8574 ptr := v.Args[0] 8575 v_1 := v.Args[1] 8576 if v_1.Op != OpAMD64ADDQconst { 8577 break 8578 } 8579 d := v_1.AuxInt 8580 idx := v_1.Args[0] 8581 val := v.Args[2] 8582 mem := v.Args[3] 8583 v.reset(OpAMD64MOVSSstoreidx1) 8584 v.AuxInt = c + d 8585 v.Aux = sym 8586 v.AddArg(ptr) 8587 v.AddArg(idx) 8588 v.AddArg(val) 8589 v.AddArg(mem) 8590 return true 8591 } 8592 return false 8593 } 8594 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { 8595 b := v.Block 8596 _ = b 8597 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8598 // cond: 8599 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 8600 for { 8601 c := v.AuxInt 8602 sym := v.Aux 8603 v_0 := v.Args[0] 8604 if v_0.Op != OpAMD64ADDQconst { 8605 break 8606 } 8607 d := v_0.AuxInt 8608 ptr := v_0.Args[0] 8609 idx := v.Args[1] 8610 val := v.Args[2] 8611 mem := v.Args[3] 8612 v.reset(OpAMD64MOVSSstoreidx4) 8613 v.AuxInt = c + d 8614 v.Aux = sym 8615 v.AddArg(ptr) 8616 v.AddArg(idx) 8617 v.AddArg(val) 8618 v.AddArg(mem) 8619 return true 8620 } 8621 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8622 // cond: 8623 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 8624 for { 8625 c := v.AuxInt 8626 sym := v.Aux 8627 ptr := v.Args[0] 8628 v_1 := v.Args[1] 8629 if v_1.Op != OpAMD64ADDQconst { 8630 break 8631 } 8632 d := v_1.AuxInt 8633 idx := v_1.Args[0] 8634 val := v.Args[2] 8635 mem := v.Args[3] 8636 v.reset(OpAMD64MOVSSstoreidx4) 8637 v.AuxInt = c + 4*d 8638 v.Aux = sym 8639 v.AddArg(ptr) 8640 v.AddArg(idx) 8641 v.AddArg(val) 8642 v.AddArg(mem) 8643 return true 8644 } 8645 return false 8646 } 8647 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { 8648 b := v.Block 8649 _ = b 8650 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 8651 // cond: x.Uses == 1 && clobber(x) 8652 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8653 for { 8654 x := v.Args[0] 8655 if x.Op != OpAMD64MOVWload { 8656 break 8657 } 8658 off := x.AuxInt 8659 sym := x.Aux 8660 ptr := x.Args[0] 8661 mem := x.Args[1] 8662 if !(x.Uses == 1 && clobber(x)) { 8663 break 8664 } 8665 b = x.Block 8666 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 8667 v.reset(OpCopy) 8668 v.AddArg(v0) 8669 v0.AuxInt = off 8670 v0.Aux = sym 8671 v0.AddArg(ptr) 8672 v0.AddArg(mem) 8673 return true 8674 } 8675 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 8676 // cond: x.Uses == 1 && clobber(x) 8677 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8678 for { 8679 x := v.Args[0] 8680 if x.Op != OpAMD64MOVLload { 8681 break 8682 } 8683 off := x.AuxInt 8684 sym := x.Aux 8685 ptr := x.Args[0] 8686 mem := x.Args[1] 8687 if !(x.Uses == 1 && clobber(x)) { 8688 break 8689 } 8690 b = x.Block 8691 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 8692 v.reset(OpCopy) 8693 v.AddArg(v0) 8694 v0.AuxInt = off 8695 v0.Aux = sym 8696 v0.AddArg(ptr) 8697 v0.AddArg(mem) 8698 return true 8699 } 8700 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 8701 // cond: x.Uses == 1 && clobber(x) 8702 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8703 for { 8704 x := v.Args[0] 8705 if x.Op != OpAMD64MOVQload { 8706 break 8707 } 8708 off := x.AuxInt 8709 sym := x.Aux 8710 ptr := x.Args[0] 8711 mem := x.Args[1] 8712 if !(x.Uses == 1 && clobber(x)) { 8713 break 8714 } 8715 b = x.Block 8716 v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) 8717 v.reset(OpCopy) 8718 v.AddArg(v0) 8719 v0.AuxInt = off 8720 v0.Aux = sym 8721 v0.AddArg(ptr) 8722 v0.AddArg(mem) 8723 return true 8724 } 8725 // match: (MOVWQSX (ANDLconst [c] x)) 8726 // cond: c & 0x8000 == 0 8727 // result: (ANDLconst [c & 0x7fff] x) 8728 for { 8729 v_0 := v.Args[0] 8730 if v_0.Op != OpAMD64ANDLconst { 8731 break 8732 } 8733 c := v_0.AuxInt 8734 x := v_0.Args[0] 8735 if !(c&0x8000 == 0) { 8736 break 8737 } 8738 v.reset(OpAMD64ANDLconst) 8739 v.AuxInt = c & 0x7fff 8740 v.AddArg(x) 8741 return true 8742 } 8743 return false 8744 } 8745 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool { 8746 b := v.Block 8747 _ = b 8748 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 8749 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8750 // result: (MOVWQSX x) 8751 for { 8752 off := v.AuxInt 8753 sym := v.Aux 8754 ptr := v.Args[0] 8755 v_1 := v.Args[1] 8756 if v_1.Op != OpAMD64MOVWstore { 8757 break 8758 } 8759 off2 := v_1.AuxInt 8760 sym2 := v_1.Aux 8761 ptr2 := v_1.Args[0] 8762 x := v_1.Args[1] 8763 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8764 break 8765 } 8766 v.reset(OpAMD64MOVWQSX) 8767 v.AddArg(x) 8768 return true 8769 } 8770 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8771 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8772 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8773 for { 8774 off1 := v.AuxInt 8775 sym1 := v.Aux 8776 v_0 := v.Args[0] 8777 if v_0.Op != OpAMD64LEAQ { 8778 break 8779 } 8780 off2 := v_0.AuxInt 8781 sym2 := v_0.Aux 8782 base := v_0.Args[0] 8783 mem := v.Args[1] 8784 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8785 break 8786 } 8787 v.reset(OpAMD64MOVWQSXload) 8788 v.AuxInt = off1 + off2 8789 v.Aux = mergeSym(sym1, sym2) 8790 v.AddArg(base) 8791 v.AddArg(mem) 8792 return true 8793 } 8794 return false 8795 } 8796 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { 8797 b := v.Block 8798 _ = b 8799 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 8800 // cond: x.Uses == 1 && clobber(x) 8801 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 8802 for { 8803 x := v.Args[0] 8804 if x.Op != OpAMD64MOVWload { 8805 break 8806 } 8807 off := x.AuxInt 8808 sym := x.Aux 8809 ptr := x.Args[0] 8810 mem := x.Args[1] 8811 if !(x.Uses == 1 && clobber(x)) { 8812 break 8813 } 8814 b = x.Block 8815 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type) 8816 v.reset(OpCopy) 8817 v.AddArg(v0) 8818 v0.AuxInt = off 8819 v0.Aux = sym 8820 v0.AddArg(ptr) 8821 v0.AddArg(mem) 8822 return true 8823 } 8824 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 8825 // cond: x.Uses == 1 && clobber(x) 8826 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 8827 for { 8828 x := v.Args[0] 8829 if x.Op != OpAMD64MOVLload { 8830 break 8831 } 8832 off := x.AuxInt 8833 sym := x.Aux 8834 ptr := x.Args[0] 8835 mem := x.Args[1] 8836 if !(x.Uses == 1 && clobber(x)) { 8837 break 8838 } 8839 b = x.Block 8840 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type) 8841 v.reset(OpCopy) 8842 v.AddArg(v0) 8843 v0.AuxInt = off 8844 v0.Aux = sym 8845 v0.AddArg(ptr) 8846 v0.AddArg(mem) 8847 return true 8848 } 8849 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 8850 // cond: x.Uses == 1 && clobber(x) 8851 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 8852 for { 8853 x := v.Args[0] 8854 if x.Op != OpAMD64MOVQload { 8855 break 8856 } 8857 off := x.AuxInt 8858 sym := x.Aux 8859 ptr := x.Args[0] 8860 mem := x.Args[1] 8861 if !(x.Uses == 1 && clobber(x)) { 8862 break 8863 } 8864 b = x.Block 8865 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type) 8866 v.reset(OpCopy) 8867 v.AddArg(v0) 8868 v0.AuxInt = off 8869 v0.Aux = sym 8870 v0.AddArg(ptr) 8871 v0.AddArg(mem) 8872 return true 8873 } 8874 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 8875 // cond: x.Uses == 1 && clobber(x) 8876 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 8877 for { 8878 x := v.Args[0] 8879 if x.Op != OpAMD64MOVWloadidx1 { 8880 break 8881 } 8882 off := x.AuxInt 8883 sym := x.Aux 8884 ptr := x.Args[0] 8885 idx := x.Args[1] 8886 mem := x.Args[2] 8887 if !(x.Uses == 1 && clobber(x)) { 8888 break 8889 } 8890 b = x.Block 8891 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 8892 v.reset(OpCopy) 8893 v.AddArg(v0) 8894 v0.AuxInt = off 8895 v0.Aux = sym 8896 v0.AddArg(ptr) 8897 v0.AddArg(idx) 8898 v0.AddArg(mem) 8899 return true 8900 } 8901 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 8902 // cond: x.Uses == 1 && clobber(x) 8903 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 8904 for { 8905 x := v.Args[0] 8906 if x.Op != OpAMD64MOVWloadidx2 { 8907 break 8908 } 8909 off := x.AuxInt 8910 sym := x.Aux 8911 ptr := x.Args[0] 8912 idx := x.Args[1] 8913 mem := x.Args[2] 8914 if !(x.Uses == 1 && clobber(x)) { 8915 break 8916 } 8917 b = x.Block 8918 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type) 8919 v.reset(OpCopy) 8920 v.AddArg(v0) 8921 v0.AuxInt = off 8922 v0.Aux = sym 8923 v0.AddArg(ptr) 8924 v0.AddArg(idx) 8925 v0.AddArg(mem) 8926 return true 8927 } 8928 // match: (MOVWQZX (ANDLconst [c] x)) 8929 // cond: 8930 // result: (ANDLconst [c & 0xffff] x) 8931 for { 8932 v_0 := v.Args[0] 8933 if v_0.Op != OpAMD64ANDLconst { 8934 break 8935 } 8936 c := v_0.AuxInt 8937 x := v_0.Args[0] 8938 v.reset(OpAMD64ANDLconst) 8939 v.AuxInt = c & 0xffff 8940 v.AddArg(x) 8941 return true 8942 } 8943 return false 8944 } 8945 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { 8946 b := v.Block 8947 _ = b 8948 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 8949 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8950 // result: (MOVWQZX x) 8951 for { 8952 off := v.AuxInt 8953 sym := v.Aux 8954 ptr := v.Args[0] 8955 v_1 := v.Args[1] 8956 if v_1.Op != OpAMD64MOVWstore { 8957 break 8958 } 8959 off2 := v_1.AuxInt 8960 sym2 := v_1.Aux 8961 ptr2 := v_1.Args[0] 8962 x := v_1.Args[1] 8963 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8964 break 8965 } 8966 v.reset(OpAMD64MOVWQZX) 8967 v.AddArg(x) 8968 return true 8969 } 8970 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 8971 // cond: is32Bit(off1+off2) 8972 // result: (MOVWload [off1+off2] {sym} ptr mem) 8973 for { 8974 off1 := v.AuxInt 8975 sym := v.Aux 8976 v_0 := v.Args[0] 8977 if v_0.Op != OpAMD64ADDQconst { 8978 break 8979 } 8980 off2 := v_0.AuxInt 8981 ptr := v_0.Args[0] 8982 mem := v.Args[1] 8983 if !(is32Bit(off1 + off2)) { 8984 break 8985 } 8986 v.reset(OpAMD64MOVWload) 8987 v.AuxInt = off1 + off2 8988 v.Aux = sym 8989 v.AddArg(ptr) 8990 v.AddArg(mem) 8991 return true 8992 } 8993 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8994 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8995 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8996 for { 8997 off1 := v.AuxInt 8998 sym1 := v.Aux 8999 v_0 := v.Args[0] 9000 if v_0.Op != OpAMD64LEAQ { 9001 break 9002 } 9003 off2 := v_0.AuxInt 9004 sym2 := v_0.Aux 9005 base := v_0.Args[0] 9006 mem := v.Args[1] 9007 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9008 break 9009 } 9010 v.reset(OpAMD64MOVWload) 9011 v.AuxInt = off1 + off2 9012 v.Aux = mergeSym(sym1, sym2) 9013 v.AddArg(base) 9014 v.AddArg(mem) 9015 return true 9016 } 9017 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9018 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9019 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9020 for { 9021 off1 := v.AuxInt 9022 sym1 := v.Aux 9023 v_0 := v.Args[0] 9024 if v_0.Op != OpAMD64LEAQ1 { 9025 break 9026 } 9027 off2 := v_0.AuxInt 9028 sym2 := v_0.Aux 9029 ptr := v_0.Args[0] 9030 idx := v_0.Args[1] 9031 mem := v.Args[1] 9032 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9033 break 9034 } 9035 v.reset(OpAMD64MOVWloadidx1) 9036 v.AuxInt = off1 + off2 9037 v.Aux = mergeSym(sym1, sym2) 9038 v.AddArg(ptr) 9039 v.AddArg(idx) 9040 v.AddArg(mem) 9041 return true 9042 } 9043 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 9044 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9045 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9046 for { 9047 off1 := v.AuxInt 9048 sym1 := v.Aux 9049 v_0 := v.Args[0] 9050 if v_0.Op != OpAMD64LEAQ2 { 9051 break 9052 } 9053 off2 := v_0.AuxInt 9054 sym2 := v_0.Aux 9055 ptr := v_0.Args[0] 9056 idx := v_0.Args[1] 9057 mem := v.Args[1] 9058 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9059 break 9060 } 9061 v.reset(OpAMD64MOVWloadidx2) 9062 v.AuxInt = off1 + off2 9063 v.Aux = mergeSym(sym1, sym2) 9064 v.AddArg(ptr) 9065 v.AddArg(idx) 9066 v.AddArg(mem) 9067 return true 9068 } 9069 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 9070 // cond: ptr.Op != OpSB 9071 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 9072 for { 9073 off := v.AuxInt 9074 sym := v.Aux 9075 v_0 := v.Args[0] 9076 if v_0.Op != OpAMD64ADDQ { 9077 break 9078 } 9079 ptr := v_0.Args[0] 9080 idx := v_0.Args[1] 9081 mem := v.Args[1] 9082 if !(ptr.Op != OpSB) { 9083 break 9084 } 9085 v.reset(OpAMD64MOVWloadidx1) 9086 v.AuxInt = off 9087 v.Aux = sym 9088 v.AddArg(ptr) 9089 v.AddArg(idx) 9090 v.AddArg(mem) 9091 return true 9092 } 9093 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 9094 // cond: canMergeSym(sym1, sym2) 9095 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9096 for { 9097 off1 := v.AuxInt 9098 sym1 := v.Aux 9099 v_0 := v.Args[0] 9100 if v_0.Op != OpAMD64LEAL { 9101 break 9102 } 9103 off2 := v_0.AuxInt 9104 sym2 := v_0.Aux 9105 base := v_0.Args[0] 9106 mem := v.Args[1] 9107 if !(canMergeSym(sym1, sym2)) { 9108 break 9109 } 9110 v.reset(OpAMD64MOVWload) 9111 v.AuxInt = off1 + off2 9112 v.Aux = mergeSym(sym1, sym2) 9113 v.AddArg(base) 9114 v.AddArg(mem) 9115 return true 9116 } 9117 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 9118 // cond: is32Bit(off1+off2) 9119 // result: (MOVWload [off1+off2] {sym} ptr mem) 9120 for { 9121 off1 := v.AuxInt 9122 sym := v.Aux 9123 v_0 := v.Args[0] 9124 if v_0.Op != OpAMD64ADDLconst { 9125 break 9126 } 9127 off2 := v_0.AuxInt 9128 ptr := v_0.Args[0] 9129 mem := v.Args[1] 9130 if !(is32Bit(off1 + off2)) { 9131 break 9132 } 9133 v.reset(OpAMD64MOVWload) 9134 v.AuxInt = off1 + off2 9135 v.Aux = sym 9136 v.AddArg(ptr) 9137 v.AddArg(mem) 9138 return true 9139 } 9140 return false 9141 } 9142 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool { 9143 b := v.Block 9144 _ = b 9145 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 9146 // cond: 9147 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 9148 for { 9149 c := v.AuxInt 9150 sym := v.Aux 9151 ptr := v.Args[0] 9152 v_1 := v.Args[1] 9153 if v_1.Op != OpAMD64SHLQconst { 9154 break 9155 } 9156 if v_1.AuxInt != 1 { 9157 break 9158 } 9159 idx := v_1.Args[0] 9160 mem := v.Args[2] 9161 v.reset(OpAMD64MOVWloadidx2) 9162 v.AuxInt = c 9163 v.Aux = sym 9164 v.AddArg(ptr) 9165 v.AddArg(idx) 9166 v.AddArg(mem) 9167 return true 9168 } 9169 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9170 // cond: 9171 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 9172 for { 9173 c := v.AuxInt 9174 sym := v.Aux 9175 v_0 := v.Args[0] 9176 if v_0.Op != OpAMD64ADDQconst { 9177 break 9178 } 9179 d := v_0.AuxInt 9180 ptr := v_0.Args[0] 9181 idx := v.Args[1] 9182 mem := v.Args[2] 9183 v.reset(OpAMD64MOVWloadidx1) 9184 v.AuxInt = c + d 9185 v.Aux = sym 9186 v.AddArg(ptr) 9187 v.AddArg(idx) 9188 v.AddArg(mem) 9189 return true 9190 } 9191 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9192 // cond: 9193 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 9194 for { 9195 c := v.AuxInt 9196 sym := v.Aux 9197 ptr := v.Args[0] 9198 v_1 := v.Args[1] 9199 if v_1.Op != OpAMD64ADDQconst { 9200 break 9201 } 9202 d := v_1.AuxInt 9203 idx := v_1.Args[0] 9204 mem := v.Args[2] 9205 v.reset(OpAMD64MOVWloadidx1) 9206 v.AuxInt = c + d 9207 v.Aux = sym 9208 v.AddArg(ptr) 9209 v.AddArg(idx) 9210 v.AddArg(mem) 9211 return true 9212 } 9213 return false 9214 } 9215 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { 9216 b := v.Block 9217 _ = b 9218 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 9219 // cond: 9220 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 9221 for { 9222 c := v.AuxInt 9223 sym := v.Aux 9224 v_0 := v.Args[0] 9225 if v_0.Op != OpAMD64ADDQconst { 9226 break 9227 } 9228 d := v_0.AuxInt 9229 ptr := v_0.Args[0] 9230 idx := v.Args[1] 9231 mem := v.Args[2] 9232 v.reset(OpAMD64MOVWloadidx2) 9233 v.AuxInt = c + d 9234 v.Aux = sym 9235 v.AddArg(ptr) 9236 v.AddArg(idx) 9237 v.AddArg(mem) 9238 return true 9239 } 9240 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 9241 // cond: 9242 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 9243 for { 9244 c := v.AuxInt 9245 sym := v.Aux 9246 ptr := v.Args[0] 9247 v_1 := v.Args[1] 9248 if v_1.Op != OpAMD64ADDQconst { 9249 break 9250 } 9251 d := v_1.AuxInt 9252 idx := v_1.Args[0] 9253 mem := v.Args[2] 9254 v.reset(OpAMD64MOVWloadidx2) 9255 v.AuxInt = c + 2*d 9256 v.Aux = sym 9257 v.AddArg(ptr) 9258 v.AddArg(idx) 9259 v.AddArg(mem) 9260 return true 9261 } 9262 return false 9263 } 9264 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { 9265 b := v.Block 9266 _ = b 9267 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 9268 // cond: 9269 // result: (MOVWstore [off] {sym} ptr x mem) 9270 for { 9271 off := v.AuxInt 9272 sym := v.Aux 9273 ptr := v.Args[0] 9274 v_1 := v.Args[1] 9275 if v_1.Op != OpAMD64MOVWQSX { 9276 break 9277 } 9278 x := v_1.Args[0] 9279 mem := v.Args[2] 9280 v.reset(OpAMD64MOVWstore) 9281 v.AuxInt = off 9282 v.Aux = sym 9283 v.AddArg(ptr) 9284 v.AddArg(x) 9285 v.AddArg(mem) 9286 return true 9287 } 9288 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 9289 // cond: 9290 // result: (MOVWstore [off] {sym} ptr x mem) 9291 for { 9292 off := v.AuxInt 9293 sym := v.Aux 9294 ptr := v.Args[0] 9295 v_1 := v.Args[1] 9296 if v_1.Op != OpAMD64MOVWQZX { 9297 break 9298 } 9299 x := v_1.Args[0] 9300 mem := v.Args[2] 9301 v.reset(OpAMD64MOVWstore) 9302 v.AuxInt = off 9303 v.Aux = sym 9304 v.AddArg(ptr) 9305 v.AddArg(x) 9306 v.AddArg(mem) 9307 return true 9308 } 9309 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9310 // cond: is32Bit(off1+off2) 9311 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 9312 for { 9313 off1 := v.AuxInt 9314 sym := v.Aux 9315 v_0 := v.Args[0] 9316 if v_0.Op != OpAMD64ADDQconst { 9317 break 9318 } 9319 off2 := v_0.AuxInt 9320 ptr := v_0.Args[0] 9321 val := v.Args[1] 9322 mem := v.Args[2] 9323 if !(is32Bit(off1 + off2)) { 9324 break 9325 } 9326 v.reset(OpAMD64MOVWstore) 9327 v.AuxInt = off1 + off2 9328 v.Aux = sym 9329 v.AddArg(ptr) 9330 v.AddArg(val) 9331 v.AddArg(mem) 9332 return true 9333 } 9334 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 9335 // cond: validOff(off) 9336 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 9337 for { 9338 off := v.AuxInt 9339 sym := v.Aux 9340 ptr := v.Args[0] 9341 v_1 := v.Args[1] 9342 if v_1.Op != OpAMD64MOVLconst { 9343 break 9344 } 9345 c := v_1.AuxInt 9346 mem := v.Args[2] 9347 if !(validOff(off)) { 9348 break 9349 } 9350 v.reset(OpAMD64MOVWstoreconst) 9351 v.AuxInt = makeValAndOff(int64(int16(c)), off) 9352 v.Aux = sym 9353 v.AddArg(ptr) 9354 v.AddArg(mem) 9355 return true 9356 } 9357 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9358 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9359 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9360 for { 9361 off1 := v.AuxInt 9362 sym1 := v.Aux 9363 v_0 := v.Args[0] 9364 if v_0.Op != OpAMD64LEAQ { 9365 break 9366 } 9367 off2 := v_0.AuxInt 9368 sym2 := v_0.Aux 9369 base := v_0.Args[0] 9370 val := v.Args[1] 9371 mem := v.Args[2] 9372 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9373 break 9374 } 9375 v.reset(OpAMD64MOVWstore) 9376 v.AuxInt = off1 + off2 9377 v.Aux = mergeSym(sym1, sym2) 9378 v.AddArg(base) 9379 v.AddArg(val) 9380 v.AddArg(mem) 9381 return true 9382 } 9383 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9384 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9385 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9386 for { 9387 off1 := v.AuxInt 9388 sym1 := v.Aux 9389 v_0 := v.Args[0] 9390 if v_0.Op != OpAMD64LEAQ1 { 9391 break 9392 } 9393 off2 := v_0.AuxInt 9394 sym2 := v_0.Aux 9395 ptr := v_0.Args[0] 9396 idx := v_0.Args[1] 9397 val := v.Args[1] 9398 mem := v.Args[2] 9399 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9400 break 9401 } 9402 v.reset(OpAMD64MOVWstoreidx1) 9403 v.AuxInt = off1 + off2 9404 v.Aux = mergeSym(sym1, sym2) 9405 v.AddArg(ptr) 9406 v.AddArg(idx) 9407 v.AddArg(val) 9408 v.AddArg(mem) 9409 return true 9410 } 9411 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 9412 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9413 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9414 for { 9415 off1 := v.AuxInt 9416 sym1 := v.Aux 9417 v_0 := v.Args[0] 9418 if v_0.Op != OpAMD64LEAQ2 { 9419 break 9420 } 9421 off2 := v_0.AuxInt 9422 sym2 := v_0.Aux 9423 ptr := v_0.Args[0] 9424 idx := v_0.Args[1] 9425 val := v.Args[1] 9426 mem := v.Args[2] 9427 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9428 break 9429 } 9430 v.reset(OpAMD64MOVWstoreidx2) 9431 v.AuxInt = off1 + off2 9432 v.Aux = mergeSym(sym1, sym2) 9433 v.AddArg(ptr) 9434 v.AddArg(idx) 9435 v.AddArg(val) 9436 v.AddArg(mem) 9437 return true 9438 } 9439 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 9440 // cond: ptr.Op != OpSB 9441 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 9442 for { 9443 off := v.AuxInt 9444 sym := v.Aux 9445 v_0 := v.Args[0] 9446 if v_0.Op != OpAMD64ADDQ { 9447 break 9448 } 9449 ptr := v_0.Args[0] 9450 idx := v_0.Args[1] 9451 val := v.Args[1] 9452 mem := v.Args[2] 9453 if !(ptr.Op != OpSB) { 9454 break 9455 } 9456 v.reset(OpAMD64MOVWstoreidx1) 9457 v.AuxInt = off 9458 v.Aux = sym 9459 v.AddArg(ptr) 9460 v.AddArg(idx) 9461 v.AddArg(val) 9462 v.AddArg(mem) 9463 return true 9464 } 9465 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 9466 // cond: x.Uses == 1 && clobber(x) 9467 // result: (MOVLstore [i-2] {s} p w mem) 9468 for { 9469 i := v.AuxInt 9470 s := v.Aux 9471 p := v.Args[0] 9472 v_1 := v.Args[1] 9473 if v_1.Op != OpAMD64SHRQconst { 9474 break 9475 } 9476 if v_1.AuxInt != 16 { 9477 break 9478 } 9479 w := v_1.Args[0] 9480 x := v.Args[2] 9481 if x.Op != OpAMD64MOVWstore { 9482 break 9483 } 9484 if x.AuxInt != i-2 { 9485 break 9486 } 9487 if x.Aux != s { 9488 break 9489 } 9490 if p != x.Args[0] { 9491 break 9492 } 9493 if w != x.Args[1] { 9494 break 9495 } 9496 mem := x.Args[2] 9497 if !(x.Uses == 1 && clobber(x)) { 9498 break 9499 } 9500 v.reset(OpAMD64MOVLstore) 9501 v.AuxInt = i - 2 9502 v.Aux = s 9503 v.AddArg(p) 9504 v.AddArg(w) 9505 v.AddArg(mem) 9506 return true 9507 } 9508 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 9509 // cond: x.Uses == 1 && clobber(x) 9510 // result: (MOVLstore [i-2] {s} p w0 mem) 9511 for { 9512 i := v.AuxInt 9513 s := v.Aux 9514 p := v.Args[0] 9515 v_1 := v.Args[1] 9516 if v_1.Op != OpAMD64SHRQconst { 9517 break 9518 } 9519 j := v_1.AuxInt 9520 w := v_1.Args[0] 9521 x := v.Args[2] 9522 if x.Op != OpAMD64MOVWstore { 9523 break 9524 } 9525 if x.AuxInt != i-2 { 9526 break 9527 } 9528 if x.Aux != s { 9529 break 9530 } 9531 if p != x.Args[0] { 9532 break 9533 } 9534 w0 := x.Args[1] 9535 if w0.Op != OpAMD64SHRQconst { 9536 break 9537 } 9538 if w0.AuxInt != j-16 { 9539 break 9540 } 9541 if w != w0.Args[0] { 9542 break 9543 } 9544 mem := x.Args[2] 9545 if !(x.Uses == 1 && clobber(x)) { 9546 break 9547 } 9548 v.reset(OpAMD64MOVLstore) 9549 v.AuxInt = i - 2 9550 v.Aux = s 9551 v.AddArg(p) 9552 v.AddArg(w0) 9553 v.AddArg(mem) 9554 return true 9555 } 9556 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 9557 // cond: canMergeSym(sym1, sym2) 9558 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9559 for { 9560 off1 := v.AuxInt 9561 sym1 := v.Aux 9562 v_0 := v.Args[0] 9563 if v_0.Op != OpAMD64LEAL { 9564 break 9565 } 9566 off2 := v_0.AuxInt 9567 sym2 := v_0.Aux 9568 base := v_0.Args[0] 9569 val := v.Args[1] 9570 mem := v.Args[2] 9571 if !(canMergeSym(sym1, sym2)) { 9572 break 9573 } 9574 v.reset(OpAMD64MOVWstore) 9575 v.AuxInt = off1 + off2 9576 v.Aux = mergeSym(sym1, sym2) 9577 v.AddArg(base) 9578 v.AddArg(val) 9579 v.AddArg(mem) 9580 return true 9581 } 9582 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 9583 // cond: is32Bit(off1+off2) 9584 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 9585 for { 9586 off1 := v.AuxInt 9587 sym := v.Aux 9588 v_0 := v.Args[0] 9589 if v_0.Op != OpAMD64ADDLconst { 9590 break 9591 } 9592 off2 := v_0.AuxInt 9593 ptr := v_0.Args[0] 9594 val := v.Args[1] 9595 mem := v.Args[2] 9596 if !(is32Bit(off1 + off2)) { 9597 break 9598 } 9599 v.reset(OpAMD64MOVWstore) 9600 v.AuxInt = off1 + off2 9601 v.Aux = sym 9602 v.AddArg(ptr) 9603 v.AddArg(val) 9604 v.AddArg(mem) 9605 return true 9606 } 9607 return false 9608 } 9609 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { 9610 b := v.Block 9611 _ = b 9612 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 9613 // cond: ValAndOff(sc).canAdd(off) 9614 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9615 for { 9616 sc := v.AuxInt 9617 s := v.Aux 9618 v_0 := v.Args[0] 9619 if v_0.Op != OpAMD64ADDQconst { 9620 break 9621 } 9622 off := v_0.AuxInt 9623 ptr := v_0.Args[0] 9624 mem := v.Args[1] 9625 if !(ValAndOff(sc).canAdd(off)) { 9626 break 9627 } 9628 v.reset(OpAMD64MOVWstoreconst) 9629 v.AuxInt = ValAndOff(sc).add(off) 9630 v.Aux = s 9631 v.AddArg(ptr) 9632 v.AddArg(mem) 9633 return true 9634 } 9635 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 9636 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9637 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9638 for { 9639 sc := v.AuxInt 9640 sym1 := v.Aux 9641 v_0 := v.Args[0] 9642 if v_0.Op != OpAMD64LEAQ { 9643 break 9644 } 9645 off := v_0.AuxInt 9646 sym2 := v_0.Aux 9647 ptr := v_0.Args[0] 9648 mem := v.Args[1] 9649 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9650 break 9651 } 9652 v.reset(OpAMD64MOVWstoreconst) 9653 v.AuxInt = ValAndOff(sc).add(off) 9654 v.Aux = mergeSym(sym1, sym2) 9655 v.AddArg(ptr) 9656 v.AddArg(mem) 9657 return true 9658 } 9659 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 9660 // cond: canMergeSym(sym1, sym2) 9661 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9662 for { 9663 x := v.AuxInt 9664 sym1 := v.Aux 9665 v_0 := v.Args[0] 9666 if v_0.Op != OpAMD64LEAQ1 { 9667 break 9668 } 9669 off := v_0.AuxInt 9670 sym2 := v_0.Aux 9671 ptr := v_0.Args[0] 9672 idx := v_0.Args[1] 9673 mem := v.Args[1] 9674 if !(canMergeSym(sym1, sym2)) { 9675 break 9676 } 9677 v.reset(OpAMD64MOVWstoreconstidx1) 9678 v.AuxInt = ValAndOff(x).add(off) 9679 v.Aux = mergeSym(sym1, sym2) 9680 v.AddArg(ptr) 9681 v.AddArg(idx) 9682 v.AddArg(mem) 9683 return true 9684 } 9685 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 9686 // cond: canMergeSym(sym1, sym2) 9687 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9688 for { 9689 x := v.AuxInt 9690 sym1 := v.Aux 9691 v_0 := v.Args[0] 9692 if v_0.Op != OpAMD64LEAQ2 { 9693 break 9694 } 9695 off := v_0.AuxInt 9696 sym2 := v_0.Aux 9697 ptr := v_0.Args[0] 9698 idx := v_0.Args[1] 9699 mem := v.Args[1] 9700 if !(canMergeSym(sym1, sym2)) { 9701 break 9702 } 9703 v.reset(OpAMD64MOVWstoreconstidx2) 9704 v.AuxInt = ValAndOff(x).add(off) 9705 v.Aux = mergeSym(sym1, sym2) 9706 v.AddArg(ptr) 9707 v.AddArg(idx) 9708 v.AddArg(mem) 9709 return true 9710 } 9711 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 9712 // cond: 9713 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 9714 for { 9715 x := v.AuxInt 9716 sym := v.Aux 9717 v_0 := v.Args[0] 9718 if v_0.Op != OpAMD64ADDQ { 9719 break 9720 } 9721 ptr := v_0.Args[0] 9722 idx := v_0.Args[1] 9723 mem := v.Args[1] 9724 v.reset(OpAMD64MOVWstoreconstidx1) 9725 v.AuxInt = x 9726 v.Aux = sym 9727 v.AddArg(ptr) 9728 v.AddArg(idx) 9729 v.AddArg(mem) 9730 return true 9731 } 9732 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 9733 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9734 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 9735 for { 9736 c := v.AuxInt 9737 s := v.Aux 9738 p := v.Args[0] 9739 x := v.Args[1] 9740 if x.Op != OpAMD64MOVWstoreconst { 9741 break 9742 } 9743 a := x.AuxInt 9744 if x.Aux != s { 9745 break 9746 } 9747 if p != x.Args[0] { 9748 break 9749 } 9750 mem := x.Args[1] 9751 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9752 break 9753 } 9754 v.reset(OpAMD64MOVLstoreconst) 9755 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9756 v.Aux = s 9757 v.AddArg(p) 9758 v.AddArg(mem) 9759 return true 9760 } 9761 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 9762 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9763 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9764 for { 9765 sc := v.AuxInt 9766 sym1 := v.Aux 9767 v_0 := v.Args[0] 9768 if v_0.Op != OpAMD64LEAL { 9769 break 9770 } 9771 off := v_0.AuxInt 9772 sym2 := v_0.Aux 9773 ptr := v_0.Args[0] 9774 mem := v.Args[1] 9775 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9776 break 9777 } 9778 v.reset(OpAMD64MOVWstoreconst) 9779 v.AuxInt = ValAndOff(sc).add(off) 9780 v.Aux = mergeSym(sym1, sym2) 9781 v.AddArg(ptr) 9782 v.AddArg(mem) 9783 return true 9784 } 9785 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 9786 // cond: ValAndOff(sc).canAdd(off) 9787 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9788 for { 9789 sc := v.AuxInt 9790 s := v.Aux 9791 v_0 := v.Args[0] 9792 if v_0.Op != OpAMD64ADDLconst { 9793 break 9794 } 9795 off := v_0.AuxInt 9796 ptr := v_0.Args[0] 9797 mem := v.Args[1] 9798 if !(ValAndOff(sc).canAdd(off)) { 9799 break 9800 } 9801 v.reset(OpAMD64MOVWstoreconst) 9802 v.AuxInt = ValAndOff(sc).add(off) 9803 v.Aux = s 9804 v.AddArg(ptr) 9805 v.AddArg(mem) 9806 return true 9807 } 9808 return false 9809 } 9810 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { 9811 b := v.Block 9812 _ = b 9813 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 9814 // cond: 9815 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 9816 for { 9817 c := v.AuxInt 9818 sym := v.Aux 9819 ptr := v.Args[0] 9820 v_1 := v.Args[1] 9821 if v_1.Op != OpAMD64SHLQconst { 9822 break 9823 } 9824 if v_1.AuxInt != 1 { 9825 break 9826 } 9827 idx := v_1.Args[0] 9828 mem := v.Args[2] 9829 v.reset(OpAMD64MOVWstoreconstidx2) 9830 v.AuxInt = c 9831 v.Aux = sym 9832 v.AddArg(ptr) 9833 v.AddArg(idx) 9834 v.AddArg(mem) 9835 return true 9836 } 9837 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 9838 // cond: 9839 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9840 for { 9841 x := v.AuxInt 9842 sym := v.Aux 9843 v_0 := v.Args[0] 9844 if v_0.Op != OpAMD64ADDQconst { 9845 break 9846 } 9847 c := v_0.AuxInt 9848 ptr := v_0.Args[0] 9849 idx := v.Args[1] 9850 mem := v.Args[2] 9851 v.reset(OpAMD64MOVWstoreconstidx1) 9852 v.AuxInt = ValAndOff(x).add(c) 9853 v.Aux = sym 9854 v.AddArg(ptr) 9855 v.AddArg(idx) 9856 v.AddArg(mem) 9857 return true 9858 } 9859 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 9860 // cond: 9861 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9862 for { 9863 x := v.AuxInt 9864 sym := v.Aux 9865 ptr := v.Args[0] 9866 v_1 := v.Args[1] 9867 if v_1.Op != OpAMD64ADDQconst { 9868 break 9869 } 9870 c := v_1.AuxInt 9871 idx := v_1.Args[0] 9872 mem := v.Args[2] 9873 v.reset(OpAMD64MOVWstoreconstidx1) 9874 v.AuxInt = ValAndOff(x).add(c) 9875 v.Aux = sym 9876 v.AddArg(ptr) 9877 v.AddArg(idx) 9878 v.AddArg(mem) 9879 return true 9880 } 9881 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 9882 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9883 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 9884 for { 9885 c := v.AuxInt 9886 s := v.Aux 9887 p := v.Args[0] 9888 i := v.Args[1] 9889 x := v.Args[2] 9890 if x.Op != OpAMD64MOVWstoreconstidx1 { 9891 break 9892 } 9893 a := x.AuxInt 9894 if x.Aux != s { 9895 break 9896 } 9897 if p != x.Args[0] { 9898 break 9899 } 9900 if i != x.Args[1] { 9901 break 9902 } 9903 mem := x.Args[2] 9904 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9905 break 9906 } 9907 v.reset(OpAMD64MOVLstoreconstidx1) 9908 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9909 v.Aux = s 9910 v.AddArg(p) 9911 v.AddArg(i) 9912 v.AddArg(mem) 9913 return true 9914 } 9915 return false 9916 } 9917 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { 9918 b := v.Block 9919 _ = b 9920 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 9921 // cond: 9922 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9923 for { 9924 x := v.AuxInt 9925 sym := v.Aux 9926 v_0 := v.Args[0] 9927 if v_0.Op != OpAMD64ADDQconst { 9928 break 9929 } 9930 c := v_0.AuxInt 9931 ptr := v_0.Args[0] 9932 idx := v.Args[1] 9933 mem := v.Args[2] 9934 v.reset(OpAMD64MOVWstoreconstidx2) 9935 v.AuxInt = ValAndOff(x).add(c) 9936 v.Aux = sym 9937 v.AddArg(ptr) 9938 v.AddArg(idx) 9939 v.AddArg(mem) 9940 return true 9941 } 9942 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 9943 // cond: 9944 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 9945 for { 9946 x := v.AuxInt 9947 sym := v.Aux 9948 ptr := v.Args[0] 9949 v_1 := v.Args[1] 9950 if v_1.Op != OpAMD64ADDQconst { 9951 break 9952 } 9953 c := v_1.AuxInt 9954 idx := v_1.Args[0] 9955 mem := v.Args[2] 9956 v.reset(OpAMD64MOVWstoreconstidx2) 9957 v.AuxInt = ValAndOff(x).add(2 * c) 9958 v.Aux = sym 9959 v.AddArg(ptr) 9960 v.AddArg(idx) 9961 v.AddArg(mem) 9962 return true 9963 } 9964 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 9965 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9966 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 9967 for { 9968 c := v.AuxInt 9969 s := v.Aux 9970 p := v.Args[0] 9971 i := v.Args[1] 9972 x := v.Args[2] 9973 if x.Op != OpAMD64MOVWstoreconstidx2 { 9974 break 9975 } 9976 a := x.AuxInt 9977 if x.Aux != s { 9978 break 9979 } 9980 if p != x.Args[0] { 9981 break 9982 } 9983 if i != x.Args[1] { 9984 break 9985 } 9986 mem := x.Args[2] 9987 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9988 break 9989 } 9990 v.reset(OpAMD64MOVLstoreconstidx1) 9991 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9992 v.Aux = s 9993 v.AddArg(p) 9994 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) 9995 v0.AuxInt = 1 9996 v0.AddArg(i) 9997 v.AddArg(v0) 9998 v.AddArg(mem) 9999 return true 10000 } 10001 return false 10002 } 10003 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { 10004 b := v.Block 10005 _ = b 10006 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 10007 // cond: 10008 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 10009 for { 10010 c := v.AuxInt 10011 sym := v.Aux 10012 ptr := v.Args[0] 10013 v_1 := v.Args[1] 10014 if v_1.Op != OpAMD64SHLQconst { 10015 break 10016 } 10017 if v_1.AuxInt != 1 { 10018 break 10019 } 10020 idx := v_1.Args[0] 10021 val := v.Args[2] 10022 mem := v.Args[3] 10023 v.reset(OpAMD64MOVWstoreidx2) 10024 v.AuxInt = c 10025 v.Aux = sym 10026 v.AddArg(ptr) 10027 v.AddArg(idx) 10028 v.AddArg(val) 10029 v.AddArg(mem) 10030 return true 10031 } 10032 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10033 // cond: 10034 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10035 for { 10036 c := v.AuxInt 10037 sym := v.Aux 10038 v_0 := v.Args[0] 10039 if v_0.Op != OpAMD64ADDQconst { 10040 break 10041 } 10042 d := v_0.AuxInt 10043 ptr := v_0.Args[0] 10044 idx := v.Args[1] 10045 val := v.Args[2] 10046 mem := v.Args[3] 10047 v.reset(OpAMD64MOVWstoreidx1) 10048 v.AuxInt = c + d 10049 v.Aux = sym 10050 v.AddArg(ptr) 10051 v.AddArg(idx) 10052 v.AddArg(val) 10053 v.AddArg(mem) 10054 return true 10055 } 10056 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10057 // cond: 10058 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10059 for { 10060 c := v.AuxInt 10061 sym := v.Aux 10062 ptr := v.Args[0] 10063 v_1 := v.Args[1] 10064 if v_1.Op != OpAMD64ADDQconst { 10065 break 10066 } 10067 d := v_1.AuxInt 10068 idx := v_1.Args[0] 10069 val := v.Args[2] 10070 mem := v.Args[3] 10071 v.reset(OpAMD64MOVWstoreidx1) 10072 v.AuxInt = c + d 10073 v.Aux = sym 10074 v.AddArg(ptr) 10075 v.AddArg(idx) 10076 v.AddArg(val) 10077 v.AddArg(mem) 10078 return true 10079 } 10080 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 10081 // cond: x.Uses == 1 && clobber(x) 10082 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 10083 for { 10084 i := v.AuxInt 10085 s := v.Aux 10086 p := v.Args[0] 10087 idx := v.Args[1] 10088 v_2 := v.Args[2] 10089 if v_2.Op != OpAMD64SHRQconst { 10090 break 10091 } 10092 if v_2.AuxInt != 16 { 10093 break 10094 } 10095 w := v_2.Args[0] 10096 x := v.Args[3] 10097 if x.Op != OpAMD64MOVWstoreidx1 { 10098 break 10099 } 10100 if x.AuxInt != i-2 { 10101 break 10102 } 10103 if x.Aux != s { 10104 break 10105 } 10106 if p != x.Args[0] { 10107 break 10108 } 10109 if idx != x.Args[1] { 10110 break 10111 } 10112 if w != x.Args[2] { 10113 break 10114 } 10115 mem := x.Args[3] 10116 if !(x.Uses == 1 && clobber(x)) { 10117 break 10118 } 10119 v.reset(OpAMD64MOVLstoreidx1) 10120 v.AuxInt = i - 2 10121 v.Aux = s 10122 v.AddArg(p) 10123 v.AddArg(idx) 10124 v.AddArg(w) 10125 v.AddArg(mem) 10126 return true 10127 } 10128 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 10129 // cond: x.Uses == 1 && clobber(x) 10130 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 10131 for { 10132 i := v.AuxInt 10133 s := v.Aux 10134 p := v.Args[0] 10135 idx := v.Args[1] 10136 v_2 := v.Args[2] 10137 if v_2.Op != OpAMD64SHRQconst { 10138 break 10139 } 10140 j := v_2.AuxInt 10141 w := v_2.Args[0] 10142 x := v.Args[3] 10143 if x.Op != OpAMD64MOVWstoreidx1 { 10144 break 10145 } 10146 if x.AuxInt != i-2 { 10147 break 10148 } 10149 if x.Aux != s { 10150 break 10151 } 10152 if p != x.Args[0] { 10153 break 10154 } 10155 if idx != x.Args[1] { 10156 break 10157 } 10158 w0 := x.Args[2] 10159 if w0.Op != OpAMD64SHRQconst { 10160 break 10161 } 10162 if w0.AuxInt != j-16 { 10163 break 10164 } 10165 if w != w0.Args[0] { 10166 break 10167 } 10168 mem := x.Args[3] 10169 if !(x.Uses == 1 && clobber(x)) { 10170 break 10171 } 10172 v.reset(OpAMD64MOVLstoreidx1) 10173 v.AuxInt = i - 2 10174 v.Aux = s 10175 v.AddArg(p) 10176 v.AddArg(idx) 10177 v.AddArg(w0) 10178 v.AddArg(mem) 10179 return true 10180 } 10181 return false 10182 } 10183 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { 10184 b := v.Block 10185 _ = b 10186 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10187 // cond: 10188 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 10189 for { 10190 c := v.AuxInt 10191 sym := v.Aux 10192 v_0 := v.Args[0] 10193 if v_0.Op != OpAMD64ADDQconst { 10194 break 10195 } 10196 d := v_0.AuxInt 10197 ptr := v_0.Args[0] 10198 idx := v.Args[1] 10199 val := v.Args[2] 10200 mem := v.Args[3] 10201 v.reset(OpAMD64MOVWstoreidx2) 10202 v.AuxInt = c + d 10203 v.Aux = sym 10204 v.AddArg(ptr) 10205 v.AddArg(idx) 10206 v.AddArg(val) 10207 v.AddArg(mem) 10208 return true 10209 } 10210 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10211 // cond: 10212 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 10213 for { 10214 c := v.AuxInt 10215 sym := v.Aux 10216 ptr := v.Args[0] 10217 v_1 := v.Args[1] 10218 if v_1.Op != OpAMD64ADDQconst { 10219 break 10220 } 10221 d := v_1.AuxInt 10222 idx := v_1.Args[0] 10223 val := v.Args[2] 10224 mem := v.Args[3] 10225 v.reset(OpAMD64MOVWstoreidx2) 10226 v.AuxInt = c + 2*d 10227 v.Aux = sym 10228 v.AddArg(ptr) 10229 v.AddArg(idx) 10230 v.AddArg(val) 10231 v.AddArg(mem) 10232 return true 10233 } 10234 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 10235 // cond: x.Uses == 1 && clobber(x) 10236 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 10237 for { 10238 i := v.AuxInt 10239 s := v.Aux 10240 p := v.Args[0] 10241 idx := v.Args[1] 10242 v_2 := v.Args[2] 10243 if v_2.Op != OpAMD64SHRQconst { 10244 break 10245 } 10246 if v_2.AuxInt != 16 { 10247 break 10248 } 10249 w := v_2.Args[0] 10250 x := v.Args[3] 10251 if x.Op != OpAMD64MOVWstoreidx2 { 10252 break 10253 } 10254 if x.AuxInt != i-2 { 10255 break 10256 } 10257 if x.Aux != s { 10258 break 10259 } 10260 if p != x.Args[0] { 10261 break 10262 } 10263 if idx != x.Args[1] { 10264 break 10265 } 10266 if w != x.Args[2] { 10267 break 10268 } 10269 mem := x.Args[3] 10270 if !(x.Uses == 1 && clobber(x)) { 10271 break 10272 } 10273 v.reset(OpAMD64MOVLstoreidx1) 10274 v.AuxInt = i - 2 10275 v.Aux = s 10276 v.AddArg(p) 10277 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 10278 v0.AuxInt = 1 10279 v0.AddArg(idx) 10280 v.AddArg(v0) 10281 v.AddArg(w) 10282 v.AddArg(mem) 10283 return true 10284 } 10285 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 10286 // cond: x.Uses == 1 && clobber(x) 10287 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 10288 for { 10289 i := v.AuxInt 10290 s := v.Aux 10291 p := v.Args[0] 10292 idx := v.Args[1] 10293 v_2 := v.Args[2] 10294 if v_2.Op != OpAMD64SHRQconst { 10295 break 10296 } 10297 j := v_2.AuxInt 10298 w := v_2.Args[0] 10299 x := v.Args[3] 10300 if x.Op != OpAMD64MOVWstoreidx2 { 10301 break 10302 } 10303 if x.AuxInt != i-2 { 10304 break 10305 } 10306 if x.Aux != s { 10307 break 10308 } 10309 if p != x.Args[0] { 10310 break 10311 } 10312 if idx != x.Args[1] { 10313 break 10314 } 10315 w0 := x.Args[2] 10316 if w0.Op != OpAMD64SHRQconst { 10317 break 10318 } 10319 if w0.AuxInt != j-16 { 10320 break 10321 } 10322 if w != w0.Args[0] { 10323 break 10324 } 10325 mem := x.Args[3] 10326 if !(x.Uses == 1 && clobber(x)) { 10327 break 10328 } 10329 v.reset(OpAMD64MOVLstoreidx1) 10330 v.AuxInt = i - 2 10331 v.Aux = s 10332 v.AddArg(p) 10333 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) 10334 v0.AuxInt = 1 10335 v0.AddArg(idx) 10336 v.AddArg(v0) 10337 v.AddArg(w0) 10338 v.AddArg(mem) 10339 return true 10340 } 10341 return false 10342 } 10343 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 10344 b := v.Block 10345 _ = b 10346 // match: (MULL x (MOVLconst [c])) 10347 // cond: 10348 // result: (MULLconst [c] x) 10349 for { 10350 x := v.Args[0] 10351 v_1 := v.Args[1] 10352 if v_1.Op != OpAMD64MOVLconst { 10353 break 10354 } 10355 c := v_1.AuxInt 10356 v.reset(OpAMD64MULLconst) 10357 v.AuxInt = c 10358 v.AddArg(x) 10359 return true 10360 } 10361 // match: (MULL (MOVLconst [c]) x) 10362 // cond: 10363 // result: (MULLconst [c] x) 10364 for { 10365 v_0 := v.Args[0] 10366 if v_0.Op != OpAMD64MOVLconst { 10367 break 10368 } 10369 c := v_0.AuxInt 10370 x := v.Args[1] 10371 v.reset(OpAMD64MULLconst) 10372 v.AuxInt = c 10373 v.AddArg(x) 10374 return true 10375 } 10376 return false 10377 } 10378 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 10379 b := v.Block 10380 _ = b 10381 // match: (MULLconst [c] (MULLconst [d] x)) 10382 // cond: 10383 // result: (MULLconst [int64(int32(c * d))] x) 10384 for { 10385 c := v.AuxInt 10386 v_0 := v.Args[0] 10387 if v_0.Op != OpAMD64MULLconst { 10388 break 10389 } 10390 d := v_0.AuxInt 10391 x := v_0.Args[0] 10392 v.reset(OpAMD64MULLconst) 10393 v.AuxInt = int64(int32(c * d)) 10394 v.AddArg(x) 10395 return true 10396 } 10397 // match: (MULLconst [c] (MOVLconst [d])) 10398 // cond: 10399 // result: (MOVLconst [int64(int32(c*d))]) 10400 for { 10401 c := v.AuxInt 10402 v_0 := v.Args[0] 10403 if v_0.Op != OpAMD64MOVLconst { 10404 break 10405 } 10406 d := v_0.AuxInt 10407 v.reset(OpAMD64MOVLconst) 10408 v.AuxInt = int64(int32(c * d)) 10409 return true 10410 } 10411 return false 10412 } 10413 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 10414 b := v.Block 10415 _ = b 10416 // match: (MULQ x (MOVQconst [c])) 10417 // cond: is32Bit(c) 10418 // result: (MULQconst [c] x) 10419 for { 10420 x := v.Args[0] 10421 v_1 := v.Args[1] 10422 if v_1.Op != OpAMD64MOVQconst { 10423 break 10424 } 10425 c := v_1.AuxInt 10426 if !(is32Bit(c)) { 10427 break 10428 } 10429 v.reset(OpAMD64MULQconst) 10430 v.AuxInt = c 10431 v.AddArg(x) 10432 return true 10433 } 10434 // match: (MULQ (MOVQconst [c]) x) 10435 // cond: is32Bit(c) 10436 // result: (MULQconst [c] x) 10437 for { 10438 v_0 := v.Args[0] 10439 if v_0.Op != OpAMD64MOVQconst { 10440 break 10441 } 10442 c := v_0.AuxInt 10443 x := v.Args[1] 10444 if !(is32Bit(c)) { 10445 break 10446 } 10447 v.reset(OpAMD64MULQconst) 10448 v.AuxInt = c 10449 v.AddArg(x) 10450 return true 10451 } 10452 return false 10453 } 10454 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 10455 b := v.Block 10456 _ = b 10457 // match: (MULQconst [c] (MULQconst [d] x)) 10458 // cond: is32Bit(c*d) 10459 // result: (MULQconst [c * d] x) 10460 for { 10461 c := v.AuxInt 10462 v_0 := v.Args[0] 10463 if v_0.Op != OpAMD64MULQconst { 10464 break 10465 } 10466 d := v_0.AuxInt 10467 x := v_0.Args[0] 10468 if !(is32Bit(c * d)) { 10469 break 10470 } 10471 v.reset(OpAMD64MULQconst) 10472 v.AuxInt = c * d 10473 v.AddArg(x) 10474 return true 10475 } 10476 // match: (MULQconst [-1] x) 10477 // cond: 10478 // result: (NEGQ x) 10479 for { 10480 if v.AuxInt != -1 { 10481 break 10482 } 10483 x := v.Args[0] 10484 v.reset(OpAMD64NEGQ) 10485 v.AddArg(x) 10486 return true 10487 } 10488 // match: (MULQconst [0] _) 10489 // cond: 10490 // result: (MOVQconst [0]) 10491 for { 10492 if v.AuxInt != 0 { 10493 break 10494 } 10495 v.reset(OpAMD64MOVQconst) 10496 v.AuxInt = 0 10497 return true 10498 } 10499 // match: (MULQconst [1] x) 10500 // cond: 10501 // result: x 10502 for { 10503 if v.AuxInt != 1 { 10504 break 10505 } 10506 x := v.Args[0] 10507 v.reset(OpCopy) 10508 v.Type = x.Type 10509 v.AddArg(x) 10510 return true 10511 } 10512 // match: (MULQconst [3] x) 10513 // cond: 10514 // result: (LEAQ2 x x) 10515 for { 10516 if v.AuxInt != 3 { 10517 break 10518 } 10519 x := v.Args[0] 10520 v.reset(OpAMD64LEAQ2) 10521 v.AddArg(x) 10522 v.AddArg(x) 10523 return true 10524 } 10525 // match: (MULQconst [5] x) 10526 // cond: 10527 // result: (LEAQ4 x x) 10528 for { 10529 if v.AuxInt != 5 { 10530 break 10531 } 10532 x := v.Args[0] 10533 v.reset(OpAMD64LEAQ4) 10534 v.AddArg(x) 10535 v.AddArg(x) 10536 return true 10537 } 10538 // match: (MULQconst [7] x) 10539 // cond: 10540 // result: (LEAQ8 (NEGQ <v.Type> x) x) 10541 for { 10542 if v.AuxInt != 7 { 10543 break 10544 } 10545 x := v.Args[0] 10546 v.reset(OpAMD64LEAQ8) 10547 v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type) 10548 v0.AddArg(x) 10549 v.AddArg(v0) 10550 v.AddArg(x) 10551 return true 10552 } 10553 // match: (MULQconst [9] x) 10554 // cond: 10555 // result: (LEAQ8 x x) 10556 for { 10557 if v.AuxInt != 9 { 10558 break 10559 } 10560 x := v.Args[0] 10561 v.reset(OpAMD64LEAQ8) 10562 v.AddArg(x) 10563 v.AddArg(x) 10564 return true 10565 } 10566 // match: (MULQconst [11] x) 10567 // cond: 10568 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 10569 for { 10570 if v.AuxInt != 11 { 10571 break 10572 } 10573 x := v.Args[0] 10574 v.reset(OpAMD64LEAQ2) 10575 v.AddArg(x) 10576 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10577 v0.AddArg(x) 10578 v0.AddArg(x) 10579 v.AddArg(v0) 10580 return true 10581 } 10582 // match: (MULQconst [13] x) 10583 // cond: 10584 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 10585 for { 10586 if v.AuxInt != 13 { 10587 break 10588 } 10589 x := v.Args[0] 10590 v.reset(OpAMD64LEAQ4) 10591 v.AddArg(x) 10592 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10593 v0.AddArg(x) 10594 v0.AddArg(x) 10595 v.AddArg(v0) 10596 return true 10597 } 10598 // match: (MULQconst [21] x) 10599 // cond: 10600 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 10601 for { 10602 if v.AuxInt != 21 { 10603 break 10604 } 10605 x := v.Args[0] 10606 v.reset(OpAMD64LEAQ4) 10607 v.AddArg(x) 10608 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10609 v0.AddArg(x) 10610 v0.AddArg(x) 10611 v.AddArg(v0) 10612 return true 10613 } 10614 // match: (MULQconst [25] x) 10615 // cond: 10616 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 10617 for { 10618 if v.AuxInt != 25 { 10619 break 10620 } 10621 x := v.Args[0] 10622 v.reset(OpAMD64LEAQ8) 10623 v.AddArg(x) 10624 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10625 v0.AddArg(x) 10626 v0.AddArg(x) 10627 v.AddArg(v0) 10628 return true 10629 } 10630 // match: (MULQconst [37] x) 10631 // cond: 10632 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 10633 for { 10634 if v.AuxInt != 37 { 10635 break 10636 } 10637 x := v.Args[0] 10638 v.reset(OpAMD64LEAQ4) 10639 v.AddArg(x) 10640 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 10641 v0.AddArg(x) 10642 v0.AddArg(x) 10643 v.AddArg(v0) 10644 return true 10645 } 10646 // match: (MULQconst [41] x) 10647 // cond: 10648 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 10649 for { 10650 if v.AuxInt != 41 { 10651 break 10652 } 10653 x := v.Args[0] 10654 v.reset(OpAMD64LEAQ8) 10655 v.AddArg(x) 10656 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10657 v0.AddArg(x) 10658 v0.AddArg(x) 10659 v.AddArg(v0) 10660 return true 10661 } 10662 // match: (MULQconst [73] x) 10663 // cond: 10664 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 10665 for { 10666 if v.AuxInt != 73 { 10667 break 10668 } 10669 x := v.Args[0] 10670 v.reset(OpAMD64LEAQ8) 10671 v.AddArg(x) 10672 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 10673 v0.AddArg(x) 10674 v0.AddArg(x) 10675 v.AddArg(v0) 10676 return true 10677 } 10678 // match: (MULQconst [c] x) 10679 // cond: isPowerOfTwo(c) 10680 // result: (SHLQconst [log2(c)] x) 10681 for { 10682 c := v.AuxInt 10683 x := v.Args[0] 10684 if !(isPowerOfTwo(c)) { 10685 break 10686 } 10687 v.reset(OpAMD64SHLQconst) 10688 v.AuxInt = log2(c) 10689 v.AddArg(x) 10690 return true 10691 } 10692 // match: (MULQconst [c] x) 10693 // cond: isPowerOfTwo(c+1) && c >= 15 10694 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 10695 for { 10696 c := v.AuxInt 10697 x := v.Args[0] 10698 if !(isPowerOfTwo(c+1) && c >= 15) { 10699 break 10700 } 10701 v.reset(OpAMD64SUBQ) 10702 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10703 v0.AuxInt = log2(c + 1) 10704 v0.AddArg(x) 10705 v.AddArg(v0) 10706 v.AddArg(x) 10707 return true 10708 } 10709 // match: (MULQconst [c] x) 10710 // cond: isPowerOfTwo(c-1) && c >= 17 10711 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 10712 for { 10713 c := v.AuxInt 10714 x := v.Args[0] 10715 if !(isPowerOfTwo(c-1) && c >= 17) { 10716 break 10717 } 10718 v.reset(OpAMD64LEAQ1) 10719 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10720 v0.AuxInt = log2(c - 1) 10721 v0.AddArg(x) 10722 v.AddArg(v0) 10723 v.AddArg(x) 10724 return true 10725 } 10726 // match: (MULQconst [c] x) 10727 // cond: isPowerOfTwo(c-2) && c >= 34 10728 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 10729 for { 10730 c := v.AuxInt 10731 x := v.Args[0] 10732 if !(isPowerOfTwo(c-2) && c >= 34) { 10733 break 10734 } 10735 v.reset(OpAMD64LEAQ2) 10736 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10737 v0.AuxInt = log2(c - 2) 10738 v0.AddArg(x) 10739 v.AddArg(v0) 10740 v.AddArg(x) 10741 return true 10742 } 10743 // match: (MULQconst [c] x) 10744 // cond: isPowerOfTwo(c-4) && c >= 68 10745 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 10746 for { 10747 c := v.AuxInt 10748 x := v.Args[0] 10749 if !(isPowerOfTwo(c-4) && c >= 68) { 10750 break 10751 } 10752 v.reset(OpAMD64LEAQ4) 10753 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10754 v0.AuxInt = log2(c - 4) 10755 v0.AddArg(x) 10756 v.AddArg(v0) 10757 v.AddArg(x) 10758 return true 10759 } 10760 // match: (MULQconst [c] x) 10761 // cond: isPowerOfTwo(c-8) && c >= 136 10762 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 10763 for { 10764 c := v.AuxInt 10765 x := v.Args[0] 10766 if !(isPowerOfTwo(c-8) && c >= 136) { 10767 break 10768 } 10769 v.reset(OpAMD64LEAQ8) 10770 v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type) 10771 v0.AuxInt = log2(c - 8) 10772 v0.AddArg(x) 10773 v.AddArg(v0) 10774 v.AddArg(x) 10775 return true 10776 } 10777 // match: (MULQconst [c] x) 10778 // cond: c%3 == 0 && isPowerOfTwo(c/3) 10779 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 10780 for { 10781 c := v.AuxInt 10782 x := v.Args[0] 10783 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 10784 break 10785 } 10786 v.reset(OpAMD64SHLQconst) 10787 v.AuxInt = log2(c / 3) 10788 v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type) 10789 v0.AddArg(x) 10790 v0.AddArg(x) 10791 v.AddArg(v0) 10792 return true 10793 } 10794 // match: (MULQconst [c] x) 10795 // cond: c%5 == 0 && isPowerOfTwo(c/5) 10796 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 10797 for { 10798 c := v.AuxInt 10799 x := v.Args[0] 10800 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 10801 break 10802 } 10803 v.reset(OpAMD64SHLQconst) 10804 v.AuxInt = log2(c / 5) 10805 v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type) 10806 v0.AddArg(x) 10807 v0.AddArg(x) 10808 v.AddArg(v0) 10809 return true 10810 } 10811 // match: (MULQconst [c] x) 10812 // cond: c%9 == 0 && isPowerOfTwo(c/9) 10813 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 10814 for { 10815 c := v.AuxInt 10816 x := v.Args[0] 10817 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 10818 break 10819 } 10820 v.reset(OpAMD64SHLQconst) 10821 v.AuxInt = log2(c / 9) 10822 v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type) 10823 v0.AddArg(x) 10824 v0.AddArg(x) 10825 v.AddArg(v0) 10826 return true 10827 } 10828 // match: (MULQconst [c] (MOVQconst [d])) 10829 // cond: 10830 // result: (MOVQconst [c*d]) 10831 for { 10832 c := v.AuxInt 10833 v_0 := v.Args[0] 10834 if v_0.Op != OpAMD64MOVQconst { 10835 break 10836 } 10837 d := v_0.AuxInt 10838 v.reset(OpAMD64MOVQconst) 10839 v.AuxInt = c * d 10840 return true 10841 } 10842 return false 10843 } 10844 func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { 10845 b := v.Block 10846 _ = b 10847 // match: (NEGL (MOVLconst [c])) 10848 // cond: 10849 // result: (MOVLconst [int64(int32(-c))]) 10850 for { 10851 v_0 := v.Args[0] 10852 if v_0.Op != OpAMD64MOVLconst { 10853 break 10854 } 10855 c := v_0.AuxInt 10856 v.reset(OpAMD64MOVLconst) 10857 v.AuxInt = int64(int32(-c)) 10858 return true 10859 } 10860 return false 10861 } 10862 func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { 10863 b := v.Block 10864 _ = b 10865 // match: (NEGQ (MOVQconst [c])) 10866 // cond: 10867 // result: (MOVQconst [-c]) 10868 for { 10869 v_0 := v.Args[0] 10870 if v_0.Op != OpAMD64MOVQconst { 10871 break 10872 } 10873 c := v_0.AuxInt 10874 v.reset(OpAMD64MOVQconst) 10875 v.AuxInt = -c 10876 return true 10877 } 10878 return false 10879 } 10880 func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { 10881 b := v.Block 10882 _ = b 10883 // match: (NOTL (MOVLconst [c])) 10884 // cond: 10885 // result: (MOVLconst [^c]) 10886 for { 10887 v_0 := v.Args[0] 10888 if v_0.Op != OpAMD64MOVLconst { 10889 break 10890 } 10891 c := v_0.AuxInt 10892 v.reset(OpAMD64MOVLconst) 10893 v.AuxInt = ^c 10894 return true 10895 } 10896 return false 10897 } 10898 func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { 10899 b := v.Block 10900 _ = b 10901 // match: (NOTQ (MOVQconst [c])) 10902 // cond: 10903 // result: (MOVQconst [^c]) 10904 for { 10905 v_0 := v.Args[0] 10906 if v_0.Op != OpAMD64MOVQconst { 10907 break 10908 } 10909 c := v_0.AuxInt 10910 v.reset(OpAMD64MOVQconst) 10911 v.AuxInt = ^c 10912 return true 10913 } 10914 return false 10915 } 10916 func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { 10917 b := v.Block 10918 _ = b 10919 // match: (ORL x (MOVLconst [c])) 10920 // cond: 10921 // result: (ORLconst [c] x) 10922 for { 10923 x := v.Args[0] 10924 v_1 := v.Args[1] 10925 if v_1.Op != OpAMD64MOVLconst { 10926 break 10927 } 10928 c := v_1.AuxInt 10929 v.reset(OpAMD64ORLconst) 10930 v.AuxInt = c 10931 v.AddArg(x) 10932 return true 10933 } 10934 // match: (ORL (MOVLconst [c]) x) 10935 // cond: 10936 // result: (ORLconst [c] x) 10937 for { 10938 v_0 := v.Args[0] 10939 if v_0.Op != OpAMD64MOVLconst { 10940 break 10941 } 10942 c := v_0.AuxInt 10943 x := v.Args[1] 10944 v.reset(OpAMD64ORLconst) 10945 v.AuxInt = c 10946 v.AddArg(x) 10947 return true 10948 } 10949 // match: (ORL x x) 10950 // cond: 10951 // result: x 10952 for { 10953 x := v.Args[0] 10954 if x != v.Args[1] { 10955 break 10956 } 10957 v.reset(OpCopy) 10958 v.Type = x.Type 10959 v.AddArg(x) 10960 return true 10961 } 10962 // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) 10963 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 10964 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) 10965 for { 10966 x0 := v.Args[0] 10967 if x0.Op != OpAMD64MOVBload { 10968 break 10969 } 10970 i := x0.AuxInt 10971 s := x0.Aux 10972 p := x0.Args[0] 10973 mem := x0.Args[1] 10974 s0 := v.Args[1] 10975 if s0.Op != OpAMD64SHLLconst { 10976 break 10977 } 10978 if s0.AuxInt != 8 { 10979 break 10980 } 10981 x1 := s0.Args[0] 10982 if x1.Op != OpAMD64MOVBload { 10983 break 10984 } 10985 if x1.AuxInt != i+1 { 10986 break 10987 } 10988 if x1.Aux != s { 10989 break 10990 } 10991 if p != x1.Args[0] { 10992 break 10993 } 10994 if mem != x1.Args[1] { 10995 break 10996 } 10997 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 10998 break 10999 } 11000 b = mergePoint(b, x0, x1) 11001 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 11002 v.reset(OpCopy) 11003 v.AddArg(v0) 11004 v0.AuxInt = i 11005 v0.Aux = s 11006 v0.AddArg(p) 11007 v0.AddArg(mem) 11008 return true 11009 } 11010 // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) 11011 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 11012 // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) 11013 for { 11014 o0 := v.Args[0] 11015 if o0.Op != OpAMD64ORL { 11016 break 11017 } 11018 x0 := o0.Args[0] 11019 if x0.Op != OpAMD64MOVWload { 11020 break 11021 } 11022 i := x0.AuxInt 11023 s := x0.Aux 11024 p := x0.Args[0] 11025 mem := x0.Args[1] 11026 s0 := o0.Args[1] 11027 if s0.Op != OpAMD64SHLLconst { 11028 break 11029 } 11030 if s0.AuxInt != 16 { 11031 break 11032 } 11033 x1 := s0.Args[0] 11034 if x1.Op != OpAMD64MOVBload { 11035 break 11036 } 11037 if x1.AuxInt != i+2 { 11038 break 11039 } 11040 if x1.Aux != s { 11041 break 11042 } 11043 if p != x1.Args[0] { 11044 break 11045 } 11046 if mem != x1.Args[1] { 11047 break 11048 } 11049 s1 := v.Args[1] 11050 if s1.Op != OpAMD64SHLLconst { 11051 break 11052 } 11053 if s1.AuxInt != 24 { 11054 break 11055 } 11056 x2 := s1.Args[0] 11057 if x2.Op != OpAMD64MOVBload { 11058 break 11059 } 11060 if x2.AuxInt != i+3 { 11061 break 11062 } 11063 if x2.Aux != s { 11064 break 11065 } 11066 if p != x2.Args[0] { 11067 break 11068 } 11069 if mem != x2.Args[1] { 11070 break 11071 } 11072 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 11073 break 11074 } 11075 b = mergePoint(b, x0, x1, x2) 11076 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11077 v.reset(OpCopy) 11078 v.AddArg(v0) 11079 v0.AuxInt = i 11080 v0.Aux = s 11081 v0.AddArg(p) 11082 v0.AddArg(mem) 11083 return true 11084 } 11085 // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) 11086 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 11087 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem) 11088 for { 11089 x0 := v.Args[0] 11090 if x0.Op != OpAMD64MOVBloadidx1 { 11091 break 11092 } 11093 i := x0.AuxInt 11094 s := x0.Aux 11095 p := x0.Args[0] 11096 idx := x0.Args[1] 11097 mem := x0.Args[2] 11098 s0 := v.Args[1] 11099 if s0.Op != OpAMD64SHLLconst { 11100 break 11101 } 11102 if s0.AuxInt != 8 { 11103 break 11104 } 11105 x1 := s0.Args[0] 11106 if x1.Op != OpAMD64MOVBloadidx1 { 11107 break 11108 } 11109 if x1.AuxInt != i+1 { 11110 break 11111 } 11112 if x1.Aux != s { 11113 break 11114 } 11115 if p != x1.Args[0] { 11116 break 11117 } 11118 if idx != x1.Args[1] { 11119 break 11120 } 11121 if mem != x1.Args[2] { 11122 break 11123 } 11124 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 11125 break 11126 } 11127 b = mergePoint(b, x0, x1) 11128 v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) 11129 v.reset(OpCopy) 11130 v.AddArg(v0) 11131 v0.AuxInt = i 11132 v0.Aux = s 11133 v0.AddArg(p) 11134 v0.AddArg(idx) 11135 v0.AddArg(mem) 11136 return true 11137 } 11138 // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) 11139 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 11140 // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem) 11141 for { 11142 o0 := v.Args[0] 11143 if o0.Op != OpAMD64ORL { 11144 break 11145 } 11146 x0 := o0.Args[0] 11147 if x0.Op != OpAMD64MOVWloadidx1 { 11148 break 11149 } 11150 i := x0.AuxInt 11151 s := x0.Aux 11152 p := x0.Args[0] 11153 idx := x0.Args[1] 11154 mem := x0.Args[2] 11155 s0 := o0.Args[1] 11156 if s0.Op != OpAMD64SHLLconst { 11157 break 11158 } 11159 if s0.AuxInt != 16 { 11160 break 11161 } 11162 x1 := s0.Args[0] 11163 if x1.Op != OpAMD64MOVBloadidx1 { 11164 break 11165 } 11166 if x1.AuxInt != i+2 { 11167 break 11168 } 11169 if x1.Aux != s { 11170 break 11171 } 11172 if p != x1.Args[0] { 11173 break 11174 } 11175 if idx != x1.Args[1] { 11176 break 11177 } 11178 if mem != x1.Args[2] { 11179 break 11180 } 11181 s1 := v.Args[1] 11182 if s1.Op != OpAMD64SHLLconst { 11183 break 11184 } 11185 if s1.AuxInt != 24 { 11186 break 11187 } 11188 x2 := s1.Args[0] 11189 if x2.Op != OpAMD64MOVBloadidx1 { 11190 break 11191 } 11192 if x2.AuxInt != i+3 { 11193 break 11194 } 11195 if x2.Aux != s { 11196 break 11197 } 11198 if p != x2.Args[0] { 11199 break 11200 } 11201 if idx != x2.Args[1] { 11202 break 11203 } 11204 if mem != x2.Args[2] { 11205 break 11206 } 11207 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 11208 break 11209 } 11210 b = mergePoint(b, x0, x1, x2) 11211 v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 11212 v.reset(OpCopy) 11213 v.AddArg(v0) 11214 v0.AuxInt = i 11215 v0.Aux = s 11216 v0.AddArg(p) 11217 v0.AddArg(idx) 11218 v0.AddArg(mem) 11219 return true 11220 } 11221 // match: (ORL o1:(ORL o0:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i-3] {s} p mem))) 11222 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 11223 // result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLload [i-3] {s} p mem)) 11224 for { 11225 o1 := v.Args[0] 11226 if o1.Op != OpAMD64ORL { 11227 break 11228 } 11229 o0 := o1.Args[0] 11230 if o0.Op != OpAMD64ORL { 11231 break 11232 } 11233 x0 := o0.Args[0] 11234 if x0.Op != OpAMD64MOVBload { 11235 break 11236 } 11237 i := x0.AuxInt 11238 s := x0.Aux 11239 p := x0.Args[0] 11240 mem := x0.Args[1] 11241 s0 := o0.Args[1] 11242 if s0.Op != OpAMD64SHLLconst { 11243 break 11244 } 11245 if s0.AuxInt != 8 { 11246 break 11247 } 11248 x1 := s0.Args[0] 11249 if x1.Op != OpAMD64MOVBload { 11250 break 11251 } 11252 if x1.AuxInt != i-1 { 11253 break 11254 } 11255 if x1.Aux != s { 11256 break 11257 } 11258 if p != x1.Args[0] { 11259 break 11260 } 11261 if mem != x1.Args[1] { 11262 break 11263 } 11264 s1 := o1.Args[1] 11265 if s1.Op != OpAMD64SHLLconst { 11266 break 11267 } 11268 if s1.AuxInt != 16 { 11269 break 11270 } 11271 x2 := s1.Args[0] 11272 if x2.Op != OpAMD64MOVBload { 11273 break 11274 } 11275 if x2.AuxInt != i-2 { 11276 break 11277 } 11278 if x2.Aux != s { 11279 break 11280 } 11281 if p != x2.Args[0] { 11282 break 11283 } 11284 if mem != x2.Args[1] { 11285 break 11286 } 11287 s2 := v.Args[1] 11288 if s2.Op != OpAMD64SHLLconst { 11289 break 11290 } 11291 if s2.AuxInt != 24 { 11292 break 11293 } 11294 x3 := s2.Args[0] 11295 if x3.Op != OpAMD64MOVBload { 11296 break 11297 } 11298 if x3.AuxInt != i-3 { 11299 break 11300 } 11301 if x3.Aux != s { 11302 break 11303 } 11304 if p != x3.Args[0] { 11305 break 11306 } 11307 if mem != x3.Args[1] { 11308 break 11309 } 11310 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 11311 break 11312 } 11313 b = mergePoint(b, x0, x1, x2, x3) 11314 v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, v.Type) 11315 v.reset(OpCopy) 11316 v.AddArg(v0) 11317 v1 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 11318 v1.AuxInt = i - 3 11319 v1.Aux = s 11320 v1.AddArg(p) 11321 v1.AddArg(mem) 11322 v0.AddArg(v1) 11323 return true 11324 } 11325 // match: (ORL o1:(ORL o0:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) 11326 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 11327 // result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLloadidx1 <v.Type> [i-3] {s} p idx mem)) 11328 for { 11329 o1 := v.Args[0] 11330 if o1.Op != OpAMD64ORL { 11331 break 11332 } 11333 o0 := o1.Args[0] 11334 if o0.Op != OpAMD64ORL { 11335 break 11336 } 11337 x0 := o0.Args[0] 11338 if x0.Op != OpAMD64MOVBloadidx1 { 11339 break 11340 } 11341 i := x0.AuxInt 11342 s := x0.Aux 11343 p := x0.Args[0] 11344 idx := x0.Args[1] 11345 mem := x0.Args[2] 11346 s0 := o0.Args[1] 11347 if s0.Op != OpAMD64SHLLconst { 11348 break 11349 } 11350 if s0.AuxInt != 8 { 11351 break 11352 } 11353 x1 := s0.Args[0] 11354 if x1.Op != OpAMD64MOVBloadidx1 { 11355 break 11356 } 11357 if x1.AuxInt != i-1 { 11358 break 11359 } 11360 if x1.Aux != s { 11361 break 11362 } 11363 if p != x1.Args[0] { 11364 break 11365 } 11366 if idx != x1.Args[1] { 11367 break 11368 } 11369 if mem != x1.Args[2] { 11370 break 11371 } 11372 s1 := o1.Args[1] 11373 if s1.Op != OpAMD64SHLLconst { 11374 break 11375 } 11376 if s1.AuxInt != 16 { 11377 break 11378 } 11379 x2 := s1.Args[0] 11380 if x2.Op != OpAMD64MOVBloadidx1 { 11381 break 11382 } 11383 if x2.AuxInt != i-2 { 11384 break 11385 } 11386 if x2.Aux != s { 11387 break 11388 } 11389 if p != x2.Args[0] { 11390 break 11391 } 11392 if idx != x2.Args[1] { 11393 break 11394 } 11395 if mem != x2.Args[2] { 11396 break 11397 } 11398 s2 := v.Args[1] 11399 if s2.Op != OpAMD64SHLLconst { 11400 break 11401 } 11402 if s2.AuxInt != 24 { 11403 break 11404 } 11405 x3 := s2.Args[0] 11406 if x3.Op != OpAMD64MOVBloadidx1 { 11407 break 11408 } 11409 if x3.AuxInt != i-3 { 11410 break 11411 } 11412 if x3.Aux != s { 11413 break 11414 } 11415 if p != x3.Args[0] { 11416 break 11417 } 11418 if idx != x3.Args[1] { 11419 break 11420 } 11421 if mem != x3.Args[2] { 11422 break 11423 } 11424 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 11425 break 11426 } 11427 b = mergePoint(b, x0, x1, x2, x3) 11428 v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, v.Type) 11429 v.reset(OpCopy) 11430 v.AddArg(v0) 11431 v1 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type) 11432 v1.AuxInt = i - 3 11433 v1.Aux = s 11434 v1.AddArg(p) 11435 v1.AddArg(idx) 11436 v1.AddArg(mem) 11437 v0.AddArg(v1) 11438 return true 11439 } 11440 return false 11441 } 11442 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { 11443 b := v.Block 11444 _ = b 11445 // match: (ORLconst [c] x) 11446 // cond: int32(c)==0 11447 // result: x 11448 for { 11449 c := v.AuxInt 11450 x := v.Args[0] 11451 if !(int32(c) == 0) { 11452 break 11453 } 11454 v.reset(OpCopy) 11455 v.Type = x.Type 11456 v.AddArg(x) 11457 return true 11458 } 11459 // match: (ORLconst [c] _) 11460 // cond: int32(c)==-1 11461 // result: (MOVLconst [-1]) 11462 for { 11463 c := v.AuxInt 11464 if !(int32(c) == -1) { 11465 break 11466 } 11467 v.reset(OpAMD64MOVLconst) 11468 v.AuxInt = -1 11469 return true 11470 } 11471 // match: (ORLconst [c] (MOVLconst [d])) 11472 // cond: 11473 // result: (MOVLconst [c|d]) 11474 for { 11475 c := v.AuxInt 11476 v_0 := v.Args[0] 11477 if v_0.Op != OpAMD64MOVLconst { 11478 break 11479 } 11480 d := v_0.AuxInt 11481 v.reset(OpAMD64MOVLconst) 11482 v.AuxInt = c | d 11483 return true 11484 } 11485 return false 11486 } 11487 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { 11488 b := v.Block 11489 _ = b 11490 // match: (ORQ x (MOVQconst [c])) 11491 // cond: is32Bit(c) 11492 // result: (ORQconst [c] x) 11493 for { 11494 x := v.Args[0] 11495 v_1 := v.Args[1] 11496 if v_1.Op != OpAMD64MOVQconst { 11497 break 11498 } 11499 c := v_1.AuxInt 11500 if !(is32Bit(c)) { 11501 break 11502 } 11503 v.reset(OpAMD64ORQconst) 11504 v.AuxInt = c 11505 v.AddArg(x) 11506 return true 11507 } 11508 // match: (ORQ (MOVQconst [c]) x) 11509 // cond: is32Bit(c) 11510 // result: (ORQconst [c] x) 11511 for { 11512 v_0 := v.Args[0] 11513 if v_0.Op != OpAMD64MOVQconst { 11514 break 11515 } 11516 c := v_0.AuxInt 11517 x := v.Args[1] 11518 if !(is32Bit(c)) { 11519 break 11520 } 11521 v.reset(OpAMD64ORQconst) 11522 v.AuxInt = c 11523 v.AddArg(x) 11524 return true 11525 } 11526 // match: (ORQ x x) 11527 // cond: 11528 // result: x 11529 for { 11530 x := v.Args[0] 11531 if x != v.Args[1] { 11532 break 11533 } 11534 v.reset(OpCopy) 11535 v.Type = x.Type 11536 v.AddArg(x) 11537 return true 11538 } 11539 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) 11540 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 11541 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) 11542 for { 11543 o0 := v.Args[0] 11544 if o0.Op != OpAMD64ORQ { 11545 break 11546 } 11547 o1 := o0.Args[0] 11548 if o1.Op != OpAMD64ORQ { 11549 break 11550 } 11551 o2 := o1.Args[0] 11552 if o2.Op != OpAMD64ORQ { 11553 break 11554 } 11555 o3 := o2.Args[0] 11556 if o3.Op != OpAMD64ORQ { 11557 break 11558 } 11559 o4 := o3.Args[0] 11560 if o4.Op != OpAMD64ORQ { 11561 break 11562 } 11563 o5 := o4.Args[0] 11564 if o5.Op != OpAMD64ORQ { 11565 break 11566 } 11567 x0 := o5.Args[0] 11568 if x0.Op != OpAMD64MOVBload { 11569 break 11570 } 11571 i := x0.AuxInt 11572 s := x0.Aux 11573 p := x0.Args[0] 11574 mem := x0.Args[1] 11575 s0 := o5.Args[1] 11576 if s0.Op != OpAMD64SHLQconst { 11577 break 11578 } 11579 if s0.AuxInt != 8 { 11580 break 11581 } 11582 x1 := s0.Args[0] 11583 if x1.Op != OpAMD64MOVBload { 11584 break 11585 } 11586 if x1.AuxInt != i+1 { 11587 break 11588 } 11589 if x1.Aux != s { 11590 break 11591 } 11592 if p != x1.Args[0] { 11593 break 11594 } 11595 if mem != x1.Args[1] { 11596 break 11597 } 11598 s1 := o4.Args[1] 11599 if s1.Op != OpAMD64SHLQconst { 11600 break 11601 } 11602 if s1.AuxInt != 16 { 11603 break 11604 } 11605 x2 := s1.Args[0] 11606 if x2.Op != OpAMD64MOVBload { 11607 break 11608 } 11609 if x2.AuxInt != i+2 { 11610 break 11611 } 11612 if x2.Aux != s { 11613 break 11614 } 11615 if p != x2.Args[0] { 11616 break 11617 } 11618 if mem != x2.Args[1] { 11619 break 11620 } 11621 s2 := o3.Args[1] 11622 if s2.Op != OpAMD64SHLQconst { 11623 break 11624 } 11625 if s2.AuxInt != 24 { 11626 break 11627 } 11628 x3 := s2.Args[0] 11629 if x3.Op != OpAMD64MOVBload { 11630 break 11631 } 11632 if x3.AuxInt != i+3 { 11633 break 11634 } 11635 if x3.Aux != s { 11636 break 11637 } 11638 if p != x3.Args[0] { 11639 break 11640 } 11641 if mem != x3.Args[1] { 11642 break 11643 } 11644 s3 := o2.Args[1] 11645 if s3.Op != OpAMD64SHLQconst { 11646 break 11647 } 11648 if s3.AuxInt != 32 { 11649 break 11650 } 11651 x4 := s3.Args[0] 11652 if x4.Op != OpAMD64MOVBload { 11653 break 11654 } 11655 if x4.AuxInt != i+4 { 11656 break 11657 } 11658 if x4.Aux != s { 11659 break 11660 } 11661 if p != x4.Args[0] { 11662 break 11663 } 11664 if mem != x4.Args[1] { 11665 break 11666 } 11667 s4 := o1.Args[1] 11668 if s4.Op != OpAMD64SHLQconst { 11669 break 11670 } 11671 if s4.AuxInt != 40 { 11672 break 11673 } 11674 x5 := s4.Args[0] 11675 if x5.Op != OpAMD64MOVBload { 11676 break 11677 } 11678 if x5.AuxInt != i+5 { 11679 break 11680 } 11681 if x5.Aux != s { 11682 break 11683 } 11684 if p != x5.Args[0] { 11685 break 11686 } 11687 if mem != x5.Args[1] { 11688 break 11689 } 11690 s5 := o0.Args[1] 11691 if s5.Op != OpAMD64SHLQconst { 11692 break 11693 } 11694 if s5.AuxInt != 48 { 11695 break 11696 } 11697 x6 := s5.Args[0] 11698 if x6.Op != OpAMD64MOVBload { 11699 break 11700 } 11701 if x6.AuxInt != i+6 { 11702 break 11703 } 11704 if x6.Aux != s { 11705 break 11706 } 11707 if p != x6.Args[0] { 11708 break 11709 } 11710 if mem != x6.Args[1] { 11711 break 11712 } 11713 s6 := v.Args[1] 11714 if s6.Op != OpAMD64SHLQconst { 11715 break 11716 } 11717 if s6.AuxInt != 56 { 11718 break 11719 } 11720 x7 := s6.Args[0] 11721 if x7.Op != OpAMD64MOVBload { 11722 break 11723 } 11724 if x7.AuxInt != i+7 { 11725 break 11726 } 11727 if x7.Aux != s { 11728 break 11729 } 11730 if p != x7.Args[0] { 11731 break 11732 } 11733 if mem != x7.Args[1] { 11734 break 11735 } 11736 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 11737 break 11738 } 11739 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 11740 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 11741 v.reset(OpCopy) 11742 v.AddArg(v0) 11743 v0.AuxInt = i 11744 v0.Aux = s 11745 v0.AddArg(p) 11746 v0.AddArg(mem) 11747 return true 11748 } 11749 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) 11750 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 11751 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem) 11752 for { 11753 o0 := v.Args[0] 11754 if o0.Op != OpAMD64ORQ { 11755 break 11756 } 11757 o1 := o0.Args[0] 11758 if o1.Op != OpAMD64ORQ { 11759 break 11760 } 11761 o2 := o1.Args[0] 11762 if o2.Op != OpAMD64ORQ { 11763 break 11764 } 11765 o3 := o2.Args[0] 11766 if o3.Op != OpAMD64ORQ { 11767 break 11768 } 11769 o4 := o3.Args[0] 11770 if o4.Op != OpAMD64ORQ { 11771 break 11772 } 11773 o5 := o4.Args[0] 11774 if o5.Op != OpAMD64ORQ { 11775 break 11776 } 11777 x0 := o5.Args[0] 11778 if x0.Op != OpAMD64MOVBloadidx1 { 11779 break 11780 } 11781 i := x0.AuxInt 11782 s := x0.Aux 11783 p := x0.Args[0] 11784 idx := x0.Args[1] 11785 mem := x0.Args[2] 11786 s0 := o5.Args[1] 11787 if s0.Op != OpAMD64SHLQconst { 11788 break 11789 } 11790 if s0.AuxInt != 8 { 11791 break 11792 } 11793 x1 := s0.Args[0] 11794 if x1.Op != OpAMD64MOVBloadidx1 { 11795 break 11796 } 11797 if x1.AuxInt != i+1 { 11798 break 11799 } 11800 if x1.Aux != s { 11801 break 11802 } 11803 if p != x1.Args[0] { 11804 break 11805 } 11806 if idx != x1.Args[1] { 11807 break 11808 } 11809 if mem != x1.Args[2] { 11810 break 11811 } 11812 s1 := o4.Args[1] 11813 if s1.Op != OpAMD64SHLQconst { 11814 break 11815 } 11816 if s1.AuxInt != 16 { 11817 break 11818 } 11819 x2 := s1.Args[0] 11820 if x2.Op != OpAMD64MOVBloadidx1 { 11821 break 11822 } 11823 if x2.AuxInt != i+2 { 11824 break 11825 } 11826 if x2.Aux != s { 11827 break 11828 } 11829 if p != x2.Args[0] { 11830 break 11831 } 11832 if idx != x2.Args[1] { 11833 break 11834 } 11835 if mem != x2.Args[2] { 11836 break 11837 } 11838 s2 := o3.Args[1] 11839 if s2.Op != OpAMD64SHLQconst { 11840 break 11841 } 11842 if s2.AuxInt != 24 { 11843 break 11844 } 11845 x3 := s2.Args[0] 11846 if x3.Op != OpAMD64MOVBloadidx1 { 11847 break 11848 } 11849 if x3.AuxInt != i+3 { 11850 break 11851 } 11852 if x3.Aux != s { 11853 break 11854 } 11855 if p != x3.Args[0] { 11856 break 11857 } 11858 if idx != x3.Args[1] { 11859 break 11860 } 11861 if mem != x3.Args[2] { 11862 break 11863 } 11864 s3 := o2.Args[1] 11865 if s3.Op != OpAMD64SHLQconst { 11866 break 11867 } 11868 if s3.AuxInt != 32 { 11869 break 11870 } 11871 x4 := s3.Args[0] 11872 if x4.Op != OpAMD64MOVBloadidx1 { 11873 break 11874 } 11875 if x4.AuxInt != i+4 { 11876 break 11877 } 11878 if x4.Aux != s { 11879 break 11880 } 11881 if p != x4.Args[0] { 11882 break 11883 } 11884 if idx != x4.Args[1] { 11885 break 11886 } 11887 if mem != x4.Args[2] { 11888 break 11889 } 11890 s4 := o1.Args[1] 11891 if s4.Op != OpAMD64SHLQconst { 11892 break 11893 } 11894 if s4.AuxInt != 40 { 11895 break 11896 } 11897 x5 := s4.Args[0] 11898 if x5.Op != OpAMD64MOVBloadidx1 { 11899 break 11900 } 11901 if x5.AuxInt != i+5 { 11902 break 11903 } 11904 if x5.Aux != s { 11905 break 11906 } 11907 if p != x5.Args[0] { 11908 break 11909 } 11910 if idx != x5.Args[1] { 11911 break 11912 } 11913 if mem != x5.Args[2] { 11914 break 11915 } 11916 s5 := o0.Args[1] 11917 if s5.Op != OpAMD64SHLQconst { 11918 break 11919 } 11920 if s5.AuxInt != 48 { 11921 break 11922 } 11923 x6 := s5.Args[0] 11924 if x6.Op != OpAMD64MOVBloadidx1 { 11925 break 11926 } 11927 if x6.AuxInt != i+6 { 11928 break 11929 } 11930 if x6.Aux != s { 11931 break 11932 } 11933 if p != x6.Args[0] { 11934 break 11935 } 11936 if idx != x6.Args[1] { 11937 break 11938 } 11939 if mem != x6.Args[2] { 11940 break 11941 } 11942 s6 := v.Args[1] 11943 if s6.Op != OpAMD64SHLQconst { 11944 break 11945 } 11946 if s6.AuxInt != 56 { 11947 break 11948 } 11949 x7 := s6.Args[0] 11950 if x7.Op != OpAMD64MOVBloadidx1 { 11951 break 11952 } 11953 if x7.AuxInt != i+7 { 11954 break 11955 } 11956 if x7.Aux != s { 11957 break 11958 } 11959 if p != x7.Args[0] { 11960 break 11961 } 11962 if idx != x7.Args[1] { 11963 break 11964 } 11965 if mem != x7.Args[2] { 11966 break 11967 } 11968 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 11969 break 11970 } 11971 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 11972 v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type) 11973 v.reset(OpCopy) 11974 v.AddArg(v0) 11975 v0.AuxInt = i 11976 v0.Aux = s 11977 v0.AddArg(p) 11978 v0.AddArg(idx) 11979 v0.AddArg(mem) 11980 return true 11981 } 11982 // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem))) 11983 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 11984 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQload [i-7] {s} p mem)) 11985 for { 11986 o5 := v.Args[0] 11987 if o5.Op != OpAMD64ORQ { 11988 break 11989 } 11990 o4 := o5.Args[0] 11991 if o4.Op != OpAMD64ORQ { 11992 break 11993 } 11994 o3 := o4.Args[0] 11995 if o3.Op != OpAMD64ORQ { 11996 break 11997 } 11998 o2 := o3.Args[0] 11999 if o2.Op != OpAMD64ORQ { 12000 break 12001 } 12002 o1 := o2.Args[0] 12003 if o1.Op != OpAMD64ORQ { 12004 break 12005 } 12006 o0 := o1.Args[0] 12007 if o0.Op != OpAMD64ORQ { 12008 break 12009 } 12010 x0 := o0.Args[0] 12011 if x0.Op != OpAMD64MOVBload { 12012 break 12013 } 12014 i := x0.AuxInt 12015 s := x0.Aux 12016 p := x0.Args[0] 12017 mem := x0.Args[1] 12018 s0 := o0.Args[1] 12019 if s0.Op != OpAMD64SHLQconst { 12020 break 12021 } 12022 if s0.AuxInt != 8 { 12023 break 12024 } 12025 x1 := s0.Args[0] 12026 if x1.Op != OpAMD64MOVBload { 12027 break 12028 } 12029 if x1.AuxInt != i-1 { 12030 break 12031 } 12032 if x1.Aux != s { 12033 break 12034 } 12035 if p != x1.Args[0] { 12036 break 12037 } 12038 if mem != x1.Args[1] { 12039 break 12040 } 12041 s1 := o1.Args[1] 12042 if s1.Op != OpAMD64SHLQconst { 12043 break 12044 } 12045 if s1.AuxInt != 16 { 12046 break 12047 } 12048 x2 := s1.Args[0] 12049 if x2.Op != OpAMD64MOVBload { 12050 break 12051 } 12052 if x2.AuxInt != i-2 { 12053 break 12054 } 12055 if x2.Aux != s { 12056 break 12057 } 12058 if p != x2.Args[0] { 12059 break 12060 } 12061 if mem != x2.Args[1] { 12062 break 12063 } 12064 s2 := o2.Args[1] 12065 if s2.Op != OpAMD64SHLQconst { 12066 break 12067 } 12068 if s2.AuxInt != 24 { 12069 break 12070 } 12071 x3 := s2.Args[0] 12072 if x3.Op != OpAMD64MOVBload { 12073 break 12074 } 12075 if x3.AuxInt != i-3 { 12076 break 12077 } 12078 if x3.Aux != s { 12079 break 12080 } 12081 if p != x3.Args[0] { 12082 break 12083 } 12084 if mem != x3.Args[1] { 12085 break 12086 } 12087 s3 := o3.Args[1] 12088 if s3.Op != OpAMD64SHLQconst { 12089 break 12090 } 12091 if s3.AuxInt != 32 { 12092 break 12093 } 12094 x4 := s3.Args[0] 12095 if x4.Op != OpAMD64MOVBload { 12096 break 12097 } 12098 if x4.AuxInt != i-4 { 12099 break 12100 } 12101 if x4.Aux != s { 12102 break 12103 } 12104 if p != x4.Args[0] { 12105 break 12106 } 12107 if mem != x4.Args[1] { 12108 break 12109 } 12110 s4 := o4.Args[1] 12111 if s4.Op != OpAMD64SHLQconst { 12112 break 12113 } 12114 if s4.AuxInt != 40 { 12115 break 12116 } 12117 x5 := s4.Args[0] 12118 if x5.Op != OpAMD64MOVBload { 12119 break 12120 } 12121 if x5.AuxInt != i-5 { 12122 break 12123 } 12124 if x5.Aux != s { 12125 break 12126 } 12127 if p != x5.Args[0] { 12128 break 12129 } 12130 if mem != x5.Args[1] { 12131 break 12132 } 12133 s5 := o5.Args[1] 12134 if s5.Op != OpAMD64SHLQconst { 12135 break 12136 } 12137 if s5.AuxInt != 48 { 12138 break 12139 } 12140 x6 := s5.Args[0] 12141 if x6.Op != OpAMD64MOVBload { 12142 break 12143 } 12144 if x6.AuxInt != i-6 { 12145 break 12146 } 12147 if x6.Aux != s { 12148 break 12149 } 12150 if p != x6.Args[0] { 12151 break 12152 } 12153 if mem != x6.Args[1] { 12154 break 12155 } 12156 s6 := v.Args[1] 12157 if s6.Op != OpAMD64SHLQconst { 12158 break 12159 } 12160 if s6.AuxInt != 56 { 12161 break 12162 } 12163 x7 := s6.Args[0] 12164 if x7.Op != OpAMD64MOVBload { 12165 break 12166 } 12167 if x7.AuxInt != i-7 { 12168 break 12169 } 12170 if x7.Aux != s { 12171 break 12172 } 12173 if p != x7.Args[0] { 12174 break 12175 } 12176 if mem != x7.Args[1] { 12177 break 12178 } 12179 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12180 break 12181 } 12182 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12183 v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, v.Type) 12184 v.reset(OpCopy) 12185 v.AddArg(v0) 12186 v1 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 12187 v1.AuxInt = i - 7 12188 v1.Aux = s 12189 v1.AddArg(p) 12190 v1.AddArg(mem) 12191 v0.AddArg(v1) 12192 return true 12193 } 12194 // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i-5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem))) 12195 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12196 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQloadidx1 <v.Type> [i-7] {s} p idx mem)) 12197 for { 12198 o5 := v.Args[0] 12199 if o5.Op != OpAMD64ORQ { 12200 break 12201 } 12202 o4 := o5.Args[0] 12203 if o4.Op != OpAMD64ORQ { 12204 break 12205 } 12206 o3 := o4.Args[0] 12207 if o3.Op != OpAMD64ORQ { 12208 break 12209 } 12210 o2 := o3.Args[0] 12211 if o2.Op != OpAMD64ORQ { 12212 break 12213 } 12214 o1 := o2.Args[0] 12215 if o1.Op != OpAMD64ORQ { 12216 break 12217 } 12218 o0 := o1.Args[0] 12219 if o0.Op != OpAMD64ORQ { 12220 break 12221 } 12222 x0 := o0.Args[0] 12223 if x0.Op != OpAMD64MOVBloadidx1 { 12224 break 12225 } 12226 i := x0.AuxInt 12227 s := x0.Aux 12228 p := x0.Args[0] 12229 idx := x0.Args[1] 12230 mem := x0.Args[2] 12231 s0 := o0.Args[1] 12232 if s0.Op != OpAMD64SHLQconst { 12233 break 12234 } 12235 if s0.AuxInt != 8 { 12236 break 12237 } 12238 x1 := s0.Args[0] 12239 if x1.Op != OpAMD64MOVBloadidx1 { 12240 break 12241 } 12242 if x1.AuxInt != i-1 { 12243 break 12244 } 12245 if x1.Aux != s { 12246 break 12247 } 12248 if p != x1.Args[0] { 12249 break 12250 } 12251 if idx != x1.Args[1] { 12252 break 12253 } 12254 if mem != x1.Args[2] { 12255 break 12256 } 12257 s1 := o1.Args[1] 12258 if s1.Op != OpAMD64SHLQconst { 12259 break 12260 } 12261 if s1.AuxInt != 16 { 12262 break 12263 } 12264 x2 := s1.Args[0] 12265 if x2.Op != OpAMD64MOVBloadidx1 { 12266 break 12267 } 12268 if x2.AuxInt != i-2 { 12269 break 12270 } 12271 if x2.Aux != s { 12272 break 12273 } 12274 if p != x2.Args[0] { 12275 break 12276 } 12277 if idx != x2.Args[1] { 12278 break 12279 } 12280 if mem != x2.Args[2] { 12281 break 12282 } 12283 s2 := o2.Args[1] 12284 if s2.Op != OpAMD64SHLQconst { 12285 break 12286 } 12287 if s2.AuxInt != 24 { 12288 break 12289 } 12290 x3 := s2.Args[0] 12291 if x3.Op != OpAMD64MOVBloadidx1 { 12292 break 12293 } 12294 if x3.AuxInt != i-3 { 12295 break 12296 } 12297 if x3.Aux != s { 12298 break 12299 } 12300 if p != x3.Args[0] { 12301 break 12302 } 12303 if idx != x3.Args[1] { 12304 break 12305 } 12306 if mem != x3.Args[2] { 12307 break 12308 } 12309 s3 := o3.Args[1] 12310 if s3.Op != OpAMD64SHLQconst { 12311 break 12312 } 12313 if s3.AuxInt != 32 { 12314 break 12315 } 12316 x4 := s3.Args[0] 12317 if x4.Op != OpAMD64MOVBloadidx1 { 12318 break 12319 } 12320 if x4.AuxInt != i-4 { 12321 break 12322 } 12323 if x4.Aux != s { 12324 break 12325 } 12326 if p != x4.Args[0] { 12327 break 12328 } 12329 if idx != x4.Args[1] { 12330 break 12331 } 12332 if mem != x4.Args[2] { 12333 break 12334 } 12335 s4 := o4.Args[1] 12336 if s4.Op != OpAMD64SHLQconst { 12337 break 12338 } 12339 if s4.AuxInt != 40 { 12340 break 12341 } 12342 x5 := s4.Args[0] 12343 if x5.Op != OpAMD64MOVBloadidx1 { 12344 break 12345 } 12346 if x5.AuxInt != i-5 { 12347 break 12348 } 12349 if x5.Aux != s { 12350 break 12351 } 12352 if p != x5.Args[0] { 12353 break 12354 } 12355 if idx != x5.Args[1] { 12356 break 12357 } 12358 if mem != x5.Args[2] { 12359 break 12360 } 12361 s5 := o5.Args[1] 12362 if s5.Op != OpAMD64SHLQconst { 12363 break 12364 } 12365 if s5.AuxInt != 48 { 12366 break 12367 } 12368 x6 := s5.Args[0] 12369 if x6.Op != OpAMD64MOVBloadidx1 { 12370 break 12371 } 12372 if x6.AuxInt != i-6 { 12373 break 12374 } 12375 if x6.Aux != s { 12376 break 12377 } 12378 if p != x6.Args[0] { 12379 break 12380 } 12381 if idx != x6.Args[1] { 12382 break 12383 } 12384 if mem != x6.Args[2] { 12385 break 12386 } 12387 s6 := v.Args[1] 12388 if s6.Op != OpAMD64SHLQconst { 12389 break 12390 } 12391 if s6.AuxInt != 56 { 12392 break 12393 } 12394 x7 := s6.Args[0] 12395 if x7.Op != OpAMD64MOVBloadidx1 { 12396 break 12397 } 12398 if x7.AuxInt != i-7 { 12399 break 12400 } 12401 if x7.Aux != s { 12402 break 12403 } 12404 if p != x7.Args[0] { 12405 break 12406 } 12407 if idx != x7.Args[1] { 12408 break 12409 } 12410 if mem != x7.Args[2] { 12411 break 12412 } 12413 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12414 break 12415 } 12416 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12417 v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, v.Type) 12418 v.reset(OpCopy) 12419 v.AddArg(v0) 12420 v1 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type) 12421 v1.AuxInt = i - 7 12422 v1.Aux = s 12423 v1.AddArg(p) 12424 v1.AddArg(idx) 12425 v1.AddArg(mem) 12426 v0.AddArg(v1) 12427 return true 12428 } 12429 return false 12430 } 12431 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { 12432 b := v.Block 12433 _ = b 12434 // match: (ORQconst [0] x) 12435 // cond: 12436 // result: x 12437 for { 12438 if v.AuxInt != 0 { 12439 break 12440 } 12441 x := v.Args[0] 12442 v.reset(OpCopy) 12443 v.Type = x.Type 12444 v.AddArg(x) 12445 return true 12446 } 12447 // match: (ORQconst [-1] _) 12448 // cond: 12449 // result: (MOVQconst [-1]) 12450 for { 12451 if v.AuxInt != -1 { 12452 break 12453 } 12454 v.reset(OpAMD64MOVQconst) 12455 v.AuxInt = -1 12456 return true 12457 } 12458 // match: (ORQconst [c] (MOVQconst [d])) 12459 // cond: 12460 // result: (MOVQconst [c|d]) 12461 for { 12462 c := v.AuxInt 12463 v_0 := v.Args[0] 12464 if v_0.Op != OpAMD64MOVQconst { 12465 break 12466 } 12467 d := v_0.AuxInt 12468 v.reset(OpAMD64MOVQconst) 12469 v.AuxInt = c | d 12470 return true 12471 } 12472 return false 12473 } 12474 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool { 12475 b := v.Block 12476 _ = b 12477 // match: (ROLBconst [c] (ROLBconst [d] x)) 12478 // cond: 12479 // result: (ROLBconst [(c+d)& 7] x) 12480 for { 12481 c := v.AuxInt 12482 v_0 := v.Args[0] 12483 if v_0.Op != OpAMD64ROLBconst { 12484 break 12485 } 12486 d := v_0.AuxInt 12487 x := v_0.Args[0] 12488 v.reset(OpAMD64ROLBconst) 12489 v.AuxInt = (c + d) & 7 12490 v.AddArg(x) 12491 return true 12492 } 12493 // match: (ROLBconst [0] x) 12494 // cond: 12495 // result: x 12496 for { 12497 if v.AuxInt != 0 { 12498 break 12499 } 12500 x := v.Args[0] 12501 v.reset(OpCopy) 12502 v.Type = x.Type 12503 v.AddArg(x) 12504 return true 12505 } 12506 return false 12507 } 12508 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool { 12509 b := v.Block 12510 _ = b 12511 // match: (ROLLconst [c] (ROLLconst [d] x)) 12512 // cond: 12513 // result: (ROLLconst [(c+d)&31] x) 12514 for { 12515 c := v.AuxInt 12516 v_0 := v.Args[0] 12517 if v_0.Op != OpAMD64ROLLconst { 12518 break 12519 } 12520 d := v_0.AuxInt 12521 x := v_0.Args[0] 12522 v.reset(OpAMD64ROLLconst) 12523 v.AuxInt = (c + d) & 31 12524 v.AddArg(x) 12525 return true 12526 } 12527 // match: (ROLLconst [0] x) 12528 // cond: 12529 // result: x 12530 for { 12531 if v.AuxInt != 0 { 12532 break 12533 } 12534 x := v.Args[0] 12535 v.reset(OpCopy) 12536 v.Type = x.Type 12537 v.AddArg(x) 12538 return true 12539 } 12540 return false 12541 } 12542 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool { 12543 b := v.Block 12544 _ = b 12545 // match: (ROLQconst [c] (ROLQconst [d] x)) 12546 // cond: 12547 // result: (ROLQconst [(c+d)&63] x) 12548 for { 12549 c := v.AuxInt 12550 v_0 := v.Args[0] 12551 if v_0.Op != OpAMD64ROLQconst { 12552 break 12553 } 12554 d := v_0.AuxInt 12555 x := v_0.Args[0] 12556 v.reset(OpAMD64ROLQconst) 12557 v.AuxInt = (c + d) & 63 12558 v.AddArg(x) 12559 return true 12560 } 12561 // match: (ROLQconst [0] x) 12562 // cond: 12563 // result: x 12564 for { 12565 if v.AuxInt != 0 { 12566 break 12567 } 12568 x := v.Args[0] 12569 v.reset(OpCopy) 12570 v.Type = x.Type 12571 v.AddArg(x) 12572 return true 12573 } 12574 return false 12575 } 12576 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool { 12577 b := v.Block 12578 _ = b 12579 // match: (ROLWconst [c] (ROLWconst [d] x)) 12580 // cond: 12581 // result: (ROLWconst [(c+d)&15] x) 12582 for { 12583 c := v.AuxInt 12584 v_0 := v.Args[0] 12585 if v_0.Op != OpAMD64ROLWconst { 12586 break 12587 } 12588 d := v_0.AuxInt 12589 x := v_0.Args[0] 12590 v.reset(OpAMD64ROLWconst) 12591 v.AuxInt = (c + d) & 15 12592 v.AddArg(x) 12593 return true 12594 } 12595 // match: (ROLWconst [0] x) 12596 // cond: 12597 // result: x 12598 for { 12599 if v.AuxInt != 0 { 12600 break 12601 } 12602 x := v.Args[0] 12603 v.reset(OpCopy) 12604 v.Type = x.Type 12605 v.AddArg(x) 12606 return true 12607 } 12608 return false 12609 } 12610 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { 12611 b := v.Block 12612 _ = b 12613 // match: (SARB x (MOVQconst [c])) 12614 // cond: 12615 // result: (SARBconst [c&31] x) 12616 for { 12617 x := v.Args[0] 12618 v_1 := v.Args[1] 12619 if v_1.Op != OpAMD64MOVQconst { 12620 break 12621 } 12622 c := v_1.AuxInt 12623 v.reset(OpAMD64SARBconst) 12624 v.AuxInt = c & 31 12625 v.AddArg(x) 12626 return true 12627 } 12628 // match: (SARB x (MOVLconst [c])) 12629 // cond: 12630 // result: (SARBconst [c&31] x) 12631 for { 12632 x := v.Args[0] 12633 v_1 := v.Args[1] 12634 if v_1.Op != OpAMD64MOVLconst { 12635 break 12636 } 12637 c := v_1.AuxInt 12638 v.reset(OpAMD64SARBconst) 12639 v.AuxInt = c & 31 12640 v.AddArg(x) 12641 return true 12642 } 12643 return false 12644 } 12645 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { 12646 b := v.Block 12647 _ = b 12648 // match: (SARBconst [c] (MOVQconst [d])) 12649 // cond: 12650 // result: (MOVQconst [d>>uint64(c)]) 12651 for { 12652 c := v.AuxInt 12653 v_0 := v.Args[0] 12654 if v_0.Op != OpAMD64MOVQconst { 12655 break 12656 } 12657 d := v_0.AuxInt 12658 v.reset(OpAMD64MOVQconst) 12659 v.AuxInt = d >> uint64(c) 12660 return true 12661 } 12662 return false 12663 } 12664 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { 12665 b := v.Block 12666 _ = b 12667 // match: (SARL x (MOVQconst [c])) 12668 // cond: 12669 // result: (SARLconst [c&31] x) 12670 for { 12671 x := v.Args[0] 12672 v_1 := v.Args[1] 12673 if v_1.Op != OpAMD64MOVQconst { 12674 break 12675 } 12676 c := v_1.AuxInt 12677 v.reset(OpAMD64SARLconst) 12678 v.AuxInt = c & 31 12679 v.AddArg(x) 12680 return true 12681 } 12682 // match: (SARL x (MOVLconst [c])) 12683 // cond: 12684 // result: (SARLconst [c&31] x) 12685 for { 12686 x := v.Args[0] 12687 v_1 := v.Args[1] 12688 if v_1.Op != OpAMD64MOVLconst { 12689 break 12690 } 12691 c := v_1.AuxInt 12692 v.reset(OpAMD64SARLconst) 12693 v.AuxInt = c & 31 12694 v.AddArg(x) 12695 return true 12696 } 12697 // match: (SARL x (ANDLconst [31] y)) 12698 // cond: 12699 // result: (SARL x y) 12700 for { 12701 x := v.Args[0] 12702 v_1 := v.Args[1] 12703 if v_1.Op != OpAMD64ANDLconst { 12704 break 12705 } 12706 if v_1.AuxInt != 31 { 12707 break 12708 } 12709 y := v_1.Args[0] 12710 v.reset(OpAMD64SARL) 12711 v.AddArg(x) 12712 v.AddArg(y) 12713 return true 12714 } 12715 return false 12716 } 12717 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { 12718 b := v.Block 12719 _ = b 12720 // match: (SARLconst [c] (MOVQconst [d])) 12721 // cond: 12722 // result: (MOVQconst [d>>uint64(c)]) 12723 for { 12724 c := v.AuxInt 12725 v_0 := v.Args[0] 12726 if v_0.Op != OpAMD64MOVQconst { 12727 break 12728 } 12729 d := v_0.AuxInt 12730 v.reset(OpAMD64MOVQconst) 12731 v.AuxInt = d >> uint64(c) 12732 return true 12733 } 12734 return false 12735 } 12736 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { 12737 b := v.Block 12738 _ = b 12739 // match: (SARQ x (MOVQconst [c])) 12740 // cond: 12741 // result: (SARQconst [c&63] x) 12742 for { 12743 x := v.Args[0] 12744 v_1 := v.Args[1] 12745 if v_1.Op != OpAMD64MOVQconst { 12746 break 12747 } 12748 c := v_1.AuxInt 12749 v.reset(OpAMD64SARQconst) 12750 v.AuxInt = c & 63 12751 v.AddArg(x) 12752 return true 12753 } 12754 // match: (SARQ x (MOVLconst [c])) 12755 // cond: 12756 // result: (SARQconst [c&63] x) 12757 for { 12758 x := v.Args[0] 12759 v_1 := v.Args[1] 12760 if v_1.Op != OpAMD64MOVLconst { 12761 break 12762 } 12763 c := v_1.AuxInt 12764 v.reset(OpAMD64SARQconst) 12765 v.AuxInt = c & 63 12766 v.AddArg(x) 12767 return true 12768 } 12769 // match: (SARQ x (ANDQconst [63] y)) 12770 // cond: 12771 // result: (SARQ x y) 12772 for { 12773 x := v.Args[0] 12774 v_1 := v.Args[1] 12775 if v_1.Op != OpAMD64ANDQconst { 12776 break 12777 } 12778 if v_1.AuxInt != 63 { 12779 break 12780 } 12781 y := v_1.Args[0] 12782 v.reset(OpAMD64SARQ) 12783 v.AddArg(x) 12784 v.AddArg(y) 12785 return true 12786 } 12787 return false 12788 } 12789 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { 12790 b := v.Block 12791 _ = b 12792 // match: (SARQconst [c] (MOVQconst [d])) 12793 // cond: 12794 // result: (MOVQconst [d>>uint64(c)]) 12795 for { 12796 c := v.AuxInt 12797 v_0 := v.Args[0] 12798 if v_0.Op != OpAMD64MOVQconst { 12799 break 12800 } 12801 d := v_0.AuxInt 12802 v.reset(OpAMD64MOVQconst) 12803 v.AuxInt = d >> uint64(c) 12804 return true 12805 } 12806 return false 12807 } 12808 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { 12809 b := v.Block 12810 _ = b 12811 // match: (SARW x (MOVQconst [c])) 12812 // cond: 12813 // result: (SARWconst [c&31] x) 12814 for { 12815 x := v.Args[0] 12816 v_1 := v.Args[1] 12817 if v_1.Op != OpAMD64MOVQconst { 12818 break 12819 } 12820 c := v_1.AuxInt 12821 v.reset(OpAMD64SARWconst) 12822 v.AuxInt = c & 31 12823 v.AddArg(x) 12824 return true 12825 } 12826 // match: (SARW x (MOVLconst [c])) 12827 // cond: 12828 // result: (SARWconst [c&31] x) 12829 for { 12830 x := v.Args[0] 12831 v_1 := v.Args[1] 12832 if v_1.Op != OpAMD64MOVLconst { 12833 break 12834 } 12835 c := v_1.AuxInt 12836 v.reset(OpAMD64SARWconst) 12837 v.AuxInt = c & 31 12838 v.AddArg(x) 12839 return true 12840 } 12841 return false 12842 } 12843 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { 12844 b := v.Block 12845 _ = b 12846 // match: (SARWconst [c] (MOVQconst [d])) 12847 // cond: 12848 // result: (MOVQconst [d>>uint64(c)]) 12849 for { 12850 c := v.AuxInt 12851 v_0 := v.Args[0] 12852 if v_0.Op != OpAMD64MOVQconst { 12853 break 12854 } 12855 d := v_0.AuxInt 12856 v.reset(OpAMD64MOVQconst) 12857 v.AuxInt = d >> uint64(c) 12858 return true 12859 } 12860 return false 12861 } 12862 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { 12863 b := v.Block 12864 _ = b 12865 // match: (SBBLcarrymask (FlagEQ)) 12866 // cond: 12867 // result: (MOVLconst [0]) 12868 for { 12869 v_0 := v.Args[0] 12870 if v_0.Op != OpAMD64FlagEQ { 12871 break 12872 } 12873 v.reset(OpAMD64MOVLconst) 12874 v.AuxInt = 0 12875 return true 12876 } 12877 // match: (SBBLcarrymask (FlagLT_ULT)) 12878 // cond: 12879 // result: (MOVLconst [-1]) 12880 for { 12881 v_0 := v.Args[0] 12882 if v_0.Op != OpAMD64FlagLT_ULT { 12883 break 12884 } 12885 v.reset(OpAMD64MOVLconst) 12886 v.AuxInt = -1 12887 return true 12888 } 12889 // match: (SBBLcarrymask (FlagLT_UGT)) 12890 // cond: 12891 // result: (MOVLconst [0]) 12892 for { 12893 v_0 := v.Args[0] 12894 if v_0.Op != OpAMD64FlagLT_UGT { 12895 break 12896 } 12897 v.reset(OpAMD64MOVLconst) 12898 v.AuxInt = 0 12899 return true 12900 } 12901 // match: (SBBLcarrymask (FlagGT_ULT)) 12902 // cond: 12903 // result: (MOVLconst [-1]) 12904 for { 12905 v_0 := v.Args[0] 12906 if v_0.Op != OpAMD64FlagGT_ULT { 12907 break 12908 } 12909 v.reset(OpAMD64MOVLconst) 12910 v.AuxInt = -1 12911 return true 12912 } 12913 // match: (SBBLcarrymask (FlagGT_UGT)) 12914 // cond: 12915 // result: (MOVLconst [0]) 12916 for { 12917 v_0 := v.Args[0] 12918 if v_0.Op != OpAMD64FlagGT_UGT { 12919 break 12920 } 12921 v.reset(OpAMD64MOVLconst) 12922 v.AuxInt = 0 12923 return true 12924 } 12925 return false 12926 } 12927 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { 12928 b := v.Block 12929 _ = b 12930 // match: (SBBQcarrymask (FlagEQ)) 12931 // cond: 12932 // result: (MOVQconst [0]) 12933 for { 12934 v_0 := v.Args[0] 12935 if v_0.Op != OpAMD64FlagEQ { 12936 break 12937 } 12938 v.reset(OpAMD64MOVQconst) 12939 v.AuxInt = 0 12940 return true 12941 } 12942 // match: (SBBQcarrymask (FlagLT_ULT)) 12943 // cond: 12944 // result: (MOVQconst [-1]) 12945 for { 12946 v_0 := v.Args[0] 12947 if v_0.Op != OpAMD64FlagLT_ULT { 12948 break 12949 } 12950 v.reset(OpAMD64MOVQconst) 12951 v.AuxInt = -1 12952 return true 12953 } 12954 // match: (SBBQcarrymask (FlagLT_UGT)) 12955 // cond: 12956 // result: (MOVQconst [0]) 12957 for { 12958 v_0 := v.Args[0] 12959 if v_0.Op != OpAMD64FlagLT_UGT { 12960 break 12961 } 12962 v.reset(OpAMD64MOVQconst) 12963 v.AuxInt = 0 12964 return true 12965 } 12966 // match: (SBBQcarrymask (FlagGT_ULT)) 12967 // cond: 12968 // result: (MOVQconst [-1]) 12969 for { 12970 v_0 := v.Args[0] 12971 if v_0.Op != OpAMD64FlagGT_ULT { 12972 break 12973 } 12974 v.reset(OpAMD64MOVQconst) 12975 v.AuxInt = -1 12976 return true 12977 } 12978 // match: (SBBQcarrymask (FlagGT_UGT)) 12979 // cond: 12980 // result: (MOVQconst [0]) 12981 for { 12982 v_0 := v.Args[0] 12983 if v_0.Op != OpAMD64FlagGT_UGT { 12984 break 12985 } 12986 v.reset(OpAMD64MOVQconst) 12987 v.AuxInt = 0 12988 return true 12989 } 12990 return false 12991 } 12992 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { 12993 b := v.Block 12994 _ = b 12995 // match: (SETA (InvertFlags x)) 12996 // cond: 12997 // result: (SETB x) 12998 for { 12999 v_0 := v.Args[0] 13000 if v_0.Op != OpAMD64InvertFlags { 13001 break 13002 } 13003 x := v_0.Args[0] 13004 v.reset(OpAMD64SETB) 13005 v.AddArg(x) 13006 return true 13007 } 13008 // match: (SETA (FlagEQ)) 13009 // cond: 13010 // result: (MOVLconst [0]) 13011 for { 13012 v_0 := v.Args[0] 13013 if v_0.Op != OpAMD64FlagEQ { 13014 break 13015 } 13016 v.reset(OpAMD64MOVLconst) 13017 v.AuxInt = 0 13018 return true 13019 } 13020 // match: (SETA (FlagLT_ULT)) 13021 // cond: 13022 // result: (MOVLconst [0]) 13023 for { 13024 v_0 := v.Args[0] 13025 if v_0.Op != OpAMD64FlagLT_ULT { 13026 break 13027 } 13028 v.reset(OpAMD64MOVLconst) 13029 v.AuxInt = 0 13030 return true 13031 } 13032 // match: (SETA (FlagLT_UGT)) 13033 // cond: 13034 // result: (MOVLconst [1]) 13035 for { 13036 v_0 := v.Args[0] 13037 if v_0.Op != OpAMD64FlagLT_UGT { 13038 break 13039 } 13040 v.reset(OpAMD64MOVLconst) 13041 v.AuxInt = 1 13042 return true 13043 } 13044 // match: (SETA (FlagGT_ULT)) 13045 // cond: 13046 // result: (MOVLconst [0]) 13047 for { 13048 v_0 := v.Args[0] 13049 if v_0.Op != OpAMD64FlagGT_ULT { 13050 break 13051 } 13052 v.reset(OpAMD64MOVLconst) 13053 v.AuxInt = 0 13054 return true 13055 } 13056 // match: (SETA (FlagGT_UGT)) 13057 // cond: 13058 // result: (MOVLconst [1]) 13059 for { 13060 v_0 := v.Args[0] 13061 if v_0.Op != OpAMD64FlagGT_UGT { 13062 break 13063 } 13064 v.reset(OpAMD64MOVLconst) 13065 v.AuxInt = 1 13066 return true 13067 } 13068 return false 13069 } 13070 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { 13071 b := v.Block 13072 _ = b 13073 // match: (SETAE (InvertFlags x)) 13074 // cond: 13075 // result: (SETBE x) 13076 for { 13077 v_0 := v.Args[0] 13078 if v_0.Op != OpAMD64InvertFlags { 13079 break 13080 } 13081 x := v_0.Args[0] 13082 v.reset(OpAMD64SETBE) 13083 v.AddArg(x) 13084 return true 13085 } 13086 // match: (SETAE (FlagEQ)) 13087 // cond: 13088 // result: (MOVLconst [1]) 13089 for { 13090 v_0 := v.Args[0] 13091 if v_0.Op != OpAMD64FlagEQ { 13092 break 13093 } 13094 v.reset(OpAMD64MOVLconst) 13095 v.AuxInt = 1 13096 return true 13097 } 13098 // match: (SETAE (FlagLT_ULT)) 13099 // cond: 13100 // result: (MOVLconst [0]) 13101 for { 13102 v_0 := v.Args[0] 13103 if v_0.Op != OpAMD64FlagLT_ULT { 13104 break 13105 } 13106 v.reset(OpAMD64MOVLconst) 13107 v.AuxInt = 0 13108 return true 13109 } 13110 // match: (SETAE (FlagLT_UGT)) 13111 // cond: 13112 // result: (MOVLconst [1]) 13113 for { 13114 v_0 := v.Args[0] 13115 if v_0.Op != OpAMD64FlagLT_UGT { 13116 break 13117 } 13118 v.reset(OpAMD64MOVLconst) 13119 v.AuxInt = 1 13120 return true 13121 } 13122 // match: (SETAE (FlagGT_ULT)) 13123 // cond: 13124 // result: (MOVLconst [0]) 13125 for { 13126 v_0 := v.Args[0] 13127 if v_0.Op != OpAMD64FlagGT_ULT { 13128 break 13129 } 13130 v.reset(OpAMD64MOVLconst) 13131 v.AuxInt = 0 13132 return true 13133 } 13134 // match: (SETAE (FlagGT_UGT)) 13135 // cond: 13136 // result: (MOVLconst [1]) 13137 for { 13138 v_0 := v.Args[0] 13139 if v_0.Op != OpAMD64FlagGT_UGT { 13140 break 13141 } 13142 v.reset(OpAMD64MOVLconst) 13143 v.AuxInt = 1 13144 return true 13145 } 13146 return false 13147 } 13148 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { 13149 b := v.Block 13150 _ = b 13151 // match: (SETB (InvertFlags x)) 13152 // cond: 13153 // result: (SETA x) 13154 for { 13155 v_0 := v.Args[0] 13156 if v_0.Op != OpAMD64InvertFlags { 13157 break 13158 } 13159 x := v_0.Args[0] 13160 v.reset(OpAMD64SETA) 13161 v.AddArg(x) 13162 return true 13163 } 13164 // match: (SETB (FlagEQ)) 13165 // cond: 13166 // result: (MOVLconst [0]) 13167 for { 13168 v_0 := v.Args[0] 13169 if v_0.Op != OpAMD64FlagEQ { 13170 break 13171 } 13172 v.reset(OpAMD64MOVLconst) 13173 v.AuxInt = 0 13174 return true 13175 } 13176 // match: (SETB (FlagLT_ULT)) 13177 // cond: 13178 // result: (MOVLconst [1]) 13179 for { 13180 v_0 := v.Args[0] 13181 if v_0.Op != OpAMD64FlagLT_ULT { 13182 break 13183 } 13184 v.reset(OpAMD64MOVLconst) 13185 v.AuxInt = 1 13186 return true 13187 } 13188 // match: (SETB (FlagLT_UGT)) 13189 // cond: 13190 // result: (MOVLconst [0]) 13191 for { 13192 v_0 := v.Args[0] 13193 if v_0.Op != OpAMD64FlagLT_UGT { 13194 break 13195 } 13196 v.reset(OpAMD64MOVLconst) 13197 v.AuxInt = 0 13198 return true 13199 } 13200 // match: (SETB (FlagGT_ULT)) 13201 // cond: 13202 // result: (MOVLconst [1]) 13203 for { 13204 v_0 := v.Args[0] 13205 if v_0.Op != OpAMD64FlagGT_ULT { 13206 break 13207 } 13208 v.reset(OpAMD64MOVLconst) 13209 v.AuxInt = 1 13210 return true 13211 } 13212 // match: (SETB (FlagGT_UGT)) 13213 // cond: 13214 // result: (MOVLconst [0]) 13215 for { 13216 v_0 := v.Args[0] 13217 if v_0.Op != OpAMD64FlagGT_UGT { 13218 break 13219 } 13220 v.reset(OpAMD64MOVLconst) 13221 v.AuxInt = 0 13222 return true 13223 } 13224 return false 13225 } 13226 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { 13227 b := v.Block 13228 _ = b 13229 // match: (SETBE (InvertFlags x)) 13230 // cond: 13231 // result: (SETAE x) 13232 for { 13233 v_0 := v.Args[0] 13234 if v_0.Op != OpAMD64InvertFlags { 13235 break 13236 } 13237 x := v_0.Args[0] 13238 v.reset(OpAMD64SETAE) 13239 v.AddArg(x) 13240 return true 13241 } 13242 // match: (SETBE (FlagEQ)) 13243 // cond: 13244 // result: (MOVLconst [1]) 13245 for { 13246 v_0 := v.Args[0] 13247 if v_0.Op != OpAMD64FlagEQ { 13248 break 13249 } 13250 v.reset(OpAMD64MOVLconst) 13251 v.AuxInt = 1 13252 return true 13253 } 13254 // match: (SETBE (FlagLT_ULT)) 13255 // cond: 13256 // result: (MOVLconst [1]) 13257 for { 13258 v_0 := v.Args[0] 13259 if v_0.Op != OpAMD64FlagLT_ULT { 13260 break 13261 } 13262 v.reset(OpAMD64MOVLconst) 13263 v.AuxInt = 1 13264 return true 13265 } 13266 // match: (SETBE (FlagLT_UGT)) 13267 // cond: 13268 // result: (MOVLconst [0]) 13269 for { 13270 v_0 := v.Args[0] 13271 if v_0.Op != OpAMD64FlagLT_UGT { 13272 break 13273 } 13274 v.reset(OpAMD64MOVLconst) 13275 v.AuxInt = 0 13276 return true 13277 } 13278 // match: (SETBE (FlagGT_ULT)) 13279 // cond: 13280 // result: (MOVLconst [1]) 13281 for { 13282 v_0 := v.Args[0] 13283 if v_0.Op != OpAMD64FlagGT_ULT { 13284 break 13285 } 13286 v.reset(OpAMD64MOVLconst) 13287 v.AuxInt = 1 13288 return true 13289 } 13290 // match: (SETBE (FlagGT_UGT)) 13291 // cond: 13292 // result: (MOVLconst [0]) 13293 for { 13294 v_0 := v.Args[0] 13295 if v_0.Op != OpAMD64FlagGT_UGT { 13296 break 13297 } 13298 v.reset(OpAMD64MOVLconst) 13299 v.AuxInt = 0 13300 return true 13301 } 13302 return false 13303 } 13304 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { 13305 b := v.Block 13306 _ = b 13307 // match: (SETEQ (InvertFlags x)) 13308 // cond: 13309 // result: (SETEQ x) 13310 for { 13311 v_0 := v.Args[0] 13312 if v_0.Op != OpAMD64InvertFlags { 13313 break 13314 } 13315 x := v_0.Args[0] 13316 v.reset(OpAMD64SETEQ) 13317 v.AddArg(x) 13318 return true 13319 } 13320 // match: (SETEQ (FlagEQ)) 13321 // cond: 13322 // result: (MOVLconst [1]) 13323 for { 13324 v_0 := v.Args[0] 13325 if v_0.Op != OpAMD64FlagEQ { 13326 break 13327 } 13328 v.reset(OpAMD64MOVLconst) 13329 v.AuxInt = 1 13330 return true 13331 } 13332 // match: (SETEQ (FlagLT_ULT)) 13333 // cond: 13334 // result: (MOVLconst [0]) 13335 for { 13336 v_0 := v.Args[0] 13337 if v_0.Op != OpAMD64FlagLT_ULT { 13338 break 13339 } 13340 v.reset(OpAMD64MOVLconst) 13341 v.AuxInt = 0 13342 return true 13343 } 13344 // match: (SETEQ (FlagLT_UGT)) 13345 // cond: 13346 // result: (MOVLconst [0]) 13347 for { 13348 v_0 := v.Args[0] 13349 if v_0.Op != OpAMD64FlagLT_UGT { 13350 break 13351 } 13352 v.reset(OpAMD64MOVLconst) 13353 v.AuxInt = 0 13354 return true 13355 } 13356 // match: (SETEQ (FlagGT_ULT)) 13357 // cond: 13358 // result: (MOVLconst [0]) 13359 for { 13360 v_0 := v.Args[0] 13361 if v_0.Op != OpAMD64FlagGT_ULT { 13362 break 13363 } 13364 v.reset(OpAMD64MOVLconst) 13365 v.AuxInt = 0 13366 return true 13367 } 13368 // match: (SETEQ (FlagGT_UGT)) 13369 // cond: 13370 // result: (MOVLconst [0]) 13371 for { 13372 v_0 := v.Args[0] 13373 if v_0.Op != OpAMD64FlagGT_UGT { 13374 break 13375 } 13376 v.reset(OpAMD64MOVLconst) 13377 v.AuxInt = 0 13378 return true 13379 } 13380 return false 13381 } 13382 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { 13383 b := v.Block 13384 _ = b 13385 // match: (SETG (InvertFlags x)) 13386 // cond: 13387 // result: (SETL x) 13388 for { 13389 v_0 := v.Args[0] 13390 if v_0.Op != OpAMD64InvertFlags { 13391 break 13392 } 13393 x := v_0.Args[0] 13394 v.reset(OpAMD64SETL) 13395 v.AddArg(x) 13396 return true 13397 } 13398 // match: (SETG (FlagEQ)) 13399 // cond: 13400 // result: (MOVLconst [0]) 13401 for { 13402 v_0 := v.Args[0] 13403 if v_0.Op != OpAMD64FlagEQ { 13404 break 13405 } 13406 v.reset(OpAMD64MOVLconst) 13407 v.AuxInt = 0 13408 return true 13409 } 13410 // match: (SETG (FlagLT_ULT)) 13411 // cond: 13412 // result: (MOVLconst [0]) 13413 for { 13414 v_0 := v.Args[0] 13415 if v_0.Op != OpAMD64FlagLT_ULT { 13416 break 13417 } 13418 v.reset(OpAMD64MOVLconst) 13419 v.AuxInt = 0 13420 return true 13421 } 13422 // match: (SETG (FlagLT_UGT)) 13423 // cond: 13424 // result: (MOVLconst [0]) 13425 for { 13426 v_0 := v.Args[0] 13427 if v_0.Op != OpAMD64FlagLT_UGT { 13428 break 13429 } 13430 v.reset(OpAMD64MOVLconst) 13431 v.AuxInt = 0 13432 return true 13433 } 13434 // match: (SETG (FlagGT_ULT)) 13435 // cond: 13436 // result: (MOVLconst [1]) 13437 for { 13438 v_0 := v.Args[0] 13439 if v_0.Op != OpAMD64FlagGT_ULT { 13440 break 13441 } 13442 v.reset(OpAMD64MOVLconst) 13443 v.AuxInt = 1 13444 return true 13445 } 13446 // match: (SETG (FlagGT_UGT)) 13447 // cond: 13448 // result: (MOVLconst [1]) 13449 for { 13450 v_0 := v.Args[0] 13451 if v_0.Op != OpAMD64FlagGT_UGT { 13452 break 13453 } 13454 v.reset(OpAMD64MOVLconst) 13455 v.AuxInt = 1 13456 return true 13457 } 13458 return false 13459 } 13460 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { 13461 b := v.Block 13462 _ = b 13463 // match: (SETGE (InvertFlags x)) 13464 // cond: 13465 // result: (SETLE x) 13466 for { 13467 v_0 := v.Args[0] 13468 if v_0.Op != OpAMD64InvertFlags { 13469 break 13470 } 13471 x := v_0.Args[0] 13472 v.reset(OpAMD64SETLE) 13473 v.AddArg(x) 13474 return true 13475 } 13476 // match: (SETGE (FlagEQ)) 13477 // cond: 13478 // result: (MOVLconst [1]) 13479 for { 13480 v_0 := v.Args[0] 13481 if v_0.Op != OpAMD64FlagEQ { 13482 break 13483 } 13484 v.reset(OpAMD64MOVLconst) 13485 v.AuxInt = 1 13486 return true 13487 } 13488 // match: (SETGE (FlagLT_ULT)) 13489 // cond: 13490 // result: (MOVLconst [0]) 13491 for { 13492 v_0 := v.Args[0] 13493 if v_0.Op != OpAMD64FlagLT_ULT { 13494 break 13495 } 13496 v.reset(OpAMD64MOVLconst) 13497 v.AuxInt = 0 13498 return true 13499 } 13500 // match: (SETGE (FlagLT_UGT)) 13501 // cond: 13502 // result: (MOVLconst [0]) 13503 for { 13504 v_0 := v.Args[0] 13505 if v_0.Op != OpAMD64FlagLT_UGT { 13506 break 13507 } 13508 v.reset(OpAMD64MOVLconst) 13509 v.AuxInt = 0 13510 return true 13511 } 13512 // match: (SETGE (FlagGT_ULT)) 13513 // cond: 13514 // result: (MOVLconst [1]) 13515 for { 13516 v_0 := v.Args[0] 13517 if v_0.Op != OpAMD64FlagGT_ULT { 13518 break 13519 } 13520 v.reset(OpAMD64MOVLconst) 13521 v.AuxInt = 1 13522 return true 13523 } 13524 // match: (SETGE (FlagGT_UGT)) 13525 // cond: 13526 // result: (MOVLconst [1]) 13527 for { 13528 v_0 := v.Args[0] 13529 if v_0.Op != OpAMD64FlagGT_UGT { 13530 break 13531 } 13532 v.reset(OpAMD64MOVLconst) 13533 v.AuxInt = 1 13534 return true 13535 } 13536 return false 13537 } 13538 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { 13539 b := v.Block 13540 _ = b 13541 // match: (SETL (InvertFlags x)) 13542 // cond: 13543 // result: (SETG x) 13544 for { 13545 v_0 := v.Args[0] 13546 if v_0.Op != OpAMD64InvertFlags { 13547 break 13548 } 13549 x := v_0.Args[0] 13550 v.reset(OpAMD64SETG) 13551 v.AddArg(x) 13552 return true 13553 } 13554 // match: (SETL (FlagEQ)) 13555 // cond: 13556 // result: (MOVLconst [0]) 13557 for { 13558 v_0 := v.Args[0] 13559 if v_0.Op != OpAMD64FlagEQ { 13560 break 13561 } 13562 v.reset(OpAMD64MOVLconst) 13563 v.AuxInt = 0 13564 return true 13565 } 13566 // match: (SETL (FlagLT_ULT)) 13567 // cond: 13568 // result: (MOVLconst [1]) 13569 for { 13570 v_0 := v.Args[0] 13571 if v_0.Op != OpAMD64FlagLT_ULT { 13572 break 13573 } 13574 v.reset(OpAMD64MOVLconst) 13575 v.AuxInt = 1 13576 return true 13577 } 13578 // match: (SETL (FlagLT_UGT)) 13579 // cond: 13580 // result: (MOVLconst [1]) 13581 for { 13582 v_0 := v.Args[0] 13583 if v_0.Op != OpAMD64FlagLT_UGT { 13584 break 13585 } 13586 v.reset(OpAMD64MOVLconst) 13587 v.AuxInt = 1 13588 return true 13589 } 13590 // match: (SETL (FlagGT_ULT)) 13591 // cond: 13592 // result: (MOVLconst [0]) 13593 for { 13594 v_0 := v.Args[0] 13595 if v_0.Op != OpAMD64FlagGT_ULT { 13596 break 13597 } 13598 v.reset(OpAMD64MOVLconst) 13599 v.AuxInt = 0 13600 return true 13601 } 13602 // match: (SETL (FlagGT_UGT)) 13603 // cond: 13604 // result: (MOVLconst [0]) 13605 for { 13606 v_0 := v.Args[0] 13607 if v_0.Op != OpAMD64FlagGT_UGT { 13608 break 13609 } 13610 v.reset(OpAMD64MOVLconst) 13611 v.AuxInt = 0 13612 return true 13613 } 13614 return false 13615 } 13616 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { 13617 b := v.Block 13618 _ = b 13619 // match: (SETLE (InvertFlags x)) 13620 // cond: 13621 // result: (SETGE x) 13622 for { 13623 v_0 := v.Args[0] 13624 if v_0.Op != OpAMD64InvertFlags { 13625 break 13626 } 13627 x := v_0.Args[0] 13628 v.reset(OpAMD64SETGE) 13629 v.AddArg(x) 13630 return true 13631 } 13632 // match: (SETLE (FlagEQ)) 13633 // cond: 13634 // result: (MOVLconst [1]) 13635 for { 13636 v_0 := v.Args[0] 13637 if v_0.Op != OpAMD64FlagEQ { 13638 break 13639 } 13640 v.reset(OpAMD64MOVLconst) 13641 v.AuxInt = 1 13642 return true 13643 } 13644 // match: (SETLE (FlagLT_ULT)) 13645 // cond: 13646 // result: (MOVLconst [1]) 13647 for { 13648 v_0 := v.Args[0] 13649 if v_0.Op != OpAMD64FlagLT_ULT { 13650 break 13651 } 13652 v.reset(OpAMD64MOVLconst) 13653 v.AuxInt = 1 13654 return true 13655 } 13656 // match: (SETLE (FlagLT_UGT)) 13657 // cond: 13658 // result: (MOVLconst [1]) 13659 for { 13660 v_0 := v.Args[0] 13661 if v_0.Op != OpAMD64FlagLT_UGT { 13662 break 13663 } 13664 v.reset(OpAMD64MOVLconst) 13665 v.AuxInt = 1 13666 return true 13667 } 13668 // match: (SETLE (FlagGT_ULT)) 13669 // cond: 13670 // result: (MOVLconst [0]) 13671 for { 13672 v_0 := v.Args[0] 13673 if v_0.Op != OpAMD64FlagGT_ULT { 13674 break 13675 } 13676 v.reset(OpAMD64MOVLconst) 13677 v.AuxInt = 0 13678 return true 13679 } 13680 // match: (SETLE (FlagGT_UGT)) 13681 // cond: 13682 // result: (MOVLconst [0]) 13683 for { 13684 v_0 := v.Args[0] 13685 if v_0.Op != OpAMD64FlagGT_UGT { 13686 break 13687 } 13688 v.reset(OpAMD64MOVLconst) 13689 v.AuxInt = 0 13690 return true 13691 } 13692 return false 13693 } 13694 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { 13695 b := v.Block 13696 _ = b 13697 // match: (SETNE (InvertFlags x)) 13698 // cond: 13699 // result: (SETNE x) 13700 for { 13701 v_0 := v.Args[0] 13702 if v_0.Op != OpAMD64InvertFlags { 13703 break 13704 } 13705 x := v_0.Args[0] 13706 v.reset(OpAMD64SETNE) 13707 v.AddArg(x) 13708 return true 13709 } 13710 // match: (SETNE (FlagEQ)) 13711 // cond: 13712 // result: (MOVLconst [0]) 13713 for { 13714 v_0 := v.Args[0] 13715 if v_0.Op != OpAMD64FlagEQ { 13716 break 13717 } 13718 v.reset(OpAMD64MOVLconst) 13719 v.AuxInt = 0 13720 return true 13721 } 13722 // match: (SETNE (FlagLT_ULT)) 13723 // cond: 13724 // result: (MOVLconst [1]) 13725 for { 13726 v_0 := v.Args[0] 13727 if v_0.Op != OpAMD64FlagLT_ULT { 13728 break 13729 } 13730 v.reset(OpAMD64MOVLconst) 13731 v.AuxInt = 1 13732 return true 13733 } 13734 // match: (SETNE (FlagLT_UGT)) 13735 // cond: 13736 // result: (MOVLconst [1]) 13737 for { 13738 v_0 := v.Args[0] 13739 if v_0.Op != OpAMD64FlagLT_UGT { 13740 break 13741 } 13742 v.reset(OpAMD64MOVLconst) 13743 v.AuxInt = 1 13744 return true 13745 } 13746 // match: (SETNE (FlagGT_ULT)) 13747 // cond: 13748 // result: (MOVLconst [1]) 13749 for { 13750 v_0 := v.Args[0] 13751 if v_0.Op != OpAMD64FlagGT_ULT { 13752 break 13753 } 13754 v.reset(OpAMD64MOVLconst) 13755 v.AuxInt = 1 13756 return true 13757 } 13758 // match: (SETNE (FlagGT_UGT)) 13759 // cond: 13760 // result: (MOVLconst [1]) 13761 for { 13762 v_0 := v.Args[0] 13763 if v_0.Op != OpAMD64FlagGT_UGT { 13764 break 13765 } 13766 v.reset(OpAMD64MOVLconst) 13767 v.AuxInt = 1 13768 return true 13769 } 13770 return false 13771 } 13772 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { 13773 b := v.Block 13774 _ = b 13775 // match: (SHLL x (MOVQconst [c])) 13776 // cond: 13777 // result: (SHLLconst [c&31] x) 13778 for { 13779 x := v.Args[0] 13780 v_1 := v.Args[1] 13781 if v_1.Op != OpAMD64MOVQconst { 13782 break 13783 } 13784 c := v_1.AuxInt 13785 v.reset(OpAMD64SHLLconst) 13786 v.AuxInt = c & 31 13787 v.AddArg(x) 13788 return true 13789 } 13790 // match: (SHLL x (MOVLconst [c])) 13791 // cond: 13792 // result: (SHLLconst [c&31] x) 13793 for { 13794 x := v.Args[0] 13795 v_1 := v.Args[1] 13796 if v_1.Op != OpAMD64MOVLconst { 13797 break 13798 } 13799 c := v_1.AuxInt 13800 v.reset(OpAMD64SHLLconst) 13801 v.AuxInt = c & 31 13802 v.AddArg(x) 13803 return true 13804 } 13805 // match: (SHLL x (ANDLconst [31] y)) 13806 // cond: 13807 // result: (SHLL x y) 13808 for { 13809 x := v.Args[0] 13810 v_1 := v.Args[1] 13811 if v_1.Op != OpAMD64ANDLconst { 13812 break 13813 } 13814 if v_1.AuxInt != 31 { 13815 break 13816 } 13817 y := v_1.Args[0] 13818 v.reset(OpAMD64SHLL) 13819 v.AddArg(x) 13820 v.AddArg(y) 13821 return true 13822 } 13823 return false 13824 } 13825 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { 13826 b := v.Block 13827 _ = b 13828 // match: (SHLQ x (MOVQconst [c])) 13829 // cond: 13830 // result: (SHLQconst [c&63] x) 13831 for { 13832 x := v.Args[0] 13833 v_1 := v.Args[1] 13834 if v_1.Op != OpAMD64MOVQconst { 13835 break 13836 } 13837 c := v_1.AuxInt 13838 v.reset(OpAMD64SHLQconst) 13839 v.AuxInt = c & 63 13840 v.AddArg(x) 13841 return true 13842 } 13843 // match: (SHLQ x (MOVLconst [c])) 13844 // cond: 13845 // result: (SHLQconst [c&63] x) 13846 for { 13847 x := v.Args[0] 13848 v_1 := v.Args[1] 13849 if v_1.Op != OpAMD64MOVLconst { 13850 break 13851 } 13852 c := v_1.AuxInt 13853 v.reset(OpAMD64SHLQconst) 13854 v.AuxInt = c & 63 13855 v.AddArg(x) 13856 return true 13857 } 13858 // match: (SHLQ x (ANDQconst [63] y)) 13859 // cond: 13860 // result: (SHLQ x y) 13861 for { 13862 x := v.Args[0] 13863 v_1 := v.Args[1] 13864 if v_1.Op != OpAMD64ANDQconst { 13865 break 13866 } 13867 if v_1.AuxInt != 63 { 13868 break 13869 } 13870 y := v_1.Args[0] 13871 v.reset(OpAMD64SHLQ) 13872 v.AddArg(x) 13873 v.AddArg(y) 13874 return true 13875 } 13876 return false 13877 } 13878 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 13879 b := v.Block 13880 _ = b 13881 // match: (SHRB x (MOVQconst [c])) 13882 // cond: 13883 // result: (SHRBconst [c&31] x) 13884 for { 13885 x := v.Args[0] 13886 v_1 := v.Args[1] 13887 if v_1.Op != OpAMD64MOVQconst { 13888 break 13889 } 13890 c := v_1.AuxInt 13891 v.reset(OpAMD64SHRBconst) 13892 v.AuxInt = c & 31 13893 v.AddArg(x) 13894 return true 13895 } 13896 // match: (SHRB x (MOVLconst [c])) 13897 // cond: 13898 // result: (SHRBconst [c&31] x) 13899 for { 13900 x := v.Args[0] 13901 v_1 := v.Args[1] 13902 if v_1.Op != OpAMD64MOVLconst { 13903 break 13904 } 13905 c := v_1.AuxInt 13906 v.reset(OpAMD64SHRBconst) 13907 v.AuxInt = c & 31 13908 v.AddArg(x) 13909 return true 13910 } 13911 return false 13912 } 13913 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 13914 b := v.Block 13915 _ = b 13916 // match: (SHRL x (MOVQconst [c])) 13917 // cond: 13918 // result: (SHRLconst [c&31] x) 13919 for { 13920 x := v.Args[0] 13921 v_1 := v.Args[1] 13922 if v_1.Op != OpAMD64MOVQconst { 13923 break 13924 } 13925 c := v_1.AuxInt 13926 v.reset(OpAMD64SHRLconst) 13927 v.AuxInt = c & 31 13928 v.AddArg(x) 13929 return true 13930 } 13931 // match: (SHRL x (MOVLconst [c])) 13932 // cond: 13933 // result: (SHRLconst [c&31] x) 13934 for { 13935 x := v.Args[0] 13936 v_1 := v.Args[1] 13937 if v_1.Op != OpAMD64MOVLconst { 13938 break 13939 } 13940 c := v_1.AuxInt 13941 v.reset(OpAMD64SHRLconst) 13942 v.AuxInt = c & 31 13943 v.AddArg(x) 13944 return true 13945 } 13946 // match: (SHRL x (ANDLconst [31] y)) 13947 // cond: 13948 // result: (SHRL x y) 13949 for { 13950 x := v.Args[0] 13951 v_1 := v.Args[1] 13952 if v_1.Op != OpAMD64ANDLconst { 13953 break 13954 } 13955 if v_1.AuxInt != 31 { 13956 break 13957 } 13958 y := v_1.Args[0] 13959 v.reset(OpAMD64SHRL) 13960 v.AddArg(x) 13961 v.AddArg(y) 13962 return true 13963 } 13964 return false 13965 } 13966 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 13967 b := v.Block 13968 _ = b 13969 // match: (SHRQ x (MOVQconst [c])) 13970 // cond: 13971 // result: (SHRQconst [c&63] x) 13972 for { 13973 x := v.Args[0] 13974 v_1 := v.Args[1] 13975 if v_1.Op != OpAMD64MOVQconst { 13976 break 13977 } 13978 c := v_1.AuxInt 13979 v.reset(OpAMD64SHRQconst) 13980 v.AuxInt = c & 63 13981 v.AddArg(x) 13982 return true 13983 } 13984 // match: (SHRQ x (MOVLconst [c])) 13985 // cond: 13986 // result: (SHRQconst [c&63] x) 13987 for { 13988 x := v.Args[0] 13989 v_1 := v.Args[1] 13990 if v_1.Op != OpAMD64MOVLconst { 13991 break 13992 } 13993 c := v_1.AuxInt 13994 v.reset(OpAMD64SHRQconst) 13995 v.AuxInt = c & 63 13996 v.AddArg(x) 13997 return true 13998 } 13999 // match: (SHRQ x (ANDQconst [63] y)) 14000 // cond: 14001 // result: (SHRQ x y) 14002 for { 14003 x := v.Args[0] 14004 v_1 := v.Args[1] 14005 if v_1.Op != OpAMD64ANDQconst { 14006 break 14007 } 14008 if v_1.AuxInt != 63 { 14009 break 14010 } 14011 y := v_1.Args[0] 14012 v.reset(OpAMD64SHRQ) 14013 v.AddArg(x) 14014 v.AddArg(y) 14015 return true 14016 } 14017 return false 14018 } 14019 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { 14020 b := v.Block 14021 _ = b 14022 // match: (SHRW x (MOVQconst [c])) 14023 // cond: 14024 // result: (SHRWconst [c&31] x) 14025 for { 14026 x := v.Args[0] 14027 v_1 := v.Args[1] 14028 if v_1.Op != OpAMD64MOVQconst { 14029 break 14030 } 14031 c := v_1.AuxInt 14032 v.reset(OpAMD64SHRWconst) 14033 v.AuxInt = c & 31 14034 v.AddArg(x) 14035 return true 14036 } 14037 // match: (SHRW x (MOVLconst [c])) 14038 // cond: 14039 // result: (SHRWconst [c&31] x) 14040 for { 14041 x := v.Args[0] 14042 v_1 := v.Args[1] 14043 if v_1.Op != OpAMD64MOVLconst { 14044 break 14045 } 14046 c := v_1.AuxInt 14047 v.reset(OpAMD64SHRWconst) 14048 v.AuxInt = c & 31 14049 v.AddArg(x) 14050 return true 14051 } 14052 return false 14053 } 14054 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { 14055 b := v.Block 14056 _ = b 14057 // match: (SUBL x (MOVLconst [c])) 14058 // cond: 14059 // result: (SUBLconst x [c]) 14060 for { 14061 x := v.Args[0] 14062 v_1 := v.Args[1] 14063 if v_1.Op != OpAMD64MOVLconst { 14064 break 14065 } 14066 c := v_1.AuxInt 14067 v.reset(OpAMD64SUBLconst) 14068 v.AuxInt = c 14069 v.AddArg(x) 14070 return true 14071 } 14072 // match: (SUBL (MOVLconst [c]) x) 14073 // cond: 14074 // result: (NEGL (SUBLconst <v.Type> x [c])) 14075 for { 14076 v_0 := v.Args[0] 14077 if v_0.Op != OpAMD64MOVLconst { 14078 break 14079 } 14080 c := v_0.AuxInt 14081 x := v.Args[1] 14082 v.reset(OpAMD64NEGL) 14083 v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type) 14084 v0.AuxInt = c 14085 v0.AddArg(x) 14086 v.AddArg(v0) 14087 return true 14088 } 14089 // match: (SUBL x x) 14090 // cond: 14091 // result: (MOVLconst [0]) 14092 for { 14093 x := v.Args[0] 14094 if x != v.Args[1] { 14095 break 14096 } 14097 v.reset(OpAMD64MOVLconst) 14098 v.AuxInt = 0 14099 return true 14100 } 14101 return false 14102 } 14103 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { 14104 b := v.Block 14105 _ = b 14106 // match: (SUBLconst [c] x) 14107 // cond: int32(c) == 0 14108 // result: x 14109 for { 14110 c := v.AuxInt 14111 x := v.Args[0] 14112 if !(int32(c) == 0) { 14113 break 14114 } 14115 v.reset(OpCopy) 14116 v.Type = x.Type 14117 v.AddArg(x) 14118 return true 14119 } 14120 // match: (SUBLconst [c] x) 14121 // cond: 14122 // result: (ADDLconst [int64(int32(-c))] x) 14123 for { 14124 c := v.AuxInt 14125 x := v.Args[0] 14126 v.reset(OpAMD64ADDLconst) 14127 v.AuxInt = int64(int32(-c)) 14128 v.AddArg(x) 14129 return true 14130 } 14131 } 14132 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { 14133 b := v.Block 14134 _ = b 14135 // match: (SUBQ x (MOVQconst [c])) 14136 // cond: is32Bit(c) 14137 // result: (SUBQconst x [c]) 14138 for { 14139 x := v.Args[0] 14140 v_1 := v.Args[1] 14141 if v_1.Op != OpAMD64MOVQconst { 14142 break 14143 } 14144 c := v_1.AuxInt 14145 if !(is32Bit(c)) { 14146 break 14147 } 14148 v.reset(OpAMD64SUBQconst) 14149 v.AuxInt = c 14150 v.AddArg(x) 14151 return true 14152 } 14153 // match: (SUBQ (MOVQconst [c]) x) 14154 // cond: is32Bit(c) 14155 // result: (NEGQ (SUBQconst <v.Type> x [c])) 14156 for { 14157 v_0 := v.Args[0] 14158 if v_0.Op != OpAMD64MOVQconst { 14159 break 14160 } 14161 c := v_0.AuxInt 14162 x := v.Args[1] 14163 if !(is32Bit(c)) { 14164 break 14165 } 14166 v.reset(OpAMD64NEGQ) 14167 v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type) 14168 v0.AuxInt = c 14169 v0.AddArg(x) 14170 v.AddArg(v0) 14171 return true 14172 } 14173 // match: (SUBQ x x) 14174 // cond: 14175 // result: (MOVQconst [0]) 14176 for { 14177 x := v.Args[0] 14178 if x != v.Args[1] { 14179 break 14180 } 14181 v.reset(OpAMD64MOVQconst) 14182 v.AuxInt = 0 14183 return true 14184 } 14185 return false 14186 } 14187 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { 14188 b := v.Block 14189 _ = b 14190 // match: (SUBQconst [0] x) 14191 // cond: 14192 // result: x 14193 for { 14194 if v.AuxInt != 0 { 14195 break 14196 } 14197 x := v.Args[0] 14198 v.reset(OpCopy) 14199 v.Type = x.Type 14200 v.AddArg(x) 14201 return true 14202 } 14203 // match: (SUBQconst [c] x) 14204 // cond: c != -(1<<31) 14205 // result: (ADDQconst [-c] x) 14206 for { 14207 c := v.AuxInt 14208 x := v.Args[0] 14209 if !(c != -(1 << 31)) { 14210 break 14211 } 14212 v.reset(OpAMD64ADDQconst) 14213 v.AuxInt = -c 14214 v.AddArg(x) 14215 return true 14216 } 14217 // match: (SUBQconst (MOVQconst [d]) [c]) 14218 // cond: 14219 // result: (MOVQconst [d-c]) 14220 for { 14221 c := v.AuxInt 14222 v_0 := v.Args[0] 14223 if v_0.Op != OpAMD64MOVQconst { 14224 break 14225 } 14226 d := v_0.AuxInt 14227 v.reset(OpAMD64MOVQconst) 14228 v.AuxInt = d - c 14229 return true 14230 } 14231 // match: (SUBQconst (SUBQconst x [d]) [c]) 14232 // cond: is32Bit(-c-d) 14233 // result: (ADDQconst [-c-d] x) 14234 for { 14235 c := v.AuxInt 14236 v_0 := v.Args[0] 14237 if v_0.Op != OpAMD64SUBQconst { 14238 break 14239 } 14240 d := v_0.AuxInt 14241 x := v_0.Args[0] 14242 if !(is32Bit(-c - d)) { 14243 break 14244 } 14245 v.reset(OpAMD64ADDQconst) 14246 v.AuxInt = -c - d 14247 v.AddArg(x) 14248 return true 14249 } 14250 return false 14251 } 14252 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool { 14253 b := v.Block 14254 _ = b 14255 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 14256 // cond: is32Bit(off1+off2) 14257 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 14258 for { 14259 off1 := v.AuxInt 14260 sym := v.Aux 14261 val := v.Args[0] 14262 v_1 := v.Args[1] 14263 if v_1.Op != OpAMD64ADDQconst { 14264 break 14265 } 14266 off2 := v_1.AuxInt 14267 ptr := v_1.Args[0] 14268 mem := v.Args[2] 14269 if !(is32Bit(off1 + off2)) { 14270 break 14271 } 14272 v.reset(OpAMD64XADDLlock) 14273 v.AuxInt = off1 + off2 14274 v.Aux = sym 14275 v.AddArg(val) 14276 v.AddArg(ptr) 14277 v.AddArg(mem) 14278 return true 14279 } 14280 return false 14281 } 14282 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool { 14283 b := v.Block 14284 _ = b 14285 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 14286 // cond: is32Bit(off1+off2) 14287 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 14288 for { 14289 off1 := v.AuxInt 14290 sym := v.Aux 14291 val := v.Args[0] 14292 v_1 := v.Args[1] 14293 if v_1.Op != OpAMD64ADDQconst { 14294 break 14295 } 14296 off2 := v_1.AuxInt 14297 ptr := v_1.Args[0] 14298 mem := v.Args[2] 14299 if !(is32Bit(off1 + off2)) { 14300 break 14301 } 14302 v.reset(OpAMD64XADDQlock) 14303 v.AuxInt = off1 + off2 14304 v.Aux = sym 14305 v.AddArg(val) 14306 v.AddArg(ptr) 14307 v.AddArg(mem) 14308 return true 14309 } 14310 return false 14311 } 14312 func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool { 14313 b := v.Block 14314 _ = b 14315 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 14316 // cond: is32Bit(off1+off2) 14317 // result: (XCHGL [off1+off2] {sym} val ptr mem) 14318 for { 14319 off1 := v.AuxInt 14320 sym := v.Aux 14321 val := v.Args[0] 14322 v_1 := v.Args[1] 14323 if v_1.Op != OpAMD64ADDQconst { 14324 break 14325 } 14326 off2 := v_1.AuxInt 14327 ptr := v_1.Args[0] 14328 mem := v.Args[2] 14329 if !(is32Bit(off1 + off2)) { 14330 break 14331 } 14332 v.reset(OpAMD64XCHGL) 14333 v.AuxInt = off1 + off2 14334 v.Aux = sym 14335 v.AddArg(val) 14336 v.AddArg(ptr) 14337 v.AddArg(mem) 14338 return true 14339 } 14340 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 14341 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 14342 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 14343 for { 14344 off1 := v.AuxInt 14345 sym1 := v.Aux 14346 val := v.Args[0] 14347 v_1 := v.Args[1] 14348 if v_1.Op != OpAMD64LEAQ { 14349 break 14350 } 14351 off2 := v_1.AuxInt 14352 sym2 := v_1.Aux 14353 ptr := v_1.Args[0] 14354 mem := v.Args[2] 14355 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 14356 break 14357 } 14358 v.reset(OpAMD64XCHGL) 14359 v.AuxInt = off1 + off2 14360 v.Aux = mergeSym(sym1, sym2) 14361 v.AddArg(val) 14362 v.AddArg(ptr) 14363 v.AddArg(mem) 14364 return true 14365 } 14366 return false 14367 } 14368 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool { 14369 b := v.Block 14370 _ = b 14371 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 14372 // cond: is32Bit(off1+off2) 14373 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 14374 for { 14375 off1 := v.AuxInt 14376 sym := v.Aux 14377 val := v.Args[0] 14378 v_1 := v.Args[1] 14379 if v_1.Op != OpAMD64ADDQconst { 14380 break 14381 } 14382 off2 := v_1.AuxInt 14383 ptr := v_1.Args[0] 14384 mem := v.Args[2] 14385 if !(is32Bit(off1 + off2)) { 14386 break 14387 } 14388 v.reset(OpAMD64XCHGQ) 14389 v.AuxInt = off1 + off2 14390 v.Aux = sym 14391 v.AddArg(val) 14392 v.AddArg(ptr) 14393 v.AddArg(mem) 14394 return true 14395 } 14396 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 14397 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 14398 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 14399 for { 14400 off1 := v.AuxInt 14401 sym1 := v.Aux 14402 val := v.Args[0] 14403 v_1 := v.Args[1] 14404 if v_1.Op != OpAMD64LEAQ { 14405 break 14406 } 14407 off2 := v_1.AuxInt 14408 sym2 := v_1.Aux 14409 ptr := v_1.Args[0] 14410 mem := v.Args[2] 14411 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 14412 break 14413 } 14414 v.reset(OpAMD64XCHGQ) 14415 v.AuxInt = off1 + off2 14416 v.Aux = mergeSym(sym1, sym2) 14417 v.AddArg(val) 14418 v.AddArg(ptr) 14419 v.AddArg(mem) 14420 return true 14421 } 14422 return false 14423 } 14424 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { 14425 b := v.Block 14426 _ = b 14427 // match: (XORL x (MOVLconst [c])) 14428 // cond: 14429 // result: (XORLconst [c] x) 14430 for { 14431 x := v.Args[0] 14432 v_1 := v.Args[1] 14433 if v_1.Op != OpAMD64MOVLconst { 14434 break 14435 } 14436 c := v_1.AuxInt 14437 v.reset(OpAMD64XORLconst) 14438 v.AuxInt = c 14439 v.AddArg(x) 14440 return true 14441 } 14442 // match: (XORL (MOVLconst [c]) x) 14443 // cond: 14444 // result: (XORLconst [c] x) 14445 for { 14446 v_0 := v.Args[0] 14447 if v_0.Op != OpAMD64MOVLconst { 14448 break 14449 } 14450 c := v_0.AuxInt 14451 x := v.Args[1] 14452 v.reset(OpAMD64XORLconst) 14453 v.AuxInt = c 14454 v.AddArg(x) 14455 return true 14456 } 14457 // match: (XORL x x) 14458 // cond: 14459 // result: (MOVLconst [0]) 14460 for { 14461 x := v.Args[0] 14462 if x != v.Args[1] { 14463 break 14464 } 14465 v.reset(OpAMD64MOVLconst) 14466 v.AuxInt = 0 14467 return true 14468 } 14469 return false 14470 } 14471 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { 14472 b := v.Block 14473 _ = b 14474 // match: (XORLconst [c] (XORLconst [d] x)) 14475 // cond: 14476 // result: (XORLconst [c ^ d] x) 14477 for { 14478 c := v.AuxInt 14479 v_0 := v.Args[0] 14480 if v_0.Op != OpAMD64XORLconst { 14481 break 14482 } 14483 d := v_0.AuxInt 14484 x := v_0.Args[0] 14485 v.reset(OpAMD64XORLconst) 14486 v.AuxInt = c ^ d 14487 v.AddArg(x) 14488 return true 14489 } 14490 // match: (XORLconst [c] x) 14491 // cond: int32(c)==0 14492 // result: x 14493 for { 14494 c := v.AuxInt 14495 x := v.Args[0] 14496 if !(int32(c) == 0) { 14497 break 14498 } 14499 v.reset(OpCopy) 14500 v.Type = x.Type 14501 v.AddArg(x) 14502 return true 14503 } 14504 // match: (XORLconst [c] (MOVLconst [d])) 14505 // cond: 14506 // result: (MOVLconst [c^d]) 14507 for { 14508 c := v.AuxInt 14509 v_0 := v.Args[0] 14510 if v_0.Op != OpAMD64MOVLconst { 14511 break 14512 } 14513 d := v_0.AuxInt 14514 v.reset(OpAMD64MOVLconst) 14515 v.AuxInt = c ^ d 14516 return true 14517 } 14518 return false 14519 } 14520 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { 14521 b := v.Block 14522 _ = b 14523 // match: (XORQ x (MOVQconst [c])) 14524 // cond: is32Bit(c) 14525 // result: (XORQconst [c] x) 14526 for { 14527 x := v.Args[0] 14528 v_1 := v.Args[1] 14529 if v_1.Op != OpAMD64MOVQconst { 14530 break 14531 } 14532 c := v_1.AuxInt 14533 if !(is32Bit(c)) { 14534 break 14535 } 14536 v.reset(OpAMD64XORQconst) 14537 v.AuxInt = c 14538 v.AddArg(x) 14539 return true 14540 } 14541 // match: (XORQ (MOVQconst [c]) x) 14542 // cond: is32Bit(c) 14543 // result: (XORQconst [c] x) 14544 for { 14545 v_0 := v.Args[0] 14546 if v_0.Op != OpAMD64MOVQconst { 14547 break 14548 } 14549 c := v_0.AuxInt 14550 x := v.Args[1] 14551 if !(is32Bit(c)) { 14552 break 14553 } 14554 v.reset(OpAMD64XORQconst) 14555 v.AuxInt = c 14556 v.AddArg(x) 14557 return true 14558 } 14559 // match: (XORQ x x) 14560 // cond: 14561 // result: (MOVQconst [0]) 14562 for { 14563 x := v.Args[0] 14564 if x != v.Args[1] { 14565 break 14566 } 14567 v.reset(OpAMD64MOVQconst) 14568 v.AuxInt = 0 14569 return true 14570 } 14571 return false 14572 } 14573 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { 14574 b := v.Block 14575 _ = b 14576 // match: (XORQconst [c] (XORQconst [d] x)) 14577 // cond: 14578 // result: (XORQconst [c ^ d] x) 14579 for { 14580 c := v.AuxInt 14581 v_0 := v.Args[0] 14582 if v_0.Op != OpAMD64XORQconst { 14583 break 14584 } 14585 d := v_0.AuxInt 14586 x := v_0.Args[0] 14587 v.reset(OpAMD64XORQconst) 14588 v.AuxInt = c ^ d 14589 v.AddArg(x) 14590 return true 14591 } 14592 // match: (XORQconst [0] x) 14593 // cond: 14594 // result: x 14595 for { 14596 if v.AuxInt != 0 { 14597 break 14598 } 14599 x := v.Args[0] 14600 v.reset(OpCopy) 14601 v.Type = x.Type 14602 v.AddArg(x) 14603 return true 14604 } 14605 // match: (XORQconst [c] (MOVQconst [d])) 14606 // cond: 14607 // result: (MOVQconst [c^d]) 14608 for { 14609 c := v.AuxInt 14610 v_0 := v.Args[0] 14611 if v_0.Op != OpAMD64MOVQconst { 14612 break 14613 } 14614 d := v_0.AuxInt 14615 v.reset(OpAMD64MOVQconst) 14616 v.AuxInt = c ^ d 14617 return true 14618 } 14619 return false 14620 } 14621 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { 14622 b := v.Block 14623 _ = b 14624 // match: (Add16 x y) 14625 // cond: 14626 // result: (ADDL x y) 14627 for { 14628 x := v.Args[0] 14629 y := v.Args[1] 14630 v.reset(OpAMD64ADDL) 14631 v.AddArg(x) 14632 v.AddArg(y) 14633 return true 14634 } 14635 } 14636 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool { 14637 b := v.Block 14638 _ = b 14639 // match: (Add32 x y) 14640 // cond: 14641 // result: (ADDL x y) 14642 for { 14643 x := v.Args[0] 14644 y := v.Args[1] 14645 v.reset(OpAMD64ADDL) 14646 v.AddArg(x) 14647 v.AddArg(y) 14648 return true 14649 } 14650 } 14651 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool { 14652 b := v.Block 14653 _ = b 14654 // match: (Add32F x y) 14655 // cond: 14656 // result: (ADDSS x y) 14657 for { 14658 x := v.Args[0] 14659 y := v.Args[1] 14660 v.reset(OpAMD64ADDSS) 14661 v.AddArg(x) 14662 v.AddArg(y) 14663 return true 14664 } 14665 } 14666 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool { 14667 b := v.Block 14668 _ = b 14669 // match: (Add64 x y) 14670 // cond: 14671 // result: (ADDQ x y) 14672 for { 14673 x := v.Args[0] 14674 y := v.Args[1] 14675 v.reset(OpAMD64ADDQ) 14676 v.AddArg(x) 14677 v.AddArg(y) 14678 return true 14679 } 14680 } 14681 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool { 14682 b := v.Block 14683 _ = b 14684 // match: (Add64F x y) 14685 // cond: 14686 // result: (ADDSD x y) 14687 for { 14688 x := v.Args[0] 14689 y := v.Args[1] 14690 v.reset(OpAMD64ADDSD) 14691 v.AddArg(x) 14692 v.AddArg(y) 14693 return true 14694 } 14695 } 14696 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { 14697 b := v.Block 14698 _ = b 14699 // match: (Add8 x y) 14700 // cond: 14701 // result: (ADDL x y) 14702 for { 14703 x := v.Args[0] 14704 y := v.Args[1] 14705 v.reset(OpAMD64ADDL) 14706 v.AddArg(x) 14707 v.AddArg(y) 14708 return true 14709 } 14710 } 14711 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool { 14712 b := v.Block 14713 _ = b 14714 // match: (AddPtr x y) 14715 // cond: config.PtrSize == 8 14716 // result: (ADDQ x y) 14717 for { 14718 x := v.Args[0] 14719 y := v.Args[1] 14720 if !(config.PtrSize == 8) { 14721 break 14722 } 14723 v.reset(OpAMD64ADDQ) 14724 v.AddArg(x) 14725 v.AddArg(y) 14726 return true 14727 } 14728 // match: (AddPtr x y) 14729 // cond: config.PtrSize == 4 14730 // result: (ADDL x y) 14731 for { 14732 x := v.Args[0] 14733 y := v.Args[1] 14734 if !(config.PtrSize == 4) { 14735 break 14736 } 14737 v.reset(OpAMD64ADDL) 14738 v.AddArg(x) 14739 v.AddArg(y) 14740 return true 14741 } 14742 return false 14743 } 14744 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool { 14745 b := v.Block 14746 _ = b 14747 // match: (Addr {sym} base) 14748 // cond: config.PtrSize == 8 14749 // result: (LEAQ {sym} base) 14750 for { 14751 sym := v.Aux 14752 base := v.Args[0] 14753 if !(config.PtrSize == 8) { 14754 break 14755 } 14756 v.reset(OpAMD64LEAQ) 14757 v.Aux = sym 14758 v.AddArg(base) 14759 return true 14760 } 14761 // match: (Addr {sym} base) 14762 // cond: config.PtrSize == 4 14763 // result: (LEAL {sym} base) 14764 for { 14765 sym := v.Aux 14766 base := v.Args[0] 14767 if !(config.PtrSize == 4) { 14768 break 14769 } 14770 v.reset(OpAMD64LEAL) 14771 v.Aux = sym 14772 v.AddArg(base) 14773 return true 14774 } 14775 return false 14776 } 14777 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { 14778 b := v.Block 14779 _ = b 14780 // match: (And16 x y) 14781 // cond: 14782 // result: (ANDL x y) 14783 for { 14784 x := v.Args[0] 14785 y := v.Args[1] 14786 v.reset(OpAMD64ANDL) 14787 v.AddArg(x) 14788 v.AddArg(y) 14789 return true 14790 } 14791 } 14792 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool { 14793 b := v.Block 14794 _ = b 14795 // match: (And32 x y) 14796 // cond: 14797 // result: (ANDL x y) 14798 for { 14799 x := v.Args[0] 14800 y := v.Args[1] 14801 v.reset(OpAMD64ANDL) 14802 v.AddArg(x) 14803 v.AddArg(y) 14804 return true 14805 } 14806 } 14807 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool { 14808 b := v.Block 14809 _ = b 14810 // match: (And64 x y) 14811 // cond: 14812 // result: (ANDQ x y) 14813 for { 14814 x := v.Args[0] 14815 y := v.Args[1] 14816 v.reset(OpAMD64ANDQ) 14817 v.AddArg(x) 14818 v.AddArg(y) 14819 return true 14820 } 14821 } 14822 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { 14823 b := v.Block 14824 _ = b 14825 // match: (And8 x y) 14826 // cond: 14827 // result: (ANDL x y) 14828 for { 14829 x := v.Args[0] 14830 y := v.Args[1] 14831 v.reset(OpAMD64ANDL) 14832 v.AddArg(x) 14833 v.AddArg(y) 14834 return true 14835 } 14836 } 14837 func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool { 14838 b := v.Block 14839 _ = b 14840 // match: (AndB x y) 14841 // cond: 14842 // result: (ANDL x y) 14843 for { 14844 x := v.Args[0] 14845 y := v.Args[1] 14846 v.reset(OpAMD64ANDL) 14847 v.AddArg(x) 14848 v.AddArg(y) 14849 return true 14850 } 14851 } 14852 func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool { 14853 b := v.Block 14854 _ = b 14855 // match: (AtomicAdd32 ptr val mem) 14856 // cond: 14857 // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) 14858 for { 14859 ptr := v.Args[0] 14860 val := v.Args[1] 14861 mem := v.Args[2] 14862 v.reset(OpAMD64AddTupleFirst32) 14863 v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem)) 14864 v0.AddArg(val) 14865 v0.AddArg(ptr) 14866 v0.AddArg(mem) 14867 v.AddArg(v0) 14868 v.AddArg(val) 14869 return true 14870 } 14871 } 14872 func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool { 14873 b := v.Block 14874 _ = b 14875 // match: (AtomicAdd64 ptr val mem) 14876 // cond: 14877 // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) 14878 for { 14879 ptr := v.Args[0] 14880 val := v.Args[1] 14881 mem := v.Args[2] 14882 v.reset(OpAMD64AddTupleFirst64) 14883 v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem)) 14884 v0.AddArg(val) 14885 v0.AddArg(ptr) 14886 v0.AddArg(mem) 14887 v.AddArg(v0) 14888 v.AddArg(val) 14889 return true 14890 } 14891 } 14892 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool { 14893 b := v.Block 14894 _ = b 14895 // match: (AtomicAnd8 ptr val mem) 14896 // cond: 14897 // result: (ANDBlock ptr val mem) 14898 for { 14899 ptr := v.Args[0] 14900 val := v.Args[1] 14901 mem := v.Args[2] 14902 v.reset(OpAMD64ANDBlock) 14903 v.AddArg(ptr) 14904 v.AddArg(val) 14905 v.AddArg(mem) 14906 return true 14907 } 14908 } 14909 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool { 14910 b := v.Block 14911 _ = b 14912 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 14913 // cond: 14914 // result: (CMPXCHGLlock ptr old new_ mem) 14915 for { 14916 ptr := v.Args[0] 14917 old := v.Args[1] 14918 new_ := v.Args[2] 14919 mem := v.Args[3] 14920 v.reset(OpAMD64CMPXCHGLlock) 14921 v.AddArg(ptr) 14922 v.AddArg(old) 14923 v.AddArg(new_) 14924 v.AddArg(mem) 14925 return true 14926 } 14927 } 14928 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool { 14929 b := v.Block 14930 _ = b 14931 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 14932 // cond: 14933 // result: (CMPXCHGQlock ptr old new_ mem) 14934 for { 14935 ptr := v.Args[0] 14936 old := v.Args[1] 14937 new_ := v.Args[2] 14938 mem := v.Args[3] 14939 v.reset(OpAMD64CMPXCHGQlock) 14940 v.AddArg(ptr) 14941 v.AddArg(old) 14942 v.AddArg(new_) 14943 v.AddArg(mem) 14944 return true 14945 } 14946 } 14947 func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool { 14948 b := v.Block 14949 _ = b 14950 // match: (AtomicExchange32 ptr val mem) 14951 // cond: 14952 // result: (XCHGL val ptr mem) 14953 for { 14954 ptr := v.Args[0] 14955 val := v.Args[1] 14956 mem := v.Args[2] 14957 v.reset(OpAMD64XCHGL) 14958 v.AddArg(val) 14959 v.AddArg(ptr) 14960 v.AddArg(mem) 14961 return true 14962 } 14963 } 14964 func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool { 14965 b := v.Block 14966 _ = b 14967 // match: (AtomicExchange64 ptr val mem) 14968 // cond: 14969 // result: (XCHGQ val ptr mem) 14970 for { 14971 ptr := v.Args[0] 14972 val := v.Args[1] 14973 mem := v.Args[2] 14974 v.reset(OpAMD64XCHGQ) 14975 v.AddArg(val) 14976 v.AddArg(ptr) 14977 v.AddArg(mem) 14978 return true 14979 } 14980 } 14981 func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool { 14982 b := v.Block 14983 _ = b 14984 // match: (AtomicLoad32 ptr mem) 14985 // cond: 14986 // result: (MOVLatomicload ptr mem) 14987 for { 14988 ptr := v.Args[0] 14989 mem := v.Args[1] 14990 v.reset(OpAMD64MOVLatomicload) 14991 v.AddArg(ptr) 14992 v.AddArg(mem) 14993 return true 14994 } 14995 } 14996 func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool { 14997 b := v.Block 14998 _ = b 14999 // match: (AtomicLoad64 ptr mem) 15000 // cond: 15001 // result: (MOVQatomicload ptr mem) 15002 for { 15003 ptr := v.Args[0] 15004 mem := v.Args[1] 15005 v.reset(OpAMD64MOVQatomicload) 15006 v.AddArg(ptr) 15007 v.AddArg(mem) 15008 return true 15009 } 15010 } 15011 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool { 15012 b := v.Block 15013 _ = b 15014 // match: (AtomicLoadPtr ptr mem) 15015 // cond: config.PtrSize == 8 15016 // result: (MOVQatomicload ptr mem) 15017 for { 15018 ptr := v.Args[0] 15019 mem := v.Args[1] 15020 if !(config.PtrSize == 8) { 15021 break 15022 } 15023 v.reset(OpAMD64MOVQatomicload) 15024 v.AddArg(ptr) 15025 v.AddArg(mem) 15026 return true 15027 } 15028 // match: (AtomicLoadPtr ptr mem) 15029 // cond: config.PtrSize == 4 15030 // result: (MOVLatomicload ptr mem) 15031 for { 15032 ptr := v.Args[0] 15033 mem := v.Args[1] 15034 if !(config.PtrSize == 4) { 15035 break 15036 } 15037 v.reset(OpAMD64MOVLatomicload) 15038 v.AddArg(ptr) 15039 v.AddArg(mem) 15040 return true 15041 } 15042 return false 15043 } 15044 func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool { 15045 b := v.Block 15046 _ = b 15047 // match: (AtomicOr8 ptr val mem) 15048 // cond: 15049 // result: (ORBlock ptr val mem) 15050 for { 15051 ptr := v.Args[0] 15052 val := v.Args[1] 15053 mem := v.Args[2] 15054 v.reset(OpAMD64ORBlock) 15055 v.AddArg(ptr) 15056 v.AddArg(val) 15057 v.AddArg(mem) 15058 return true 15059 } 15060 } 15061 func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool { 15062 b := v.Block 15063 _ = b 15064 // match: (AtomicStore32 ptr val mem) 15065 // cond: 15066 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem)) 15067 for { 15068 ptr := v.Args[0] 15069 val := v.Args[1] 15070 mem := v.Args[2] 15071 v.reset(OpSelect1) 15072 v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem)) 15073 v0.AddArg(val) 15074 v0.AddArg(ptr) 15075 v0.AddArg(mem) 15076 v.AddArg(v0) 15077 return true 15078 } 15079 } 15080 func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool { 15081 b := v.Block 15082 _ = b 15083 // match: (AtomicStore64 ptr val mem) 15084 // cond: 15085 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem)) 15086 for { 15087 ptr := v.Args[0] 15088 val := v.Args[1] 15089 mem := v.Args[2] 15090 v.reset(OpSelect1) 15091 v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem)) 15092 v0.AddArg(val) 15093 v0.AddArg(ptr) 15094 v0.AddArg(mem) 15095 v.AddArg(v0) 15096 return true 15097 } 15098 } 15099 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool { 15100 b := v.Block 15101 _ = b 15102 // match: (AtomicStorePtrNoWB ptr val mem) 15103 // cond: config.PtrSize == 8 15104 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 15105 for { 15106 ptr := v.Args[0] 15107 val := v.Args[1] 15108 mem := v.Args[2] 15109 if !(config.PtrSize == 8) { 15110 break 15111 } 15112 v.reset(OpSelect1) 15113 v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 15114 v0.AddArg(val) 15115 v0.AddArg(ptr) 15116 v0.AddArg(mem) 15117 v.AddArg(v0) 15118 return true 15119 } 15120 // match: (AtomicStorePtrNoWB ptr val mem) 15121 // cond: config.PtrSize == 4 15122 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 15123 for { 15124 ptr := v.Args[0] 15125 val := v.Args[1] 15126 mem := v.Args[2] 15127 if !(config.PtrSize == 4) { 15128 break 15129 } 15130 v.reset(OpSelect1) 15131 v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 15132 v0.AddArg(val) 15133 v0.AddArg(ptr) 15134 v0.AddArg(mem) 15135 v.AddArg(v0) 15136 return true 15137 } 15138 return false 15139 } 15140 func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { 15141 b := v.Block 15142 _ = b 15143 // match: (Avg64u x y) 15144 // cond: 15145 // result: (AVGQU x y) 15146 for { 15147 x := v.Args[0] 15148 y := v.Args[1] 15149 v.reset(OpAMD64AVGQU) 15150 v.AddArg(x) 15151 v.AddArg(y) 15152 return true 15153 } 15154 } 15155 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool { 15156 b := v.Block 15157 _ = b 15158 // match: (Bswap32 x) 15159 // cond: 15160 // result: (BSWAPL x) 15161 for { 15162 x := v.Args[0] 15163 v.reset(OpAMD64BSWAPL) 15164 v.AddArg(x) 15165 return true 15166 } 15167 } 15168 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool { 15169 b := v.Block 15170 _ = b 15171 // match: (Bswap64 x) 15172 // cond: 15173 // result: (BSWAPQ x) 15174 for { 15175 x := v.Args[0] 15176 v.reset(OpAMD64BSWAPQ) 15177 v.AddArg(x) 15178 return true 15179 } 15180 } 15181 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool { 15182 b := v.Block 15183 _ = b 15184 // match: (ClosureCall [argwid] entry closure mem) 15185 // cond: 15186 // result: (CALLclosure [argwid] entry closure mem) 15187 for { 15188 argwid := v.AuxInt 15189 entry := v.Args[0] 15190 closure := v.Args[1] 15191 mem := v.Args[2] 15192 v.reset(OpAMD64CALLclosure) 15193 v.AuxInt = argwid 15194 v.AddArg(entry) 15195 v.AddArg(closure) 15196 v.AddArg(mem) 15197 return true 15198 } 15199 } 15200 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { 15201 b := v.Block 15202 _ = b 15203 // match: (Com16 x) 15204 // cond: 15205 // result: (NOTL x) 15206 for { 15207 x := v.Args[0] 15208 v.reset(OpAMD64NOTL) 15209 v.AddArg(x) 15210 return true 15211 } 15212 } 15213 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool { 15214 b := v.Block 15215 _ = b 15216 // match: (Com32 x) 15217 // cond: 15218 // result: (NOTL x) 15219 for { 15220 x := v.Args[0] 15221 v.reset(OpAMD64NOTL) 15222 v.AddArg(x) 15223 return true 15224 } 15225 } 15226 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool { 15227 b := v.Block 15228 _ = b 15229 // match: (Com64 x) 15230 // cond: 15231 // result: (NOTQ x) 15232 for { 15233 x := v.Args[0] 15234 v.reset(OpAMD64NOTQ) 15235 v.AddArg(x) 15236 return true 15237 } 15238 } 15239 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { 15240 b := v.Block 15241 _ = b 15242 // match: (Com8 x) 15243 // cond: 15244 // result: (NOTL x) 15245 for { 15246 x := v.Args[0] 15247 v.reset(OpAMD64NOTL) 15248 v.AddArg(x) 15249 return true 15250 } 15251 } 15252 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { 15253 b := v.Block 15254 _ = b 15255 // match: (Const16 [val]) 15256 // cond: 15257 // result: (MOVLconst [val]) 15258 for { 15259 val := v.AuxInt 15260 v.reset(OpAMD64MOVLconst) 15261 v.AuxInt = val 15262 return true 15263 } 15264 } 15265 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool { 15266 b := v.Block 15267 _ = b 15268 // match: (Const32 [val]) 15269 // cond: 15270 // result: (MOVLconst [val]) 15271 for { 15272 val := v.AuxInt 15273 v.reset(OpAMD64MOVLconst) 15274 v.AuxInt = val 15275 return true 15276 } 15277 } 15278 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool { 15279 b := v.Block 15280 _ = b 15281 // match: (Const32F [val]) 15282 // cond: 15283 // result: (MOVSSconst [val]) 15284 for { 15285 val := v.AuxInt 15286 v.reset(OpAMD64MOVSSconst) 15287 v.AuxInt = val 15288 return true 15289 } 15290 } 15291 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool { 15292 b := v.Block 15293 _ = b 15294 // match: (Const64 [val]) 15295 // cond: 15296 // result: (MOVQconst [val]) 15297 for { 15298 val := v.AuxInt 15299 v.reset(OpAMD64MOVQconst) 15300 v.AuxInt = val 15301 return true 15302 } 15303 } 15304 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool { 15305 b := v.Block 15306 _ = b 15307 // match: (Const64F [val]) 15308 // cond: 15309 // result: (MOVSDconst [val]) 15310 for { 15311 val := v.AuxInt 15312 v.reset(OpAMD64MOVSDconst) 15313 v.AuxInt = val 15314 return true 15315 } 15316 } 15317 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { 15318 b := v.Block 15319 _ = b 15320 // match: (Const8 [val]) 15321 // cond: 15322 // result: (MOVLconst [val]) 15323 for { 15324 val := v.AuxInt 15325 v.reset(OpAMD64MOVLconst) 15326 v.AuxInt = val 15327 return true 15328 } 15329 } 15330 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { 15331 b := v.Block 15332 _ = b 15333 // match: (ConstBool [b]) 15334 // cond: 15335 // result: (MOVLconst [b]) 15336 for { 15337 b := v.AuxInt 15338 v.reset(OpAMD64MOVLconst) 15339 v.AuxInt = b 15340 return true 15341 } 15342 } 15343 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool { 15344 b := v.Block 15345 _ = b 15346 // match: (ConstNil) 15347 // cond: config.PtrSize == 8 15348 // result: (MOVQconst [0]) 15349 for { 15350 if !(config.PtrSize == 8) { 15351 break 15352 } 15353 v.reset(OpAMD64MOVQconst) 15354 v.AuxInt = 0 15355 return true 15356 } 15357 // match: (ConstNil) 15358 // cond: config.PtrSize == 4 15359 // result: (MOVLconst [0]) 15360 for { 15361 if !(config.PtrSize == 4) { 15362 break 15363 } 15364 v.reset(OpAMD64MOVLconst) 15365 v.AuxInt = 0 15366 return true 15367 } 15368 return false 15369 } 15370 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool { 15371 b := v.Block 15372 _ = b 15373 // match: (Convert <t> x mem) 15374 // cond: config.PtrSize == 8 15375 // result: (MOVQconvert <t> x mem) 15376 for { 15377 t := v.Type 15378 x := v.Args[0] 15379 mem := v.Args[1] 15380 if !(config.PtrSize == 8) { 15381 break 15382 } 15383 v.reset(OpAMD64MOVQconvert) 15384 v.Type = t 15385 v.AddArg(x) 15386 v.AddArg(mem) 15387 return true 15388 } 15389 // match: (Convert <t> x mem) 15390 // cond: config.PtrSize == 4 15391 // result: (MOVLconvert <t> x mem) 15392 for { 15393 t := v.Type 15394 x := v.Args[0] 15395 mem := v.Args[1] 15396 if !(config.PtrSize == 4) { 15397 break 15398 } 15399 v.reset(OpAMD64MOVLconvert) 15400 v.Type = t 15401 v.AddArg(x) 15402 v.AddArg(mem) 15403 return true 15404 } 15405 return false 15406 } 15407 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool { 15408 b := v.Block 15409 _ = b 15410 // match: (Ctz32 <t> x) 15411 // cond: 15412 // result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x))) 15413 for { 15414 t := v.Type 15415 x := v.Args[0] 15416 v.reset(OpAMD64CMOVLEQ) 15417 v0 := b.NewValue0(v.Line, OpSelect0, t) 15418 v1 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 15419 v1.AddArg(x) 15420 v0.AddArg(v1) 15421 v.AddArg(v0) 15422 v2 := b.NewValue0(v.Line, OpAMD64MOVLconst, t) 15423 v2.AuxInt = 32 15424 v.AddArg(v2) 15425 v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags) 15426 v4 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 15427 v4.AddArg(x) 15428 v3.AddArg(v4) 15429 v.AddArg(v3) 15430 return true 15431 } 15432 } 15433 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool { 15434 b := v.Block 15435 _ = b 15436 // match: (Ctz64 <t> x) 15437 // cond: 15438 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x))) 15439 for { 15440 t := v.Type 15441 x := v.Args[0] 15442 v.reset(OpAMD64CMOVQEQ) 15443 v0 := b.NewValue0(v.Line, OpSelect0, t) 15444 v1 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 15445 v1.AddArg(x) 15446 v0.AddArg(v1) 15447 v.AddArg(v0) 15448 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, t) 15449 v2.AuxInt = 64 15450 v.AddArg(v2) 15451 v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags) 15452 v4 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 15453 v4.AddArg(x) 15454 v3.AddArg(v4) 15455 v.AddArg(v3) 15456 return true 15457 } 15458 } 15459 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool { 15460 b := v.Block 15461 _ = b 15462 // match: (Cvt32Fto32 x) 15463 // cond: 15464 // result: (CVTTSS2SL x) 15465 for { 15466 x := v.Args[0] 15467 v.reset(OpAMD64CVTTSS2SL) 15468 v.AddArg(x) 15469 return true 15470 } 15471 } 15472 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool { 15473 b := v.Block 15474 _ = b 15475 // match: (Cvt32Fto64 x) 15476 // cond: 15477 // result: (CVTTSS2SQ x) 15478 for { 15479 x := v.Args[0] 15480 v.reset(OpAMD64CVTTSS2SQ) 15481 v.AddArg(x) 15482 return true 15483 } 15484 } 15485 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool { 15486 b := v.Block 15487 _ = b 15488 // match: (Cvt32Fto64F x) 15489 // cond: 15490 // result: (CVTSS2SD x) 15491 for { 15492 x := v.Args[0] 15493 v.reset(OpAMD64CVTSS2SD) 15494 v.AddArg(x) 15495 return true 15496 } 15497 } 15498 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool { 15499 b := v.Block 15500 _ = b 15501 // match: (Cvt32to32F x) 15502 // cond: 15503 // result: (CVTSL2SS x) 15504 for { 15505 x := v.Args[0] 15506 v.reset(OpAMD64CVTSL2SS) 15507 v.AddArg(x) 15508 return true 15509 } 15510 } 15511 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool { 15512 b := v.Block 15513 _ = b 15514 // match: (Cvt32to64F x) 15515 // cond: 15516 // result: (CVTSL2SD x) 15517 for { 15518 x := v.Args[0] 15519 v.reset(OpAMD64CVTSL2SD) 15520 v.AddArg(x) 15521 return true 15522 } 15523 } 15524 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool { 15525 b := v.Block 15526 _ = b 15527 // match: (Cvt64Fto32 x) 15528 // cond: 15529 // result: (CVTTSD2SL x) 15530 for { 15531 x := v.Args[0] 15532 v.reset(OpAMD64CVTTSD2SL) 15533 v.AddArg(x) 15534 return true 15535 } 15536 } 15537 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool { 15538 b := v.Block 15539 _ = b 15540 // match: (Cvt64Fto32F x) 15541 // cond: 15542 // result: (CVTSD2SS x) 15543 for { 15544 x := v.Args[0] 15545 v.reset(OpAMD64CVTSD2SS) 15546 v.AddArg(x) 15547 return true 15548 } 15549 } 15550 func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool { 15551 b := v.Block 15552 _ = b 15553 // match: (Cvt64Fto64 x) 15554 // cond: 15555 // result: (CVTTSD2SQ x) 15556 for { 15557 x := v.Args[0] 15558 v.reset(OpAMD64CVTTSD2SQ) 15559 v.AddArg(x) 15560 return true 15561 } 15562 } 15563 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool { 15564 b := v.Block 15565 _ = b 15566 // match: (Cvt64to32F x) 15567 // cond: 15568 // result: (CVTSQ2SS x) 15569 for { 15570 x := v.Args[0] 15571 v.reset(OpAMD64CVTSQ2SS) 15572 v.AddArg(x) 15573 return true 15574 } 15575 } 15576 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool { 15577 b := v.Block 15578 _ = b 15579 // match: (Cvt64to64F x) 15580 // cond: 15581 // result: (CVTSQ2SD x) 15582 for { 15583 x := v.Args[0] 15584 v.reset(OpAMD64CVTSQ2SD) 15585 v.AddArg(x) 15586 return true 15587 } 15588 } 15589 func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool { 15590 b := v.Block 15591 _ = b 15592 // match: (DeferCall [argwid] mem) 15593 // cond: 15594 // result: (CALLdefer [argwid] mem) 15595 for { 15596 argwid := v.AuxInt 15597 mem := v.Args[0] 15598 v.reset(OpAMD64CALLdefer) 15599 v.AuxInt = argwid 15600 v.AddArg(mem) 15601 return true 15602 } 15603 } 15604 func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool { 15605 b := v.Block 15606 _ = b 15607 // match: (Div128u xhi xlo y) 15608 // cond: 15609 // result: (DIVQU2 xhi xlo y) 15610 for { 15611 xhi := v.Args[0] 15612 xlo := v.Args[1] 15613 y := v.Args[2] 15614 v.reset(OpAMD64DIVQU2) 15615 v.AddArg(xhi) 15616 v.AddArg(xlo) 15617 v.AddArg(y) 15618 return true 15619 } 15620 } 15621 func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool { 15622 b := v.Block 15623 _ = b 15624 // match: (Div16 x y) 15625 // cond: 15626 // result: (Select0 (DIVW x y)) 15627 for { 15628 x := v.Args[0] 15629 y := v.Args[1] 15630 v.reset(OpSelect0) 15631 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 15632 v0.AddArg(x) 15633 v0.AddArg(y) 15634 v.AddArg(v0) 15635 return true 15636 } 15637 } 15638 func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool { 15639 b := v.Block 15640 _ = b 15641 // match: (Div16u x y) 15642 // cond: 15643 // result: (Select0 (DIVWU x y)) 15644 for { 15645 x := v.Args[0] 15646 y := v.Args[1] 15647 v.reset(OpSelect0) 15648 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 15649 v0.AddArg(x) 15650 v0.AddArg(y) 15651 v.AddArg(v0) 15652 return true 15653 } 15654 } 15655 func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool { 15656 b := v.Block 15657 _ = b 15658 // match: (Div32 x y) 15659 // cond: 15660 // result: (Select0 (DIVL x y)) 15661 for { 15662 x := v.Args[0] 15663 y := v.Args[1] 15664 v.reset(OpSelect0) 15665 v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 15666 v0.AddArg(x) 15667 v0.AddArg(y) 15668 v.AddArg(v0) 15669 return true 15670 } 15671 } 15672 func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool { 15673 b := v.Block 15674 _ = b 15675 // match: (Div32F x y) 15676 // cond: 15677 // result: (DIVSS x y) 15678 for { 15679 x := v.Args[0] 15680 y := v.Args[1] 15681 v.reset(OpAMD64DIVSS) 15682 v.AddArg(x) 15683 v.AddArg(y) 15684 return true 15685 } 15686 } 15687 func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool { 15688 b := v.Block 15689 _ = b 15690 // match: (Div32u x y) 15691 // cond: 15692 // result: (Select0 (DIVLU x y)) 15693 for { 15694 x := v.Args[0] 15695 y := v.Args[1] 15696 v.reset(OpSelect0) 15697 v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 15698 v0.AddArg(x) 15699 v0.AddArg(y) 15700 v.AddArg(v0) 15701 return true 15702 } 15703 } 15704 func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool { 15705 b := v.Block 15706 _ = b 15707 // match: (Div64 x y) 15708 // cond: 15709 // result: (Select0 (DIVQ x y)) 15710 for { 15711 x := v.Args[0] 15712 y := v.Args[1] 15713 v.reset(OpSelect0) 15714 v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 15715 v0.AddArg(x) 15716 v0.AddArg(y) 15717 v.AddArg(v0) 15718 return true 15719 } 15720 } 15721 func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool { 15722 b := v.Block 15723 _ = b 15724 // match: (Div64F x y) 15725 // cond: 15726 // result: (DIVSD x y) 15727 for { 15728 x := v.Args[0] 15729 y := v.Args[1] 15730 v.reset(OpAMD64DIVSD) 15731 v.AddArg(x) 15732 v.AddArg(y) 15733 return true 15734 } 15735 } 15736 func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool { 15737 b := v.Block 15738 _ = b 15739 // match: (Div64u x y) 15740 // cond: 15741 // result: (Select0 (DIVQU x y)) 15742 for { 15743 x := v.Args[0] 15744 y := v.Args[1] 15745 v.reset(OpSelect0) 15746 v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 15747 v0.AddArg(x) 15748 v0.AddArg(y) 15749 v.AddArg(v0) 15750 return true 15751 } 15752 } 15753 func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool { 15754 b := v.Block 15755 _ = b 15756 // match: (Div8 x y) 15757 // cond: 15758 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 15759 for { 15760 x := v.Args[0] 15761 y := v.Args[1] 15762 v.reset(OpSelect0) 15763 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 15764 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 15765 v1.AddArg(x) 15766 v0.AddArg(v1) 15767 v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 15768 v2.AddArg(y) 15769 v0.AddArg(v2) 15770 v.AddArg(v0) 15771 return true 15772 } 15773 } 15774 func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool { 15775 b := v.Block 15776 _ = b 15777 // match: (Div8u x y) 15778 // cond: 15779 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 15780 for { 15781 x := v.Args[0] 15782 y := v.Args[1] 15783 v.reset(OpSelect0) 15784 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 15785 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 15786 v1.AddArg(x) 15787 v0.AddArg(v1) 15788 v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 15789 v2.AddArg(y) 15790 v0.AddArg(v2) 15791 v.AddArg(v0) 15792 return true 15793 } 15794 } 15795 func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool { 15796 b := v.Block 15797 _ = b 15798 // match: (Eq16 x y) 15799 // cond: 15800 // result: (SETEQ (CMPW x y)) 15801 for { 15802 x := v.Args[0] 15803 y := v.Args[1] 15804 v.reset(OpAMD64SETEQ) 15805 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15806 v0.AddArg(x) 15807 v0.AddArg(y) 15808 v.AddArg(v0) 15809 return true 15810 } 15811 } 15812 func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool { 15813 b := v.Block 15814 _ = b 15815 // match: (Eq32 x y) 15816 // cond: 15817 // result: (SETEQ (CMPL x y)) 15818 for { 15819 x := v.Args[0] 15820 y := v.Args[1] 15821 v.reset(OpAMD64SETEQ) 15822 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15823 v0.AddArg(x) 15824 v0.AddArg(y) 15825 v.AddArg(v0) 15826 return true 15827 } 15828 } 15829 func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool { 15830 b := v.Block 15831 _ = b 15832 // match: (Eq32F x y) 15833 // cond: 15834 // result: (SETEQF (UCOMISS x y)) 15835 for { 15836 x := v.Args[0] 15837 y := v.Args[1] 15838 v.reset(OpAMD64SETEQF) 15839 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 15840 v0.AddArg(x) 15841 v0.AddArg(y) 15842 v.AddArg(v0) 15843 return true 15844 } 15845 } 15846 func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool { 15847 b := v.Block 15848 _ = b 15849 // match: (Eq64 x y) 15850 // cond: 15851 // result: (SETEQ (CMPQ x y)) 15852 for { 15853 x := v.Args[0] 15854 y := v.Args[1] 15855 v.reset(OpAMD64SETEQ) 15856 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15857 v0.AddArg(x) 15858 v0.AddArg(y) 15859 v.AddArg(v0) 15860 return true 15861 } 15862 } 15863 func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool { 15864 b := v.Block 15865 _ = b 15866 // match: (Eq64F x y) 15867 // cond: 15868 // result: (SETEQF (UCOMISD x y)) 15869 for { 15870 x := v.Args[0] 15871 y := v.Args[1] 15872 v.reset(OpAMD64SETEQF) 15873 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 15874 v0.AddArg(x) 15875 v0.AddArg(y) 15876 v.AddArg(v0) 15877 return true 15878 } 15879 } 15880 func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { 15881 b := v.Block 15882 _ = b 15883 // match: (Eq8 x y) 15884 // cond: 15885 // result: (SETEQ (CMPB x y)) 15886 for { 15887 x := v.Args[0] 15888 y := v.Args[1] 15889 v.reset(OpAMD64SETEQ) 15890 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15891 v0.AddArg(x) 15892 v0.AddArg(y) 15893 v.AddArg(v0) 15894 return true 15895 } 15896 } 15897 func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool { 15898 b := v.Block 15899 _ = b 15900 // match: (EqB x y) 15901 // cond: 15902 // result: (SETEQ (CMPB x y)) 15903 for { 15904 x := v.Args[0] 15905 y := v.Args[1] 15906 v.reset(OpAMD64SETEQ) 15907 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 15908 v0.AddArg(x) 15909 v0.AddArg(y) 15910 v.AddArg(v0) 15911 return true 15912 } 15913 } 15914 func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { 15915 b := v.Block 15916 _ = b 15917 // match: (EqPtr x y) 15918 // cond: config.PtrSize == 8 15919 // result: (SETEQ (CMPQ x y)) 15920 for { 15921 x := v.Args[0] 15922 y := v.Args[1] 15923 if !(config.PtrSize == 8) { 15924 break 15925 } 15926 v.reset(OpAMD64SETEQ) 15927 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 15928 v0.AddArg(x) 15929 v0.AddArg(y) 15930 v.AddArg(v0) 15931 return true 15932 } 15933 // match: (EqPtr x y) 15934 // cond: config.PtrSize == 4 15935 // result: (SETEQ (CMPL x y)) 15936 for { 15937 x := v.Args[0] 15938 y := v.Args[1] 15939 if !(config.PtrSize == 4) { 15940 break 15941 } 15942 v.reset(OpAMD64SETEQ) 15943 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15944 v0.AddArg(x) 15945 v0.AddArg(y) 15946 v.AddArg(v0) 15947 return true 15948 } 15949 return false 15950 } 15951 func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool { 15952 b := v.Block 15953 _ = b 15954 // match: (Geq16 x y) 15955 // cond: 15956 // result: (SETGE (CMPW x y)) 15957 for { 15958 x := v.Args[0] 15959 y := v.Args[1] 15960 v.reset(OpAMD64SETGE) 15961 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15962 v0.AddArg(x) 15963 v0.AddArg(y) 15964 v.AddArg(v0) 15965 return true 15966 } 15967 } 15968 func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool { 15969 b := v.Block 15970 _ = b 15971 // match: (Geq16U x y) 15972 // cond: 15973 // result: (SETAE (CMPW x y)) 15974 for { 15975 x := v.Args[0] 15976 y := v.Args[1] 15977 v.reset(OpAMD64SETAE) 15978 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 15979 v0.AddArg(x) 15980 v0.AddArg(y) 15981 v.AddArg(v0) 15982 return true 15983 } 15984 } 15985 func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool { 15986 b := v.Block 15987 _ = b 15988 // match: (Geq32 x y) 15989 // cond: 15990 // result: (SETGE (CMPL x y)) 15991 for { 15992 x := v.Args[0] 15993 y := v.Args[1] 15994 v.reset(OpAMD64SETGE) 15995 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 15996 v0.AddArg(x) 15997 v0.AddArg(y) 15998 v.AddArg(v0) 15999 return true 16000 } 16001 } 16002 func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool { 16003 b := v.Block 16004 _ = b 16005 // match: (Geq32F x y) 16006 // cond: 16007 // result: (SETGEF (UCOMISS x y)) 16008 for { 16009 x := v.Args[0] 16010 y := v.Args[1] 16011 v.reset(OpAMD64SETGEF) 16012 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 16013 v0.AddArg(x) 16014 v0.AddArg(y) 16015 v.AddArg(v0) 16016 return true 16017 } 16018 } 16019 func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool { 16020 b := v.Block 16021 _ = b 16022 // match: (Geq32U x y) 16023 // cond: 16024 // result: (SETAE (CMPL x y)) 16025 for { 16026 x := v.Args[0] 16027 y := v.Args[1] 16028 v.reset(OpAMD64SETAE) 16029 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16030 v0.AddArg(x) 16031 v0.AddArg(y) 16032 v.AddArg(v0) 16033 return true 16034 } 16035 } 16036 func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool { 16037 b := v.Block 16038 _ = b 16039 // match: (Geq64 x y) 16040 // cond: 16041 // result: (SETGE (CMPQ x y)) 16042 for { 16043 x := v.Args[0] 16044 y := v.Args[1] 16045 v.reset(OpAMD64SETGE) 16046 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16047 v0.AddArg(x) 16048 v0.AddArg(y) 16049 v.AddArg(v0) 16050 return true 16051 } 16052 } 16053 func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool { 16054 b := v.Block 16055 _ = b 16056 // match: (Geq64F x y) 16057 // cond: 16058 // result: (SETGEF (UCOMISD x y)) 16059 for { 16060 x := v.Args[0] 16061 y := v.Args[1] 16062 v.reset(OpAMD64SETGEF) 16063 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 16064 v0.AddArg(x) 16065 v0.AddArg(y) 16066 v.AddArg(v0) 16067 return true 16068 } 16069 } 16070 func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool { 16071 b := v.Block 16072 _ = b 16073 // match: (Geq64U x y) 16074 // cond: 16075 // result: (SETAE (CMPQ x y)) 16076 for { 16077 x := v.Args[0] 16078 y := v.Args[1] 16079 v.reset(OpAMD64SETAE) 16080 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16081 v0.AddArg(x) 16082 v0.AddArg(y) 16083 v.AddArg(v0) 16084 return true 16085 } 16086 } 16087 func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool { 16088 b := v.Block 16089 _ = b 16090 // match: (Geq8 x y) 16091 // cond: 16092 // result: (SETGE (CMPB x y)) 16093 for { 16094 x := v.Args[0] 16095 y := v.Args[1] 16096 v.reset(OpAMD64SETGE) 16097 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16098 v0.AddArg(x) 16099 v0.AddArg(y) 16100 v.AddArg(v0) 16101 return true 16102 } 16103 } 16104 func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool { 16105 b := v.Block 16106 _ = b 16107 // match: (Geq8U x y) 16108 // cond: 16109 // result: (SETAE (CMPB x y)) 16110 for { 16111 x := v.Args[0] 16112 y := v.Args[1] 16113 v.reset(OpAMD64SETAE) 16114 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16115 v0.AddArg(x) 16116 v0.AddArg(y) 16117 v.AddArg(v0) 16118 return true 16119 } 16120 } 16121 func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool { 16122 b := v.Block 16123 _ = b 16124 // match: (GetClosurePtr) 16125 // cond: 16126 // result: (LoweredGetClosurePtr) 16127 for { 16128 v.reset(OpAMD64LoweredGetClosurePtr) 16129 return true 16130 } 16131 } 16132 func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool { 16133 b := v.Block 16134 _ = b 16135 // match: (GetG mem) 16136 // cond: 16137 // result: (LoweredGetG mem) 16138 for { 16139 mem := v.Args[0] 16140 v.reset(OpAMD64LoweredGetG) 16141 v.AddArg(mem) 16142 return true 16143 } 16144 } 16145 func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool { 16146 b := v.Block 16147 _ = b 16148 // match: (GoCall [argwid] mem) 16149 // cond: 16150 // result: (CALLgo [argwid] mem) 16151 for { 16152 argwid := v.AuxInt 16153 mem := v.Args[0] 16154 v.reset(OpAMD64CALLgo) 16155 v.AuxInt = argwid 16156 v.AddArg(mem) 16157 return true 16158 } 16159 } 16160 func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool { 16161 b := v.Block 16162 _ = b 16163 // match: (Greater16 x y) 16164 // cond: 16165 // result: (SETG (CMPW x y)) 16166 for { 16167 x := v.Args[0] 16168 y := v.Args[1] 16169 v.reset(OpAMD64SETG) 16170 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16171 v0.AddArg(x) 16172 v0.AddArg(y) 16173 v.AddArg(v0) 16174 return true 16175 } 16176 } 16177 func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool { 16178 b := v.Block 16179 _ = b 16180 // match: (Greater16U x y) 16181 // cond: 16182 // result: (SETA (CMPW x y)) 16183 for { 16184 x := v.Args[0] 16185 y := v.Args[1] 16186 v.reset(OpAMD64SETA) 16187 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16188 v0.AddArg(x) 16189 v0.AddArg(y) 16190 v.AddArg(v0) 16191 return true 16192 } 16193 } 16194 func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool { 16195 b := v.Block 16196 _ = b 16197 // match: (Greater32 x y) 16198 // cond: 16199 // result: (SETG (CMPL x y)) 16200 for { 16201 x := v.Args[0] 16202 y := v.Args[1] 16203 v.reset(OpAMD64SETG) 16204 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16205 v0.AddArg(x) 16206 v0.AddArg(y) 16207 v.AddArg(v0) 16208 return true 16209 } 16210 } 16211 func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool { 16212 b := v.Block 16213 _ = b 16214 // match: (Greater32F x y) 16215 // cond: 16216 // result: (SETGF (UCOMISS x y)) 16217 for { 16218 x := v.Args[0] 16219 y := v.Args[1] 16220 v.reset(OpAMD64SETGF) 16221 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 16222 v0.AddArg(x) 16223 v0.AddArg(y) 16224 v.AddArg(v0) 16225 return true 16226 } 16227 } 16228 func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool { 16229 b := v.Block 16230 _ = b 16231 // match: (Greater32U x y) 16232 // cond: 16233 // result: (SETA (CMPL x y)) 16234 for { 16235 x := v.Args[0] 16236 y := v.Args[1] 16237 v.reset(OpAMD64SETA) 16238 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16239 v0.AddArg(x) 16240 v0.AddArg(y) 16241 v.AddArg(v0) 16242 return true 16243 } 16244 } 16245 func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool { 16246 b := v.Block 16247 _ = b 16248 // match: (Greater64 x y) 16249 // cond: 16250 // result: (SETG (CMPQ x y)) 16251 for { 16252 x := v.Args[0] 16253 y := v.Args[1] 16254 v.reset(OpAMD64SETG) 16255 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16256 v0.AddArg(x) 16257 v0.AddArg(y) 16258 v.AddArg(v0) 16259 return true 16260 } 16261 } 16262 func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool { 16263 b := v.Block 16264 _ = b 16265 // match: (Greater64F x y) 16266 // cond: 16267 // result: (SETGF (UCOMISD x y)) 16268 for { 16269 x := v.Args[0] 16270 y := v.Args[1] 16271 v.reset(OpAMD64SETGF) 16272 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 16273 v0.AddArg(x) 16274 v0.AddArg(y) 16275 v.AddArg(v0) 16276 return true 16277 } 16278 } 16279 func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool { 16280 b := v.Block 16281 _ = b 16282 // match: (Greater64U x y) 16283 // cond: 16284 // result: (SETA (CMPQ x y)) 16285 for { 16286 x := v.Args[0] 16287 y := v.Args[1] 16288 v.reset(OpAMD64SETA) 16289 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16290 v0.AddArg(x) 16291 v0.AddArg(y) 16292 v.AddArg(v0) 16293 return true 16294 } 16295 } 16296 func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 16297 b := v.Block 16298 _ = b 16299 // match: (Greater8 x y) 16300 // cond: 16301 // result: (SETG (CMPB x y)) 16302 for { 16303 x := v.Args[0] 16304 y := v.Args[1] 16305 v.reset(OpAMD64SETG) 16306 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16307 v0.AddArg(x) 16308 v0.AddArg(y) 16309 v.AddArg(v0) 16310 return true 16311 } 16312 } 16313 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 16314 b := v.Block 16315 _ = b 16316 // match: (Greater8U x y) 16317 // cond: 16318 // result: (SETA (CMPB x y)) 16319 for { 16320 x := v.Args[0] 16321 y := v.Args[1] 16322 v.reset(OpAMD64SETA) 16323 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16324 v0.AddArg(x) 16325 v0.AddArg(y) 16326 v.AddArg(v0) 16327 return true 16328 } 16329 } 16330 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 16331 b := v.Block 16332 _ = b 16333 // match: (Hmul16 x y) 16334 // cond: 16335 // result: (HMULW x y) 16336 for { 16337 x := v.Args[0] 16338 y := v.Args[1] 16339 v.reset(OpAMD64HMULW) 16340 v.AddArg(x) 16341 v.AddArg(y) 16342 return true 16343 } 16344 } 16345 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 16346 b := v.Block 16347 _ = b 16348 // match: (Hmul16u x y) 16349 // cond: 16350 // result: (HMULWU x y) 16351 for { 16352 x := v.Args[0] 16353 y := v.Args[1] 16354 v.reset(OpAMD64HMULWU) 16355 v.AddArg(x) 16356 v.AddArg(y) 16357 return true 16358 } 16359 } 16360 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 16361 b := v.Block 16362 _ = b 16363 // match: (Hmul32 x y) 16364 // cond: 16365 // result: (HMULL x y) 16366 for { 16367 x := v.Args[0] 16368 y := v.Args[1] 16369 v.reset(OpAMD64HMULL) 16370 v.AddArg(x) 16371 v.AddArg(y) 16372 return true 16373 } 16374 } 16375 func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 16376 b := v.Block 16377 _ = b 16378 // match: (Hmul32u x y) 16379 // cond: 16380 // result: (HMULLU x y) 16381 for { 16382 x := v.Args[0] 16383 y := v.Args[1] 16384 v.reset(OpAMD64HMULLU) 16385 v.AddArg(x) 16386 v.AddArg(y) 16387 return true 16388 } 16389 } 16390 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 16391 b := v.Block 16392 _ = b 16393 // match: (Hmul64 x y) 16394 // cond: 16395 // result: (HMULQ x y) 16396 for { 16397 x := v.Args[0] 16398 y := v.Args[1] 16399 v.reset(OpAMD64HMULQ) 16400 v.AddArg(x) 16401 v.AddArg(y) 16402 return true 16403 } 16404 } 16405 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 16406 b := v.Block 16407 _ = b 16408 // match: (Hmul64u x y) 16409 // cond: 16410 // result: (HMULQU x y) 16411 for { 16412 x := v.Args[0] 16413 y := v.Args[1] 16414 v.reset(OpAMD64HMULQU) 16415 v.AddArg(x) 16416 v.AddArg(y) 16417 return true 16418 } 16419 } 16420 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 16421 b := v.Block 16422 _ = b 16423 // match: (Hmul8 x y) 16424 // cond: 16425 // result: (HMULB x y) 16426 for { 16427 x := v.Args[0] 16428 y := v.Args[1] 16429 v.reset(OpAMD64HMULB) 16430 v.AddArg(x) 16431 v.AddArg(y) 16432 return true 16433 } 16434 } 16435 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 16436 b := v.Block 16437 _ = b 16438 // match: (Hmul8u x y) 16439 // cond: 16440 // result: (HMULBU x y) 16441 for { 16442 x := v.Args[0] 16443 y := v.Args[1] 16444 v.reset(OpAMD64HMULBU) 16445 v.AddArg(x) 16446 v.AddArg(y) 16447 return true 16448 } 16449 } 16450 func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool { 16451 b := v.Block 16452 _ = b 16453 // match: (Int64Hi x) 16454 // cond: 16455 // result: (SHRQconst [32] x) 16456 for { 16457 x := v.Args[0] 16458 v.reset(OpAMD64SHRQconst) 16459 v.AuxInt = 32 16460 v.AddArg(x) 16461 return true 16462 } 16463 } 16464 func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool { 16465 b := v.Block 16466 _ = b 16467 // match: (InterCall [argwid] entry mem) 16468 // cond: 16469 // result: (CALLinter [argwid] entry mem) 16470 for { 16471 argwid := v.AuxInt 16472 entry := v.Args[0] 16473 mem := v.Args[1] 16474 v.reset(OpAMD64CALLinter) 16475 v.AuxInt = argwid 16476 v.AddArg(entry) 16477 v.AddArg(mem) 16478 return true 16479 } 16480 } 16481 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 16482 b := v.Block 16483 _ = b 16484 // match: (IsInBounds idx len) 16485 // cond: 16486 // result: (SETB (CMPQ idx len)) 16487 for { 16488 idx := v.Args[0] 16489 len := v.Args[1] 16490 v.reset(OpAMD64SETB) 16491 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16492 v0.AddArg(idx) 16493 v0.AddArg(len) 16494 v.AddArg(v0) 16495 return true 16496 } 16497 } 16498 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 16499 b := v.Block 16500 _ = b 16501 // match: (IsNonNil p) 16502 // cond: config.PtrSize == 8 16503 // result: (SETNE (TESTQ p p)) 16504 for { 16505 p := v.Args[0] 16506 if !(config.PtrSize == 8) { 16507 break 16508 } 16509 v.reset(OpAMD64SETNE) 16510 v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags) 16511 v0.AddArg(p) 16512 v0.AddArg(p) 16513 v.AddArg(v0) 16514 return true 16515 } 16516 // match: (IsNonNil p) 16517 // cond: config.PtrSize == 4 16518 // result: (SETNE (TESTL p p)) 16519 for { 16520 p := v.Args[0] 16521 if !(config.PtrSize == 4) { 16522 break 16523 } 16524 v.reset(OpAMD64SETNE) 16525 v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags) 16526 v0.AddArg(p) 16527 v0.AddArg(p) 16528 v.AddArg(v0) 16529 return true 16530 } 16531 return false 16532 } 16533 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 16534 b := v.Block 16535 _ = b 16536 // match: (IsSliceInBounds idx len) 16537 // cond: 16538 // result: (SETBE (CMPQ idx len)) 16539 for { 16540 idx := v.Args[0] 16541 len := v.Args[1] 16542 v.reset(OpAMD64SETBE) 16543 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16544 v0.AddArg(idx) 16545 v0.AddArg(len) 16546 v.AddArg(v0) 16547 return true 16548 } 16549 } 16550 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 16551 b := v.Block 16552 _ = b 16553 // match: (Leq16 x y) 16554 // cond: 16555 // result: (SETLE (CMPW x y)) 16556 for { 16557 x := v.Args[0] 16558 y := v.Args[1] 16559 v.reset(OpAMD64SETLE) 16560 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16561 v0.AddArg(x) 16562 v0.AddArg(y) 16563 v.AddArg(v0) 16564 return true 16565 } 16566 } 16567 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 16568 b := v.Block 16569 _ = b 16570 // match: (Leq16U x y) 16571 // cond: 16572 // result: (SETBE (CMPW x y)) 16573 for { 16574 x := v.Args[0] 16575 y := v.Args[1] 16576 v.reset(OpAMD64SETBE) 16577 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16578 v0.AddArg(x) 16579 v0.AddArg(y) 16580 v.AddArg(v0) 16581 return true 16582 } 16583 } 16584 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 16585 b := v.Block 16586 _ = b 16587 // match: (Leq32 x y) 16588 // cond: 16589 // result: (SETLE (CMPL x y)) 16590 for { 16591 x := v.Args[0] 16592 y := v.Args[1] 16593 v.reset(OpAMD64SETLE) 16594 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16595 v0.AddArg(x) 16596 v0.AddArg(y) 16597 v.AddArg(v0) 16598 return true 16599 } 16600 } 16601 func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool { 16602 b := v.Block 16603 _ = b 16604 // match: (Leq32F x y) 16605 // cond: 16606 // result: (SETGEF (UCOMISS y x)) 16607 for { 16608 x := v.Args[0] 16609 y := v.Args[1] 16610 v.reset(OpAMD64SETGEF) 16611 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 16612 v0.AddArg(y) 16613 v0.AddArg(x) 16614 v.AddArg(v0) 16615 return true 16616 } 16617 } 16618 func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool { 16619 b := v.Block 16620 _ = b 16621 // match: (Leq32U x y) 16622 // cond: 16623 // result: (SETBE (CMPL x y)) 16624 for { 16625 x := v.Args[0] 16626 y := v.Args[1] 16627 v.reset(OpAMD64SETBE) 16628 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16629 v0.AddArg(x) 16630 v0.AddArg(y) 16631 v.AddArg(v0) 16632 return true 16633 } 16634 } 16635 func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool { 16636 b := v.Block 16637 _ = b 16638 // match: (Leq64 x y) 16639 // cond: 16640 // result: (SETLE (CMPQ x y)) 16641 for { 16642 x := v.Args[0] 16643 y := v.Args[1] 16644 v.reset(OpAMD64SETLE) 16645 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16646 v0.AddArg(x) 16647 v0.AddArg(y) 16648 v.AddArg(v0) 16649 return true 16650 } 16651 } 16652 func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool { 16653 b := v.Block 16654 _ = b 16655 // match: (Leq64F x y) 16656 // cond: 16657 // result: (SETGEF (UCOMISD y x)) 16658 for { 16659 x := v.Args[0] 16660 y := v.Args[1] 16661 v.reset(OpAMD64SETGEF) 16662 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 16663 v0.AddArg(y) 16664 v0.AddArg(x) 16665 v.AddArg(v0) 16666 return true 16667 } 16668 } 16669 func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool { 16670 b := v.Block 16671 _ = b 16672 // match: (Leq64U x y) 16673 // cond: 16674 // result: (SETBE (CMPQ x y)) 16675 for { 16676 x := v.Args[0] 16677 y := v.Args[1] 16678 v.reset(OpAMD64SETBE) 16679 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16680 v0.AddArg(x) 16681 v0.AddArg(y) 16682 v.AddArg(v0) 16683 return true 16684 } 16685 } 16686 func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool { 16687 b := v.Block 16688 _ = b 16689 // match: (Leq8 x y) 16690 // cond: 16691 // result: (SETLE (CMPB x y)) 16692 for { 16693 x := v.Args[0] 16694 y := v.Args[1] 16695 v.reset(OpAMD64SETLE) 16696 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16697 v0.AddArg(x) 16698 v0.AddArg(y) 16699 v.AddArg(v0) 16700 return true 16701 } 16702 } 16703 func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool { 16704 b := v.Block 16705 _ = b 16706 // match: (Leq8U x y) 16707 // cond: 16708 // result: (SETBE (CMPB x y)) 16709 for { 16710 x := v.Args[0] 16711 y := v.Args[1] 16712 v.reset(OpAMD64SETBE) 16713 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16714 v0.AddArg(x) 16715 v0.AddArg(y) 16716 v.AddArg(v0) 16717 return true 16718 } 16719 } 16720 func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool { 16721 b := v.Block 16722 _ = b 16723 // match: (Less16 x y) 16724 // cond: 16725 // result: (SETL (CMPW x y)) 16726 for { 16727 x := v.Args[0] 16728 y := v.Args[1] 16729 v.reset(OpAMD64SETL) 16730 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16731 v0.AddArg(x) 16732 v0.AddArg(y) 16733 v.AddArg(v0) 16734 return true 16735 } 16736 } 16737 func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool { 16738 b := v.Block 16739 _ = b 16740 // match: (Less16U x y) 16741 // cond: 16742 // result: (SETB (CMPW x y)) 16743 for { 16744 x := v.Args[0] 16745 y := v.Args[1] 16746 v.reset(OpAMD64SETB) 16747 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 16748 v0.AddArg(x) 16749 v0.AddArg(y) 16750 v.AddArg(v0) 16751 return true 16752 } 16753 } 16754 func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool { 16755 b := v.Block 16756 _ = b 16757 // match: (Less32 x y) 16758 // cond: 16759 // result: (SETL (CMPL x y)) 16760 for { 16761 x := v.Args[0] 16762 y := v.Args[1] 16763 v.reset(OpAMD64SETL) 16764 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16765 v0.AddArg(x) 16766 v0.AddArg(y) 16767 v.AddArg(v0) 16768 return true 16769 } 16770 } 16771 func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool { 16772 b := v.Block 16773 _ = b 16774 // match: (Less32F x y) 16775 // cond: 16776 // result: (SETGF (UCOMISS y x)) 16777 for { 16778 x := v.Args[0] 16779 y := v.Args[1] 16780 v.reset(OpAMD64SETGF) 16781 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 16782 v0.AddArg(y) 16783 v0.AddArg(x) 16784 v.AddArg(v0) 16785 return true 16786 } 16787 } 16788 func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool { 16789 b := v.Block 16790 _ = b 16791 // match: (Less32U x y) 16792 // cond: 16793 // result: (SETB (CMPL x y)) 16794 for { 16795 x := v.Args[0] 16796 y := v.Args[1] 16797 v.reset(OpAMD64SETB) 16798 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 16799 v0.AddArg(x) 16800 v0.AddArg(y) 16801 v.AddArg(v0) 16802 return true 16803 } 16804 } 16805 func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool { 16806 b := v.Block 16807 _ = b 16808 // match: (Less64 x y) 16809 // cond: 16810 // result: (SETL (CMPQ x y)) 16811 for { 16812 x := v.Args[0] 16813 y := v.Args[1] 16814 v.reset(OpAMD64SETL) 16815 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16816 v0.AddArg(x) 16817 v0.AddArg(y) 16818 v.AddArg(v0) 16819 return true 16820 } 16821 } 16822 func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool { 16823 b := v.Block 16824 _ = b 16825 // match: (Less64F x y) 16826 // cond: 16827 // result: (SETGF (UCOMISD y x)) 16828 for { 16829 x := v.Args[0] 16830 y := v.Args[1] 16831 v.reset(OpAMD64SETGF) 16832 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 16833 v0.AddArg(y) 16834 v0.AddArg(x) 16835 v.AddArg(v0) 16836 return true 16837 } 16838 } 16839 func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool { 16840 b := v.Block 16841 _ = b 16842 // match: (Less64U x y) 16843 // cond: 16844 // result: (SETB (CMPQ x y)) 16845 for { 16846 x := v.Args[0] 16847 y := v.Args[1] 16848 v.reset(OpAMD64SETB) 16849 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 16850 v0.AddArg(x) 16851 v0.AddArg(y) 16852 v.AddArg(v0) 16853 return true 16854 } 16855 } 16856 func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool { 16857 b := v.Block 16858 _ = b 16859 // match: (Less8 x y) 16860 // cond: 16861 // result: (SETL (CMPB x y)) 16862 for { 16863 x := v.Args[0] 16864 y := v.Args[1] 16865 v.reset(OpAMD64SETL) 16866 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16867 v0.AddArg(x) 16868 v0.AddArg(y) 16869 v.AddArg(v0) 16870 return true 16871 } 16872 } 16873 func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool { 16874 b := v.Block 16875 _ = b 16876 // match: (Less8U x y) 16877 // cond: 16878 // result: (SETB (CMPB x y)) 16879 for { 16880 x := v.Args[0] 16881 y := v.Args[1] 16882 v.reset(OpAMD64SETB) 16883 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 16884 v0.AddArg(x) 16885 v0.AddArg(y) 16886 v.AddArg(v0) 16887 return true 16888 } 16889 } 16890 func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool { 16891 b := v.Block 16892 _ = b 16893 // match: (Load <t> ptr mem) 16894 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 16895 // result: (MOVQload ptr mem) 16896 for { 16897 t := v.Type 16898 ptr := v.Args[0] 16899 mem := v.Args[1] 16900 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 16901 break 16902 } 16903 v.reset(OpAMD64MOVQload) 16904 v.AddArg(ptr) 16905 v.AddArg(mem) 16906 return true 16907 } 16908 // match: (Load <t> ptr mem) 16909 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 16910 // result: (MOVLload ptr mem) 16911 for { 16912 t := v.Type 16913 ptr := v.Args[0] 16914 mem := v.Args[1] 16915 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 16916 break 16917 } 16918 v.reset(OpAMD64MOVLload) 16919 v.AddArg(ptr) 16920 v.AddArg(mem) 16921 return true 16922 } 16923 // match: (Load <t> ptr mem) 16924 // cond: is16BitInt(t) 16925 // result: (MOVWload ptr mem) 16926 for { 16927 t := v.Type 16928 ptr := v.Args[0] 16929 mem := v.Args[1] 16930 if !(is16BitInt(t)) { 16931 break 16932 } 16933 v.reset(OpAMD64MOVWload) 16934 v.AddArg(ptr) 16935 v.AddArg(mem) 16936 return true 16937 } 16938 // match: (Load <t> ptr mem) 16939 // cond: (t.IsBoolean() || is8BitInt(t)) 16940 // result: (MOVBload ptr mem) 16941 for { 16942 t := v.Type 16943 ptr := v.Args[0] 16944 mem := v.Args[1] 16945 if !(t.IsBoolean() || is8BitInt(t)) { 16946 break 16947 } 16948 v.reset(OpAMD64MOVBload) 16949 v.AddArg(ptr) 16950 v.AddArg(mem) 16951 return true 16952 } 16953 // match: (Load <t> ptr mem) 16954 // cond: is32BitFloat(t) 16955 // result: (MOVSSload ptr mem) 16956 for { 16957 t := v.Type 16958 ptr := v.Args[0] 16959 mem := v.Args[1] 16960 if !(is32BitFloat(t)) { 16961 break 16962 } 16963 v.reset(OpAMD64MOVSSload) 16964 v.AddArg(ptr) 16965 v.AddArg(mem) 16966 return true 16967 } 16968 // match: (Load <t> ptr mem) 16969 // cond: is64BitFloat(t) 16970 // result: (MOVSDload ptr mem) 16971 for { 16972 t := v.Type 16973 ptr := v.Args[0] 16974 mem := v.Args[1] 16975 if !(is64BitFloat(t)) { 16976 break 16977 } 16978 v.reset(OpAMD64MOVSDload) 16979 v.AddArg(ptr) 16980 v.AddArg(mem) 16981 return true 16982 } 16983 return false 16984 } 16985 func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool { 16986 b := v.Block 16987 _ = b 16988 // match: (Lrot16 <t> x [c]) 16989 // cond: 16990 // result: (ROLWconst <t> [c&15] x) 16991 for { 16992 t := v.Type 16993 c := v.AuxInt 16994 x := v.Args[0] 16995 v.reset(OpAMD64ROLWconst) 16996 v.Type = t 16997 v.AuxInt = c & 15 16998 v.AddArg(x) 16999 return true 17000 } 17001 } 17002 func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool { 17003 b := v.Block 17004 _ = b 17005 // match: (Lrot32 <t> x [c]) 17006 // cond: 17007 // result: (ROLLconst <t> [c&31] x) 17008 for { 17009 t := v.Type 17010 c := v.AuxInt 17011 x := v.Args[0] 17012 v.reset(OpAMD64ROLLconst) 17013 v.Type = t 17014 v.AuxInt = c & 31 17015 v.AddArg(x) 17016 return true 17017 } 17018 } 17019 func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool { 17020 b := v.Block 17021 _ = b 17022 // match: (Lrot64 <t> x [c]) 17023 // cond: 17024 // result: (ROLQconst <t> [c&63] x) 17025 for { 17026 t := v.Type 17027 c := v.AuxInt 17028 x := v.Args[0] 17029 v.reset(OpAMD64ROLQconst) 17030 v.Type = t 17031 v.AuxInt = c & 63 17032 v.AddArg(x) 17033 return true 17034 } 17035 } 17036 func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool { 17037 b := v.Block 17038 _ = b 17039 // match: (Lrot8 <t> x [c]) 17040 // cond: 17041 // result: (ROLBconst <t> [c&7] x) 17042 for { 17043 t := v.Type 17044 c := v.AuxInt 17045 x := v.Args[0] 17046 v.reset(OpAMD64ROLBconst) 17047 v.Type = t 17048 v.AuxInt = c & 7 17049 v.AddArg(x) 17050 return true 17051 } 17052 } 17053 func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { 17054 b := v.Block 17055 _ = b 17056 // match: (Lsh16x16 <t> x y) 17057 // cond: 17058 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17059 for { 17060 t := v.Type 17061 x := v.Args[0] 17062 y := v.Args[1] 17063 v.reset(OpAMD64ANDL) 17064 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17065 v0.AddArg(x) 17066 v0.AddArg(y) 17067 v.AddArg(v0) 17068 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17069 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17070 v2.AuxInt = 32 17071 v2.AddArg(y) 17072 v1.AddArg(v2) 17073 v.AddArg(v1) 17074 return true 17075 } 17076 } 17077 func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { 17078 b := v.Block 17079 _ = b 17080 // match: (Lsh16x32 <t> x y) 17081 // cond: 17082 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17083 for { 17084 t := v.Type 17085 x := v.Args[0] 17086 y := v.Args[1] 17087 v.reset(OpAMD64ANDL) 17088 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17089 v0.AddArg(x) 17090 v0.AddArg(y) 17091 v.AddArg(v0) 17092 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17093 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17094 v2.AuxInt = 32 17095 v2.AddArg(y) 17096 v1.AddArg(v2) 17097 v.AddArg(v1) 17098 return true 17099 } 17100 } 17101 func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { 17102 b := v.Block 17103 _ = b 17104 // match: (Lsh16x64 <t> x y) 17105 // cond: 17106 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17107 for { 17108 t := v.Type 17109 x := v.Args[0] 17110 y := v.Args[1] 17111 v.reset(OpAMD64ANDL) 17112 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17113 v0.AddArg(x) 17114 v0.AddArg(y) 17115 v.AddArg(v0) 17116 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17117 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17118 v2.AuxInt = 32 17119 v2.AddArg(y) 17120 v1.AddArg(v2) 17121 v.AddArg(v1) 17122 return true 17123 } 17124 } 17125 func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { 17126 b := v.Block 17127 _ = b 17128 // match: (Lsh16x8 <t> x y) 17129 // cond: 17130 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17131 for { 17132 t := v.Type 17133 x := v.Args[0] 17134 y := v.Args[1] 17135 v.reset(OpAMD64ANDL) 17136 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17137 v0.AddArg(x) 17138 v0.AddArg(y) 17139 v.AddArg(v0) 17140 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17141 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17142 v2.AuxInt = 32 17143 v2.AddArg(y) 17144 v1.AddArg(v2) 17145 v.AddArg(v1) 17146 return true 17147 } 17148 } 17149 func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool { 17150 b := v.Block 17151 _ = b 17152 // match: (Lsh32x16 <t> x y) 17153 // cond: 17154 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17155 for { 17156 t := v.Type 17157 x := v.Args[0] 17158 y := v.Args[1] 17159 v.reset(OpAMD64ANDL) 17160 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17161 v0.AddArg(x) 17162 v0.AddArg(y) 17163 v.AddArg(v0) 17164 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17165 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17166 v2.AuxInt = 32 17167 v2.AddArg(y) 17168 v1.AddArg(v2) 17169 v.AddArg(v1) 17170 return true 17171 } 17172 } 17173 func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool { 17174 b := v.Block 17175 _ = b 17176 // match: (Lsh32x32 <t> x y) 17177 // cond: 17178 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17179 for { 17180 t := v.Type 17181 x := v.Args[0] 17182 y := v.Args[1] 17183 v.reset(OpAMD64ANDL) 17184 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17185 v0.AddArg(x) 17186 v0.AddArg(y) 17187 v.AddArg(v0) 17188 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17189 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17190 v2.AuxInt = 32 17191 v2.AddArg(y) 17192 v1.AddArg(v2) 17193 v.AddArg(v1) 17194 return true 17195 } 17196 } 17197 func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool { 17198 b := v.Block 17199 _ = b 17200 // match: (Lsh32x64 <t> x y) 17201 // cond: 17202 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17203 for { 17204 t := v.Type 17205 x := v.Args[0] 17206 y := v.Args[1] 17207 v.reset(OpAMD64ANDL) 17208 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17209 v0.AddArg(x) 17210 v0.AddArg(y) 17211 v.AddArg(v0) 17212 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17213 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17214 v2.AuxInt = 32 17215 v2.AddArg(y) 17216 v1.AddArg(v2) 17217 v.AddArg(v1) 17218 return true 17219 } 17220 } 17221 func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool { 17222 b := v.Block 17223 _ = b 17224 // match: (Lsh32x8 <t> x y) 17225 // cond: 17226 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17227 for { 17228 t := v.Type 17229 x := v.Args[0] 17230 y := v.Args[1] 17231 v.reset(OpAMD64ANDL) 17232 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17233 v0.AddArg(x) 17234 v0.AddArg(y) 17235 v.AddArg(v0) 17236 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17237 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17238 v2.AuxInt = 32 17239 v2.AddArg(y) 17240 v1.AddArg(v2) 17241 v.AddArg(v1) 17242 return true 17243 } 17244 } 17245 func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool { 17246 b := v.Block 17247 _ = b 17248 // match: (Lsh64x16 <t> x y) 17249 // cond: 17250 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 17251 for { 17252 t := v.Type 17253 x := v.Args[0] 17254 y := v.Args[1] 17255 v.reset(OpAMD64ANDQ) 17256 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 17257 v0.AddArg(x) 17258 v0.AddArg(y) 17259 v.AddArg(v0) 17260 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17261 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17262 v2.AuxInt = 64 17263 v2.AddArg(y) 17264 v1.AddArg(v2) 17265 v.AddArg(v1) 17266 return true 17267 } 17268 } 17269 func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool { 17270 b := v.Block 17271 _ = b 17272 // match: (Lsh64x32 <t> x y) 17273 // cond: 17274 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 17275 for { 17276 t := v.Type 17277 x := v.Args[0] 17278 y := v.Args[1] 17279 v.reset(OpAMD64ANDQ) 17280 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 17281 v0.AddArg(x) 17282 v0.AddArg(y) 17283 v.AddArg(v0) 17284 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17285 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17286 v2.AuxInt = 64 17287 v2.AddArg(y) 17288 v1.AddArg(v2) 17289 v.AddArg(v1) 17290 return true 17291 } 17292 } 17293 func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool { 17294 b := v.Block 17295 _ = b 17296 // match: (Lsh64x64 <t> x y) 17297 // cond: 17298 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 17299 for { 17300 t := v.Type 17301 x := v.Args[0] 17302 y := v.Args[1] 17303 v.reset(OpAMD64ANDQ) 17304 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 17305 v0.AddArg(x) 17306 v0.AddArg(y) 17307 v.AddArg(v0) 17308 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17309 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17310 v2.AuxInt = 64 17311 v2.AddArg(y) 17312 v1.AddArg(v2) 17313 v.AddArg(v1) 17314 return true 17315 } 17316 } 17317 func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool { 17318 b := v.Block 17319 _ = b 17320 // match: (Lsh64x8 <t> x y) 17321 // cond: 17322 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 17323 for { 17324 t := v.Type 17325 x := v.Args[0] 17326 y := v.Args[1] 17327 v.reset(OpAMD64ANDQ) 17328 v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t) 17329 v0.AddArg(x) 17330 v0.AddArg(y) 17331 v.AddArg(v0) 17332 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 17333 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17334 v2.AuxInt = 64 17335 v2.AddArg(y) 17336 v1.AddArg(v2) 17337 v.AddArg(v1) 17338 return true 17339 } 17340 } 17341 func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { 17342 b := v.Block 17343 _ = b 17344 // match: (Lsh8x16 <t> x y) 17345 // cond: 17346 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17347 for { 17348 t := v.Type 17349 x := v.Args[0] 17350 y := v.Args[1] 17351 v.reset(OpAMD64ANDL) 17352 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17353 v0.AddArg(x) 17354 v0.AddArg(y) 17355 v.AddArg(v0) 17356 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17357 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 17358 v2.AuxInt = 32 17359 v2.AddArg(y) 17360 v1.AddArg(v2) 17361 v.AddArg(v1) 17362 return true 17363 } 17364 } 17365 func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { 17366 b := v.Block 17367 _ = b 17368 // match: (Lsh8x32 <t> x y) 17369 // cond: 17370 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17371 for { 17372 t := v.Type 17373 x := v.Args[0] 17374 y := v.Args[1] 17375 v.reset(OpAMD64ANDL) 17376 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17377 v0.AddArg(x) 17378 v0.AddArg(y) 17379 v.AddArg(v0) 17380 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17381 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 17382 v2.AuxInt = 32 17383 v2.AddArg(y) 17384 v1.AddArg(v2) 17385 v.AddArg(v1) 17386 return true 17387 } 17388 } 17389 func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { 17390 b := v.Block 17391 _ = b 17392 // match: (Lsh8x64 <t> x y) 17393 // cond: 17394 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17395 for { 17396 t := v.Type 17397 x := v.Args[0] 17398 y := v.Args[1] 17399 v.reset(OpAMD64ANDL) 17400 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17401 v0.AddArg(x) 17402 v0.AddArg(y) 17403 v.AddArg(v0) 17404 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17405 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 17406 v2.AuxInt = 32 17407 v2.AddArg(y) 17408 v1.AddArg(v2) 17409 v.AddArg(v1) 17410 return true 17411 } 17412 } 17413 func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { 17414 b := v.Block 17415 _ = b 17416 // match: (Lsh8x8 <t> x y) 17417 // cond: 17418 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17419 for { 17420 t := v.Type 17421 x := v.Args[0] 17422 y := v.Args[1] 17423 v.reset(OpAMD64ANDL) 17424 v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 17425 v0.AddArg(x) 17426 v0.AddArg(y) 17427 v.AddArg(v0) 17428 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 17429 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 17430 v2.AuxInt = 32 17431 v2.AddArg(y) 17432 v1.AddArg(v2) 17433 v.AddArg(v1) 17434 return true 17435 } 17436 } 17437 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { 17438 b := v.Block 17439 _ = b 17440 // match: (Mod16 x y) 17441 // cond: 17442 // result: (Select1 (DIVW x y)) 17443 for { 17444 x := v.Args[0] 17445 y := v.Args[1] 17446 v.reset(OpSelect1) 17447 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 17448 v0.AddArg(x) 17449 v0.AddArg(y) 17450 v.AddArg(v0) 17451 return true 17452 } 17453 } 17454 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { 17455 b := v.Block 17456 _ = b 17457 // match: (Mod16u x y) 17458 // cond: 17459 // result: (Select1 (DIVWU x y)) 17460 for { 17461 x := v.Args[0] 17462 y := v.Args[1] 17463 v.reset(OpSelect1) 17464 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 17465 v0.AddArg(x) 17466 v0.AddArg(y) 17467 v.AddArg(v0) 17468 return true 17469 } 17470 } 17471 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { 17472 b := v.Block 17473 _ = b 17474 // match: (Mod32 x y) 17475 // cond: 17476 // result: (Select1 (DIVL x y)) 17477 for { 17478 x := v.Args[0] 17479 y := v.Args[1] 17480 v.reset(OpSelect1) 17481 v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 17482 v0.AddArg(x) 17483 v0.AddArg(y) 17484 v.AddArg(v0) 17485 return true 17486 } 17487 } 17488 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool { 17489 b := v.Block 17490 _ = b 17491 // match: (Mod32u x y) 17492 // cond: 17493 // result: (Select1 (DIVLU x y)) 17494 for { 17495 x := v.Args[0] 17496 y := v.Args[1] 17497 v.reset(OpSelect1) 17498 v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 17499 v0.AddArg(x) 17500 v0.AddArg(y) 17501 v.AddArg(v0) 17502 return true 17503 } 17504 } 17505 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { 17506 b := v.Block 17507 _ = b 17508 // match: (Mod64 x y) 17509 // cond: 17510 // result: (Select1 (DIVQ x y)) 17511 for { 17512 x := v.Args[0] 17513 y := v.Args[1] 17514 v.reset(OpSelect1) 17515 v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 17516 v0.AddArg(x) 17517 v0.AddArg(y) 17518 v.AddArg(v0) 17519 return true 17520 } 17521 } 17522 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { 17523 b := v.Block 17524 _ = b 17525 // match: (Mod64u x y) 17526 // cond: 17527 // result: (Select1 (DIVQU x y)) 17528 for { 17529 x := v.Args[0] 17530 y := v.Args[1] 17531 v.reset(OpSelect1) 17532 v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 17533 v0.AddArg(x) 17534 v0.AddArg(y) 17535 v.AddArg(v0) 17536 return true 17537 } 17538 } 17539 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { 17540 b := v.Block 17541 _ = b 17542 // match: (Mod8 x y) 17543 // cond: 17544 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 17545 for { 17546 x := v.Args[0] 17547 y := v.Args[1] 17548 v.reset(OpSelect1) 17549 v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 17550 v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 17551 v1.AddArg(x) 17552 v0.AddArg(v1) 17553 v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) 17554 v2.AddArg(y) 17555 v0.AddArg(v2) 17556 v.AddArg(v0) 17557 return true 17558 } 17559 } 17560 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { 17561 b := v.Block 17562 _ = b 17563 // match: (Mod8u x y) 17564 // cond: 17565 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 17566 for { 17567 x := v.Args[0] 17568 y := v.Args[1] 17569 v.reset(OpSelect1) 17570 v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 17571 v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 17572 v1.AddArg(x) 17573 v0.AddArg(v1) 17574 v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) 17575 v2.AddArg(y) 17576 v0.AddArg(v2) 17577 v.AddArg(v0) 17578 return true 17579 } 17580 } 17581 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { 17582 b := v.Block 17583 _ = b 17584 // match: (Move [s] _ _ mem) 17585 // cond: SizeAndAlign(s).Size() == 0 17586 // result: mem 17587 for { 17588 s := v.AuxInt 17589 mem := v.Args[2] 17590 if !(SizeAndAlign(s).Size() == 0) { 17591 break 17592 } 17593 v.reset(OpCopy) 17594 v.Type = mem.Type 17595 v.AddArg(mem) 17596 return true 17597 } 17598 // match: (Move [s] dst src mem) 17599 // cond: SizeAndAlign(s).Size() == 1 17600 // result: (MOVBstore dst (MOVBload src mem) mem) 17601 for { 17602 s := v.AuxInt 17603 dst := v.Args[0] 17604 src := v.Args[1] 17605 mem := v.Args[2] 17606 if !(SizeAndAlign(s).Size() == 1) { 17607 break 17608 } 17609 v.reset(OpAMD64MOVBstore) 17610 v.AddArg(dst) 17611 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 17612 v0.AddArg(src) 17613 v0.AddArg(mem) 17614 v.AddArg(v0) 17615 v.AddArg(mem) 17616 return true 17617 } 17618 // match: (Move [s] dst src mem) 17619 // cond: SizeAndAlign(s).Size() == 2 17620 // result: (MOVWstore dst (MOVWload src mem) mem) 17621 for { 17622 s := v.AuxInt 17623 dst := v.Args[0] 17624 src := v.Args[1] 17625 mem := v.Args[2] 17626 if !(SizeAndAlign(s).Size() == 2) { 17627 break 17628 } 17629 v.reset(OpAMD64MOVWstore) 17630 v.AddArg(dst) 17631 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 17632 v0.AddArg(src) 17633 v0.AddArg(mem) 17634 v.AddArg(v0) 17635 v.AddArg(mem) 17636 return true 17637 } 17638 // match: (Move [s] dst src mem) 17639 // cond: SizeAndAlign(s).Size() == 4 17640 // result: (MOVLstore dst (MOVLload src mem) mem) 17641 for { 17642 s := v.AuxInt 17643 dst := v.Args[0] 17644 src := v.Args[1] 17645 mem := v.Args[2] 17646 if !(SizeAndAlign(s).Size() == 4) { 17647 break 17648 } 17649 v.reset(OpAMD64MOVLstore) 17650 v.AddArg(dst) 17651 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 17652 v0.AddArg(src) 17653 v0.AddArg(mem) 17654 v.AddArg(v0) 17655 v.AddArg(mem) 17656 return true 17657 } 17658 // match: (Move [s] dst src mem) 17659 // cond: SizeAndAlign(s).Size() == 8 17660 // result: (MOVQstore dst (MOVQload src mem) mem) 17661 for { 17662 s := v.AuxInt 17663 dst := v.Args[0] 17664 src := v.Args[1] 17665 mem := v.Args[2] 17666 if !(SizeAndAlign(s).Size() == 8) { 17667 break 17668 } 17669 v.reset(OpAMD64MOVQstore) 17670 v.AddArg(dst) 17671 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 17672 v0.AddArg(src) 17673 v0.AddArg(mem) 17674 v.AddArg(v0) 17675 v.AddArg(mem) 17676 return true 17677 } 17678 // match: (Move [s] dst src mem) 17679 // cond: SizeAndAlign(s).Size() == 16 17680 // result: (MOVOstore dst (MOVOload src mem) mem) 17681 for { 17682 s := v.AuxInt 17683 dst := v.Args[0] 17684 src := v.Args[1] 17685 mem := v.Args[2] 17686 if !(SizeAndAlign(s).Size() == 16) { 17687 break 17688 } 17689 v.reset(OpAMD64MOVOstore) 17690 v.AddArg(dst) 17691 v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 17692 v0.AddArg(src) 17693 v0.AddArg(mem) 17694 v.AddArg(v0) 17695 v.AddArg(mem) 17696 return true 17697 } 17698 // match: (Move [s] dst src mem) 17699 // cond: SizeAndAlign(s).Size() == 3 17700 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 17701 for { 17702 s := v.AuxInt 17703 dst := v.Args[0] 17704 src := v.Args[1] 17705 mem := v.Args[2] 17706 if !(SizeAndAlign(s).Size() == 3) { 17707 break 17708 } 17709 v.reset(OpAMD64MOVBstore) 17710 v.AuxInt = 2 17711 v.AddArg(dst) 17712 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 17713 v0.AuxInt = 2 17714 v0.AddArg(src) 17715 v0.AddArg(mem) 17716 v.AddArg(v0) 17717 v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem) 17718 v1.AddArg(dst) 17719 v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 17720 v2.AddArg(src) 17721 v2.AddArg(mem) 17722 v1.AddArg(v2) 17723 v1.AddArg(mem) 17724 v.AddArg(v1) 17725 return true 17726 } 17727 // match: (Move [s] dst src mem) 17728 // cond: SizeAndAlign(s).Size() == 5 17729 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 17730 for { 17731 s := v.AuxInt 17732 dst := v.Args[0] 17733 src := v.Args[1] 17734 mem := v.Args[2] 17735 if !(SizeAndAlign(s).Size() == 5) { 17736 break 17737 } 17738 v.reset(OpAMD64MOVBstore) 17739 v.AuxInt = 4 17740 v.AddArg(dst) 17741 v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) 17742 v0.AuxInt = 4 17743 v0.AddArg(src) 17744 v0.AddArg(mem) 17745 v.AddArg(v0) 17746 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 17747 v1.AddArg(dst) 17748 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 17749 v2.AddArg(src) 17750 v2.AddArg(mem) 17751 v1.AddArg(v2) 17752 v1.AddArg(mem) 17753 v.AddArg(v1) 17754 return true 17755 } 17756 // match: (Move [s] dst src mem) 17757 // cond: SizeAndAlign(s).Size() == 6 17758 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 17759 for { 17760 s := v.AuxInt 17761 dst := v.Args[0] 17762 src := v.Args[1] 17763 mem := v.Args[2] 17764 if !(SizeAndAlign(s).Size() == 6) { 17765 break 17766 } 17767 v.reset(OpAMD64MOVWstore) 17768 v.AuxInt = 4 17769 v.AddArg(dst) 17770 v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) 17771 v0.AuxInt = 4 17772 v0.AddArg(src) 17773 v0.AddArg(mem) 17774 v.AddArg(v0) 17775 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 17776 v1.AddArg(dst) 17777 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 17778 v2.AddArg(src) 17779 v2.AddArg(mem) 17780 v1.AddArg(v2) 17781 v1.AddArg(mem) 17782 v.AddArg(v1) 17783 return true 17784 } 17785 // match: (Move [s] dst src mem) 17786 // cond: SizeAndAlign(s).Size() == 7 17787 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 17788 for { 17789 s := v.AuxInt 17790 dst := v.Args[0] 17791 src := v.Args[1] 17792 mem := v.Args[2] 17793 if !(SizeAndAlign(s).Size() == 7) { 17794 break 17795 } 17796 v.reset(OpAMD64MOVLstore) 17797 v.AuxInt = 3 17798 v.AddArg(dst) 17799 v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 17800 v0.AuxInt = 3 17801 v0.AddArg(src) 17802 v0.AddArg(mem) 17803 v.AddArg(v0) 17804 v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) 17805 v1.AddArg(dst) 17806 v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) 17807 v2.AddArg(src) 17808 v2.AddArg(mem) 17809 v1.AddArg(v2) 17810 v1.AddArg(mem) 17811 v.AddArg(v1) 17812 return true 17813 } 17814 // match: (Move [s] dst src mem) 17815 // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 17816 // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 17817 for { 17818 s := v.AuxInt 17819 dst := v.Args[0] 17820 src := v.Args[1] 17821 mem := v.Args[2] 17822 if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) { 17823 break 17824 } 17825 v.reset(OpAMD64MOVQstore) 17826 v.AuxInt = SizeAndAlign(s).Size() - 8 17827 v.AddArg(dst) 17828 v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 17829 v0.AuxInt = SizeAndAlign(s).Size() - 8 17830 v0.AddArg(src) 17831 v0.AddArg(mem) 17832 v.AddArg(v0) 17833 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 17834 v1.AddArg(dst) 17835 v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 17836 v2.AddArg(src) 17837 v2.AddArg(mem) 17838 v1.AddArg(v2) 17839 v1.AddArg(mem) 17840 v.AddArg(v1) 17841 return true 17842 } 17843 // match: (Move [s] dst src mem) 17844 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 17845 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem)) 17846 for { 17847 s := v.AuxInt 17848 dst := v.Args[0] 17849 src := v.Args[1] 17850 mem := v.Args[2] 17851 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) { 17852 break 17853 } 17854 v.reset(OpMove) 17855 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 17856 v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type) 17857 v0.AuxInt = SizeAndAlign(s).Size() % 16 17858 v0.AddArg(dst) 17859 v.AddArg(v0) 17860 v1 := b.NewValue0(v.Line, OpOffPtr, src.Type) 17861 v1.AuxInt = SizeAndAlign(s).Size() % 16 17862 v1.AddArg(src) 17863 v.AddArg(v1) 17864 v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 17865 v2.AddArg(dst) 17866 v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) 17867 v3.AddArg(src) 17868 v3.AddArg(mem) 17869 v2.AddArg(v3) 17870 v2.AddArg(mem) 17871 v.AddArg(v2) 17872 return true 17873 } 17874 // match: (Move [s] dst src mem) 17875 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 17876 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem)) 17877 for { 17878 s := v.AuxInt 17879 dst := v.Args[0] 17880 src := v.Args[1] 17881 mem := v.Args[2] 17882 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) { 17883 break 17884 } 17885 v.reset(OpMove) 17886 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 17887 v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type) 17888 v0.AuxInt = SizeAndAlign(s).Size() % 16 17889 v0.AddArg(dst) 17890 v.AddArg(v0) 17891 v1 := b.NewValue0(v.Line, OpOffPtr, src.Type) 17892 v1.AuxInt = SizeAndAlign(s).Size() % 16 17893 v1.AddArg(src) 17894 v.AddArg(v1) 17895 v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem) 17896 v2.AddArg(dst) 17897 v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) 17898 v3.AddArg(src) 17899 v3.AddArg(mem) 17900 v2.AddArg(v3) 17901 v2.AddArg(mem) 17902 v.AddArg(v2) 17903 return true 17904 } 17905 // match: (Move [s] dst src mem) 17906 // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 17907 // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem) 17908 for { 17909 s := v.AuxInt 17910 dst := v.Args[0] 17911 src := v.Args[1] 17912 mem := v.Args[2] 17913 if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 17914 break 17915 } 17916 v.reset(OpAMD64DUFFCOPY) 17917 v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16) 17918 v.AddArg(dst) 17919 v.AddArg(src) 17920 v.AddArg(mem) 17921 return true 17922 } 17923 // match: (Move [s] dst src mem) 17924 // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 17925 // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem) 17926 for { 17927 s := v.AuxInt 17928 dst := v.Args[0] 17929 src := v.Args[1] 17930 mem := v.Args[2] 17931 if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) { 17932 break 17933 } 17934 v.reset(OpAMD64REPMOVSQ) 17935 v.AddArg(dst) 17936 v.AddArg(src) 17937 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 17938 v0.AuxInt = SizeAndAlign(s).Size() / 8 17939 v.AddArg(v0) 17940 v.AddArg(mem) 17941 return true 17942 } 17943 return false 17944 } 17945 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { 17946 b := v.Block 17947 _ = b 17948 // match: (Mul16 x y) 17949 // cond: 17950 // result: (MULL x y) 17951 for { 17952 x := v.Args[0] 17953 y := v.Args[1] 17954 v.reset(OpAMD64MULL) 17955 v.AddArg(x) 17956 v.AddArg(y) 17957 return true 17958 } 17959 } 17960 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { 17961 b := v.Block 17962 _ = b 17963 // match: (Mul32 x y) 17964 // cond: 17965 // result: (MULL x y) 17966 for { 17967 x := v.Args[0] 17968 y := v.Args[1] 17969 v.reset(OpAMD64MULL) 17970 v.AddArg(x) 17971 v.AddArg(y) 17972 return true 17973 } 17974 } 17975 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { 17976 b := v.Block 17977 _ = b 17978 // match: (Mul32F x y) 17979 // cond: 17980 // result: (MULSS x y) 17981 for { 17982 x := v.Args[0] 17983 y := v.Args[1] 17984 v.reset(OpAMD64MULSS) 17985 v.AddArg(x) 17986 v.AddArg(y) 17987 return true 17988 } 17989 } 17990 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { 17991 b := v.Block 17992 _ = b 17993 // match: (Mul64 x y) 17994 // cond: 17995 // result: (MULQ x y) 17996 for { 17997 x := v.Args[0] 17998 y := v.Args[1] 17999 v.reset(OpAMD64MULQ) 18000 v.AddArg(x) 18001 v.AddArg(y) 18002 return true 18003 } 18004 } 18005 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { 18006 b := v.Block 18007 _ = b 18008 // match: (Mul64F x y) 18009 // cond: 18010 // result: (MULSD x y) 18011 for { 18012 x := v.Args[0] 18013 y := v.Args[1] 18014 v.reset(OpAMD64MULSD) 18015 v.AddArg(x) 18016 v.AddArg(y) 18017 return true 18018 } 18019 } 18020 func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool { 18021 b := v.Block 18022 _ = b 18023 // match: (Mul64uhilo x y) 18024 // cond: 18025 // result: (MULQU2 x y) 18026 for { 18027 x := v.Args[0] 18028 y := v.Args[1] 18029 v.reset(OpAMD64MULQU2) 18030 v.AddArg(x) 18031 v.AddArg(y) 18032 return true 18033 } 18034 } 18035 func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { 18036 b := v.Block 18037 _ = b 18038 // match: (Mul8 x y) 18039 // cond: 18040 // result: (MULL x y) 18041 for { 18042 x := v.Args[0] 18043 y := v.Args[1] 18044 v.reset(OpAMD64MULL) 18045 v.AddArg(x) 18046 v.AddArg(y) 18047 return true 18048 } 18049 } 18050 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { 18051 b := v.Block 18052 _ = b 18053 // match: (Neg16 x) 18054 // cond: 18055 // result: (NEGL x) 18056 for { 18057 x := v.Args[0] 18058 v.reset(OpAMD64NEGL) 18059 v.AddArg(x) 18060 return true 18061 } 18062 } 18063 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { 18064 b := v.Block 18065 _ = b 18066 // match: (Neg32 x) 18067 // cond: 18068 // result: (NEGL x) 18069 for { 18070 x := v.Args[0] 18071 v.reset(OpAMD64NEGL) 18072 v.AddArg(x) 18073 return true 18074 } 18075 } 18076 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { 18077 b := v.Block 18078 _ = b 18079 // match: (Neg32F x) 18080 // cond: 18081 // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) 18082 for { 18083 x := v.Args[0] 18084 v.reset(OpAMD64PXOR) 18085 v.AddArg(x) 18086 v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) 18087 v0.AuxInt = f2i(math.Copysign(0, -1)) 18088 v.AddArg(v0) 18089 return true 18090 } 18091 } 18092 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { 18093 b := v.Block 18094 _ = b 18095 // match: (Neg64 x) 18096 // cond: 18097 // result: (NEGQ x) 18098 for { 18099 x := v.Args[0] 18100 v.reset(OpAMD64NEGQ) 18101 v.AddArg(x) 18102 return true 18103 } 18104 } 18105 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { 18106 b := v.Block 18107 _ = b 18108 // match: (Neg64F x) 18109 // cond: 18110 // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) 18111 for { 18112 x := v.Args[0] 18113 v.reset(OpAMD64PXOR) 18114 v.AddArg(x) 18115 v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) 18116 v0.AuxInt = f2i(math.Copysign(0, -1)) 18117 v.AddArg(v0) 18118 return true 18119 } 18120 } 18121 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { 18122 b := v.Block 18123 _ = b 18124 // match: (Neg8 x) 18125 // cond: 18126 // result: (NEGL x) 18127 for { 18128 x := v.Args[0] 18129 v.reset(OpAMD64NEGL) 18130 v.AddArg(x) 18131 return true 18132 } 18133 } 18134 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { 18135 b := v.Block 18136 _ = b 18137 // match: (Neq16 x y) 18138 // cond: 18139 // result: (SETNE (CMPW x y)) 18140 for { 18141 x := v.Args[0] 18142 y := v.Args[1] 18143 v.reset(OpAMD64SETNE) 18144 v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) 18145 v0.AddArg(x) 18146 v0.AddArg(y) 18147 v.AddArg(v0) 18148 return true 18149 } 18150 } 18151 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { 18152 b := v.Block 18153 _ = b 18154 // match: (Neq32 x y) 18155 // cond: 18156 // result: (SETNE (CMPL x y)) 18157 for { 18158 x := v.Args[0] 18159 y := v.Args[1] 18160 v.reset(OpAMD64SETNE) 18161 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 18162 v0.AddArg(x) 18163 v0.AddArg(y) 18164 v.AddArg(v0) 18165 return true 18166 } 18167 } 18168 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { 18169 b := v.Block 18170 _ = b 18171 // match: (Neq32F x y) 18172 // cond: 18173 // result: (SETNEF (UCOMISS x y)) 18174 for { 18175 x := v.Args[0] 18176 y := v.Args[1] 18177 v.reset(OpAMD64SETNEF) 18178 v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) 18179 v0.AddArg(x) 18180 v0.AddArg(y) 18181 v.AddArg(v0) 18182 return true 18183 } 18184 } 18185 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { 18186 b := v.Block 18187 _ = b 18188 // match: (Neq64 x y) 18189 // cond: 18190 // result: (SETNE (CMPQ x y)) 18191 for { 18192 x := v.Args[0] 18193 y := v.Args[1] 18194 v.reset(OpAMD64SETNE) 18195 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 18196 v0.AddArg(x) 18197 v0.AddArg(y) 18198 v.AddArg(v0) 18199 return true 18200 } 18201 } 18202 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { 18203 b := v.Block 18204 _ = b 18205 // match: (Neq64F x y) 18206 // cond: 18207 // result: (SETNEF (UCOMISD x y)) 18208 for { 18209 x := v.Args[0] 18210 y := v.Args[1] 18211 v.reset(OpAMD64SETNEF) 18212 v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags) 18213 v0.AddArg(x) 18214 v0.AddArg(y) 18215 v.AddArg(v0) 18216 return true 18217 } 18218 } 18219 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { 18220 b := v.Block 18221 _ = b 18222 // match: (Neq8 x y) 18223 // cond: 18224 // result: (SETNE (CMPB x y)) 18225 for { 18226 x := v.Args[0] 18227 y := v.Args[1] 18228 v.reset(OpAMD64SETNE) 18229 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 18230 v0.AddArg(x) 18231 v0.AddArg(y) 18232 v.AddArg(v0) 18233 return true 18234 } 18235 } 18236 func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool { 18237 b := v.Block 18238 _ = b 18239 // match: (NeqB x y) 18240 // cond: 18241 // result: (SETNE (CMPB x y)) 18242 for { 18243 x := v.Args[0] 18244 y := v.Args[1] 18245 v.reset(OpAMD64SETNE) 18246 v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) 18247 v0.AddArg(x) 18248 v0.AddArg(y) 18249 v.AddArg(v0) 18250 return true 18251 } 18252 } 18253 func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { 18254 b := v.Block 18255 _ = b 18256 // match: (NeqPtr x y) 18257 // cond: config.PtrSize == 8 18258 // result: (SETNE (CMPQ x y)) 18259 for { 18260 x := v.Args[0] 18261 y := v.Args[1] 18262 if !(config.PtrSize == 8) { 18263 break 18264 } 18265 v.reset(OpAMD64SETNE) 18266 v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) 18267 v0.AddArg(x) 18268 v0.AddArg(y) 18269 v.AddArg(v0) 18270 return true 18271 } 18272 // match: (NeqPtr x y) 18273 // cond: config.PtrSize == 4 18274 // result: (SETNE (CMPL x y)) 18275 for { 18276 x := v.Args[0] 18277 y := v.Args[1] 18278 if !(config.PtrSize == 4) { 18279 break 18280 } 18281 v.reset(OpAMD64SETNE) 18282 v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) 18283 v0.AddArg(x) 18284 v0.AddArg(y) 18285 v.AddArg(v0) 18286 return true 18287 } 18288 return false 18289 } 18290 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { 18291 b := v.Block 18292 _ = b 18293 // match: (NilCheck ptr mem) 18294 // cond: 18295 // result: (LoweredNilCheck ptr mem) 18296 for { 18297 ptr := v.Args[0] 18298 mem := v.Args[1] 18299 v.reset(OpAMD64LoweredNilCheck) 18300 v.AddArg(ptr) 18301 v.AddArg(mem) 18302 return true 18303 } 18304 } 18305 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { 18306 b := v.Block 18307 _ = b 18308 // match: (Not x) 18309 // cond: 18310 // result: (XORLconst [1] x) 18311 for { 18312 x := v.Args[0] 18313 v.reset(OpAMD64XORLconst) 18314 v.AuxInt = 1 18315 v.AddArg(x) 18316 return true 18317 } 18318 } 18319 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { 18320 b := v.Block 18321 _ = b 18322 // match: (OffPtr [off] ptr) 18323 // cond: config.PtrSize == 8 && is32Bit(off) 18324 // result: (ADDQconst [off] ptr) 18325 for { 18326 off := v.AuxInt 18327 ptr := v.Args[0] 18328 if !(config.PtrSize == 8 && is32Bit(off)) { 18329 break 18330 } 18331 v.reset(OpAMD64ADDQconst) 18332 v.AuxInt = off 18333 v.AddArg(ptr) 18334 return true 18335 } 18336 // match: (OffPtr [off] ptr) 18337 // cond: config.PtrSize == 8 18338 // result: (ADDQ (MOVQconst [off]) ptr) 18339 for { 18340 off := v.AuxInt 18341 ptr := v.Args[0] 18342 if !(config.PtrSize == 8) { 18343 break 18344 } 18345 v.reset(OpAMD64ADDQ) 18346 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 18347 v0.AuxInt = off 18348 v.AddArg(v0) 18349 v.AddArg(ptr) 18350 return true 18351 } 18352 // match: (OffPtr [off] ptr) 18353 // cond: config.PtrSize == 4 18354 // result: (ADDLconst [off] ptr) 18355 for { 18356 off := v.AuxInt 18357 ptr := v.Args[0] 18358 if !(config.PtrSize == 4) { 18359 break 18360 } 18361 v.reset(OpAMD64ADDLconst) 18362 v.AuxInt = off 18363 v.AddArg(ptr) 18364 return true 18365 } 18366 return false 18367 } 18368 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { 18369 b := v.Block 18370 _ = b 18371 // match: (Or16 x y) 18372 // cond: 18373 // result: (ORL x y) 18374 for { 18375 x := v.Args[0] 18376 y := v.Args[1] 18377 v.reset(OpAMD64ORL) 18378 v.AddArg(x) 18379 v.AddArg(y) 18380 return true 18381 } 18382 } 18383 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { 18384 b := v.Block 18385 _ = b 18386 // match: (Or32 x y) 18387 // cond: 18388 // result: (ORL x y) 18389 for { 18390 x := v.Args[0] 18391 y := v.Args[1] 18392 v.reset(OpAMD64ORL) 18393 v.AddArg(x) 18394 v.AddArg(y) 18395 return true 18396 } 18397 } 18398 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { 18399 b := v.Block 18400 _ = b 18401 // match: (Or64 x y) 18402 // cond: 18403 // result: (ORQ x y) 18404 for { 18405 x := v.Args[0] 18406 y := v.Args[1] 18407 v.reset(OpAMD64ORQ) 18408 v.AddArg(x) 18409 v.AddArg(y) 18410 return true 18411 } 18412 } 18413 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { 18414 b := v.Block 18415 _ = b 18416 // match: (Or8 x y) 18417 // cond: 18418 // result: (ORL x y) 18419 for { 18420 x := v.Args[0] 18421 y := v.Args[1] 18422 v.reset(OpAMD64ORL) 18423 v.AddArg(x) 18424 v.AddArg(y) 18425 return true 18426 } 18427 } 18428 func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool { 18429 b := v.Block 18430 _ = b 18431 // match: (OrB x y) 18432 // cond: 18433 // result: (ORL x y) 18434 for { 18435 x := v.Args[0] 18436 y := v.Args[1] 18437 v.reset(OpAMD64ORL) 18438 v.AddArg(x) 18439 v.AddArg(y) 18440 return true 18441 } 18442 } 18443 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { 18444 b := v.Block 18445 _ = b 18446 // match: (Rsh16Ux16 <t> x y) 18447 // cond: 18448 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 18449 for { 18450 t := v.Type 18451 x := v.Args[0] 18452 y := v.Args[1] 18453 v.reset(OpAMD64ANDL) 18454 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 18455 v0.AddArg(x) 18456 v0.AddArg(y) 18457 v.AddArg(v0) 18458 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18459 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18460 v2.AuxInt = 16 18461 v2.AddArg(y) 18462 v1.AddArg(v2) 18463 v.AddArg(v1) 18464 return true 18465 } 18466 } 18467 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { 18468 b := v.Block 18469 _ = b 18470 // match: (Rsh16Ux32 <t> x y) 18471 // cond: 18472 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 18473 for { 18474 t := v.Type 18475 x := v.Args[0] 18476 y := v.Args[1] 18477 v.reset(OpAMD64ANDL) 18478 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 18479 v0.AddArg(x) 18480 v0.AddArg(y) 18481 v.AddArg(v0) 18482 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18483 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18484 v2.AuxInt = 16 18485 v2.AddArg(y) 18486 v1.AddArg(v2) 18487 v.AddArg(v1) 18488 return true 18489 } 18490 } 18491 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { 18492 b := v.Block 18493 _ = b 18494 // match: (Rsh16Ux64 <t> x y) 18495 // cond: 18496 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 18497 for { 18498 t := v.Type 18499 x := v.Args[0] 18500 y := v.Args[1] 18501 v.reset(OpAMD64ANDL) 18502 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 18503 v0.AddArg(x) 18504 v0.AddArg(y) 18505 v.AddArg(v0) 18506 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18507 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18508 v2.AuxInt = 16 18509 v2.AddArg(y) 18510 v1.AddArg(v2) 18511 v.AddArg(v1) 18512 return true 18513 } 18514 } 18515 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { 18516 b := v.Block 18517 _ = b 18518 // match: (Rsh16Ux8 <t> x y) 18519 // cond: 18520 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 18521 for { 18522 t := v.Type 18523 x := v.Args[0] 18524 y := v.Args[1] 18525 v.reset(OpAMD64ANDL) 18526 v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) 18527 v0.AddArg(x) 18528 v0.AddArg(y) 18529 v.AddArg(v0) 18530 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18531 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18532 v2.AuxInt = 16 18533 v2.AddArg(y) 18534 v1.AddArg(v2) 18535 v.AddArg(v1) 18536 return true 18537 } 18538 } 18539 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { 18540 b := v.Block 18541 _ = b 18542 // match: (Rsh16x16 <t> x y) 18543 // cond: 18544 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 18545 for { 18546 t := v.Type 18547 x := v.Args[0] 18548 y := v.Args[1] 18549 v.reset(OpAMD64SARW) 18550 v.Type = t 18551 v.AddArg(x) 18552 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18553 v0.AddArg(y) 18554 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18555 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18556 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18557 v3.AuxInt = 16 18558 v3.AddArg(y) 18559 v2.AddArg(v3) 18560 v1.AddArg(v2) 18561 v0.AddArg(v1) 18562 v.AddArg(v0) 18563 return true 18564 } 18565 } 18566 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { 18567 b := v.Block 18568 _ = b 18569 // match: (Rsh16x32 <t> x y) 18570 // cond: 18571 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 18572 for { 18573 t := v.Type 18574 x := v.Args[0] 18575 y := v.Args[1] 18576 v.reset(OpAMD64SARW) 18577 v.Type = t 18578 v.AddArg(x) 18579 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18580 v0.AddArg(y) 18581 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18582 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18583 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18584 v3.AuxInt = 16 18585 v3.AddArg(y) 18586 v2.AddArg(v3) 18587 v1.AddArg(v2) 18588 v0.AddArg(v1) 18589 v.AddArg(v0) 18590 return true 18591 } 18592 } 18593 func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { 18594 b := v.Block 18595 _ = b 18596 // match: (Rsh16x64 <t> x y) 18597 // cond: 18598 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 18599 for { 18600 t := v.Type 18601 x := v.Args[0] 18602 y := v.Args[1] 18603 v.reset(OpAMD64SARW) 18604 v.Type = t 18605 v.AddArg(x) 18606 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 18607 v0.AddArg(y) 18608 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 18609 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 18610 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18611 v3.AuxInt = 16 18612 v3.AddArg(y) 18613 v2.AddArg(v3) 18614 v1.AddArg(v2) 18615 v0.AddArg(v1) 18616 v.AddArg(v0) 18617 return true 18618 } 18619 } 18620 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { 18621 b := v.Block 18622 _ = b 18623 // match: (Rsh16x8 <t> x y) 18624 // cond: 18625 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 18626 for { 18627 t := v.Type 18628 x := v.Args[0] 18629 y := v.Args[1] 18630 v.reset(OpAMD64SARW) 18631 v.Type = t 18632 v.AddArg(x) 18633 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18634 v0.AddArg(y) 18635 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18636 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18637 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18638 v3.AuxInt = 16 18639 v3.AddArg(y) 18640 v2.AddArg(v3) 18641 v1.AddArg(v2) 18642 v0.AddArg(v1) 18643 v.AddArg(v0) 18644 return true 18645 } 18646 } 18647 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { 18648 b := v.Block 18649 _ = b 18650 // match: (Rsh32Ux16 <t> x y) 18651 // cond: 18652 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 18653 for { 18654 t := v.Type 18655 x := v.Args[0] 18656 y := v.Args[1] 18657 v.reset(OpAMD64ANDL) 18658 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 18659 v0.AddArg(x) 18660 v0.AddArg(y) 18661 v.AddArg(v0) 18662 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18663 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18664 v2.AuxInt = 32 18665 v2.AddArg(y) 18666 v1.AddArg(v2) 18667 v.AddArg(v1) 18668 return true 18669 } 18670 } 18671 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { 18672 b := v.Block 18673 _ = b 18674 // match: (Rsh32Ux32 <t> x y) 18675 // cond: 18676 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 18677 for { 18678 t := v.Type 18679 x := v.Args[0] 18680 y := v.Args[1] 18681 v.reset(OpAMD64ANDL) 18682 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 18683 v0.AddArg(x) 18684 v0.AddArg(y) 18685 v.AddArg(v0) 18686 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18687 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18688 v2.AuxInt = 32 18689 v2.AddArg(y) 18690 v1.AddArg(v2) 18691 v.AddArg(v1) 18692 return true 18693 } 18694 } 18695 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { 18696 b := v.Block 18697 _ = b 18698 // match: (Rsh32Ux64 <t> x y) 18699 // cond: 18700 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 18701 for { 18702 t := v.Type 18703 x := v.Args[0] 18704 y := v.Args[1] 18705 v.reset(OpAMD64ANDL) 18706 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 18707 v0.AddArg(x) 18708 v0.AddArg(y) 18709 v.AddArg(v0) 18710 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18711 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18712 v2.AuxInt = 32 18713 v2.AddArg(y) 18714 v1.AddArg(v2) 18715 v.AddArg(v1) 18716 return true 18717 } 18718 } 18719 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { 18720 b := v.Block 18721 _ = b 18722 // match: (Rsh32Ux8 <t> x y) 18723 // cond: 18724 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 18725 for { 18726 t := v.Type 18727 x := v.Args[0] 18728 y := v.Args[1] 18729 v.reset(OpAMD64ANDL) 18730 v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) 18731 v0.AddArg(x) 18732 v0.AddArg(y) 18733 v.AddArg(v0) 18734 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 18735 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18736 v2.AuxInt = 32 18737 v2.AddArg(y) 18738 v1.AddArg(v2) 18739 v.AddArg(v1) 18740 return true 18741 } 18742 } 18743 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { 18744 b := v.Block 18745 _ = b 18746 // match: (Rsh32x16 <t> x y) 18747 // cond: 18748 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 18749 for { 18750 t := v.Type 18751 x := v.Args[0] 18752 y := v.Args[1] 18753 v.reset(OpAMD64SARL) 18754 v.Type = t 18755 v.AddArg(x) 18756 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18757 v0.AddArg(y) 18758 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18759 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18760 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18761 v3.AuxInt = 32 18762 v3.AddArg(y) 18763 v2.AddArg(v3) 18764 v1.AddArg(v2) 18765 v0.AddArg(v1) 18766 v.AddArg(v0) 18767 return true 18768 } 18769 } 18770 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { 18771 b := v.Block 18772 _ = b 18773 // match: (Rsh32x32 <t> x y) 18774 // cond: 18775 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 18776 for { 18777 t := v.Type 18778 x := v.Args[0] 18779 y := v.Args[1] 18780 v.reset(OpAMD64SARL) 18781 v.Type = t 18782 v.AddArg(x) 18783 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18784 v0.AddArg(y) 18785 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18786 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18787 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18788 v3.AuxInt = 32 18789 v3.AddArg(y) 18790 v2.AddArg(v3) 18791 v1.AddArg(v2) 18792 v0.AddArg(v1) 18793 v.AddArg(v0) 18794 return true 18795 } 18796 } 18797 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { 18798 b := v.Block 18799 _ = b 18800 // match: (Rsh32x64 <t> x y) 18801 // cond: 18802 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 18803 for { 18804 t := v.Type 18805 x := v.Args[0] 18806 y := v.Args[1] 18807 v.reset(OpAMD64SARL) 18808 v.Type = t 18809 v.AddArg(x) 18810 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 18811 v0.AddArg(y) 18812 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 18813 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 18814 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18815 v3.AuxInt = 32 18816 v3.AddArg(y) 18817 v2.AddArg(v3) 18818 v1.AddArg(v2) 18819 v0.AddArg(v1) 18820 v.AddArg(v0) 18821 return true 18822 } 18823 } 18824 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { 18825 b := v.Block 18826 _ = b 18827 // match: (Rsh32x8 <t> x y) 18828 // cond: 18829 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 18830 for { 18831 t := v.Type 18832 x := v.Args[0] 18833 y := v.Args[1] 18834 v.reset(OpAMD64SARL) 18835 v.Type = t 18836 v.AddArg(x) 18837 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18838 v0.AddArg(y) 18839 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18840 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18841 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18842 v3.AuxInt = 32 18843 v3.AddArg(y) 18844 v2.AddArg(v3) 18845 v1.AddArg(v2) 18846 v0.AddArg(v1) 18847 v.AddArg(v0) 18848 return true 18849 } 18850 } 18851 func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool { 18852 b := v.Block 18853 _ = b 18854 // match: (Rsh64Ux16 <t> x y) 18855 // cond: 18856 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 18857 for { 18858 t := v.Type 18859 x := v.Args[0] 18860 y := v.Args[1] 18861 v.reset(OpAMD64ANDQ) 18862 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 18863 v0.AddArg(x) 18864 v0.AddArg(y) 18865 v.AddArg(v0) 18866 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 18867 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18868 v2.AuxInt = 64 18869 v2.AddArg(y) 18870 v1.AddArg(v2) 18871 v.AddArg(v1) 18872 return true 18873 } 18874 } 18875 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { 18876 b := v.Block 18877 _ = b 18878 // match: (Rsh64Ux32 <t> x y) 18879 // cond: 18880 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 18881 for { 18882 t := v.Type 18883 x := v.Args[0] 18884 y := v.Args[1] 18885 v.reset(OpAMD64ANDQ) 18886 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 18887 v0.AddArg(x) 18888 v0.AddArg(y) 18889 v.AddArg(v0) 18890 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 18891 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18892 v2.AuxInt = 64 18893 v2.AddArg(y) 18894 v1.AddArg(v2) 18895 v.AddArg(v1) 18896 return true 18897 } 18898 } 18899 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { 18900 b := v.Block 18901 _ = b 18902 // match: (Rsh64Ux64 <t> x y) 18903 // cond: 18904 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 18905 for { 18906 t := v.Type 18907 x := v.Args[0] 18908 y := v.Args[1] 18909 v.reset(OpAMD64ANDQ) 18910 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 18911 v0.AddArg(x) 18912 v0.AddArg(y) 18913 v.AddArg(v0) 18914 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 18915 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 18916 v2.AuxInt = 64 18917 v2.AddArg(y) 18918 v1.AddArg(v2) 18919 v.AddArg(v1) 18920 return true 18921 } 18922 } 18923 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { 18924 b := v.Block 18925 _ = b 18926 // match: (Rsh64Ux8 <t> x y) 18927 // cond: 18928 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 18929 for { 18930 t := v.Type 18931 x := v.Args[0] 18932 y := v.Args[1] 18933 v.reset(OpAMD64ANDQ) 18934 v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) 18935 v0.AddArg(x) 18936 v0.AddArg(y) 18937 v.AddArg(v0) 18938 v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) 18939 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 18940 v2.AuxInt = 64 18941 v2.AddArg(y) 18942 v1.AddArg(v2) 18943 v.AddArg(v1) 18944 return true 18945 } 18946 } 18947 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { 18948 b := v.Block 18949 _ = b 18950 // match: (Rsh64x16 <t> x y) 18951 // cond: 18952 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 18953 for { 18954 t := v.Type 18955 x := v.Args[0] 18956 y := v.Args[1] 18957 v.reset(OpAMD64SARQ) 18958 v.Type = t 18959 v.AddArg(x) 18960 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18961 v0.AddArg(y) 18962 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18963 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18964 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 18965 v3.AuxInt = 64 18966 v3.AddArg(y) 18967 v2.AddArg(v3) 18968 v1.AddArg(v2) 18969 v0.AddArg(v1) 18970 v.AddArg(v0) 18971 return true 18972 } 18973 } 18974 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { 18975 b := v.Block 18976 _ = b 18977 // match: (Rsh64x32 <t> x y) 18978 // cond: 18979 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 18980 for { 18981 t := v.Type 18982 x := v.Args[0] 18983 y := v.Args[1] 18984 v.reset(OpAMD64SARQ) 18985 v.Type = t 18986 v.AddArg(x) 18987 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 18988 v0.AddArg(y) 18989 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 18990 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 18991 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 18992 v3.AuxInt = 64 18993 v3.AddArg(y) 18994 v2.AddArg(v3) 18995 v1.AddArg(v2) 18996 v0.AddArg(v1) 18997 v.AddArg(v0) 18998 return true 18999 } 19000 } 19001 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { 19002 b := v.Block 19003 _ = b 19004 // match: (Rsh64x64 <t> x y) 19005 // cond: 19006 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 19007 for { 19008 t := v.Type 19009 x := v.Args[0] 19010 y := v.Args[1] 19011 v.reset(OpAMD64SARQ) 19012 v.Type = t 19013 v.AddArg(x) 19014 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 19015 v0.AddArg(y) 19016 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 19017 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 19018 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 19019 v3.AuxInt = 64 19020 v3.AddArg(y) 19021 v2.AddArg(v3) 19022 v1.AddArg(v2) 19023 v0.AddArg(v1) 19024 v.AddArg(v0) 19025 return true 19026 } 19027 } 19028 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { 19029 b := v.Block 19030 _ = b 19031 // match: (Rsh64x8 <t> x y) 19032 // cond: 19033 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 19034 for { 19035 t := v.Type 19036 x := v.Args[0] 19037 y := v.Args[1] 19038 v.reset(OpAMD64SARQ) 19039 v.Type = t 19040 v.AddArg(x) 19041 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 19042 v0.AddArg(y) 19043 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 19044 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 19045 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 19046 v3.AuxInt = 64 19047 v3.AddArg(y) 19048 v2.AddArg(v3) 19049 v1.AddArg(v2) 19050 v0.AddArg(v1) 19051 v.AddArg(v0) 19052 return true 19053 } 19054 } 19055 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { 19056 b := v.Block 19057 _ = b 19058 // match: (Rsh8Ux16 <t> x y) 19059 // cond: 19060 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 19061 for { 19062 t := v.Type 19063 x := v.Args[0] 19064 y := v.Args[1] 19065 v.reset(OpAMD64ANDL) 19066 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 19067 v0.AddArg(x) 19068 v0.AddArg(y) 19069 v.AddArg(v0) 19070 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 19071 v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 19072 v2.AuxInt = 8 19073 v2.AddArg(y) 19074 v1.AddArg(v2) 19075 v.AddArg(v1) 19076 return true 19077 } 19078 } 19079 func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { 19080 b := v.Block 19081 _ = b 19082 // match: (Rsh8Ux32 <t> x y) 19083 // cond: 19084 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 19085 for { 19086 t := v.Type 19087 x := v.Args[0] 19088 y := v.Args[1] 19089 v.reset(OpAMD64ANDL) 19090 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 19091 v0.AddArg(x) 19092 v0.AddArg(y) 19093 v.AddArg(v0) 19094 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 19095 v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 19096 v2.AuxInt = 8 19097 v2.AddArg(y) 19098 v1.AddArg(v2) 19099 v.AddArg(v1) 19100 return true 19101 } 19102 } 19103 func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { 19104 b := v.Block 19105 _ = b 19106 // match: (Rsh8Ux64 <t> x y) 19107 // cond: 19108 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 19109 for { 19110 t := v.Type 19111 x := v.Args[0] 19112 y := v.Args[1] 19113 v.reset(OpAMD64ANDL) 19114 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 19115 v0.AddArg(x) 19116 v0.AddArg(y) 19117 v.AddArg(v0) 19118 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 19119 v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 19120 v2.AuxInt = 8 19121 v2.AddArg(y) 19122 v1.AddArg(v2) 19123 v.AddArg(v1) 19124 return true 19125 } 19126 } 19127 func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { 19128 b := v.Block 19129 _ = b 19130 // match: (Rsh8Ux8 <t> x y) 19131 // cond: 19132 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 19133 for { 19134 t := v.Type 19135 x := v.Args[0] 19136 y := v.Args[1] 19137 v.reset(OpAMD64ANDL) 19138 v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) 19139 v0.AddArg(x) 19140 v0.AddArg(y) 19141 v.AddArg(v0) 19142 v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) 19143 v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 19144 v2.AuxInt = 8 19145 v2.AddArg(y) 19146 v1.AddArg(v2) 19147 v.AddArg(v1) 19148 return true 19149 } 19150 } 19151 func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { 19152 b := v.Block 19153 _ = b 19154 // match: (Rsh8x16 <t> x y) 19155 // cond: 19156 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 19157 for { 19158 t := v.Type 19159 x := v.Args[0] 19160 y := v.Args[1] 19161 v.reset(OpAMD64SARB) 19162 v.Type = t 19163 v.AddArg(x) 19164 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 19165 v0.AddArg(y) 19166 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 19167 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 19168 v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) 19169 v3.AuxInt = 8 19170 v3.AddArg(y) 19171 v2.AddArg(v3) 19172 v1.AddArg(v2) 19173 v0.AddArg(v1) 19174 v.AddArg(v0) 19175 return true 19176 } 19177 } 19178 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { 19179 b := v.Block 19180 _ = b 19181 // match: (Rsh8x32 <t> x y) 19182 // cond: 19183 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 19184 for { 19185 t := v.Type 19186 x := v.Args[0] 19187 y := v.Args[1] 19188 v.reset(OpAMD64SARB) 19189 v.Type = t 19190 v.AddArg(x) 19191 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 19192 v0.AddArg(y) 19193 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 19194 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 19195 v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) 19196 v3.AuxInt = 8 19197 v3.AddArg(y) 19198 v2.AddArg(v3) 19199 v1.AddArg(v2) 19200 v0.AddArg(v1) 19201 v.AddArg(v0) 19202 return true 19203 } 19204 } 19205 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { 19206 b := v.Block 19207 _ = b 19208 // match: (Rsh8x64 <t> x y) 19209 // cond: 19210 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 19211 for { 19212 t := v.Type 19213 x := v.Args[0] 19214 y := v.Args[1] 19215 v.reset(OpAMD64SARB) 19216 v.Type = t 19217 v.AddArg(x) 19218 v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) 19219 v0.AddArg(y) 19220 v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) 19221 v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) 19222 v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) 19223 v3.AuxInt = 8 19224 v3.AddArg(y) 19225 v2.AddArg(v3) 19226 v1.AddArg(v2) 19227 v0.AddArg(v1) 19228 v.AddArg(v0) 19229 return true 19230 } 19231 } 19232 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { 19233 b := v.Block 19234 _ = b 19235 // match: (Rsh8x8 <t> x y) 19236 // cond: 19237 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 19238 for { 19239 t := v.Type 19240 x := v.Args[0] 19241 y := v.Args[1] 19242 v.reset(OpAMD64SARB) 19243 v.Type = t 19244 v.AddArg(x) 19245 v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) 19246 v0.AddArg(y) 19247 v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) 19248 v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) 19249 v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) 19250 v3.AuxInt = 8 19251 v3.AddArg(y) 19252 v2.AddArg(v3) 19253 v1.AddArg(v2) 19254 v0.AddArg(v1) 19255 v.AddArg(v0) 19256 return true 19257 } 19258 } 19259 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool { 19260 b := v.Block 19261 _ = b 19262 // match: (Select0 <t> (AddTupleFirst32 tuple val)) 19263 // cond: 19264 // result: (ADDL val (Select0 <t> tuple)) 19265 for { 19266 t := v.Type 19267 v_0 := v.Args[0] 19268 if v_0.Op != OpAMD64AddTupleFirst32 { 19269 break 19270 } 19271 tuple := v_0.Args[0] 19272 val := v_0.Args[1] 19273 v.reset(OpAMD64ADDL) 19274 v.AddArg(val) 19275 v0 := b.NewValue0(v.Line, OpSelect0, t) 19276 v0.AddArg(tuple) 19277 v.AddArg(v0) 19278 return true 19279 } 19280 // match: (Select0 <t> (AddTupleFirst64 tuple val)) 19281 // cond: 19282 // result: (ADDQ val (Select0 <t> tuple)) 19283 for { 19284 t := v.Type 19285 v_0 := v.Args[0] 19286 if v_0.Op != OpAMD64AddTupleFirst64 { 19287 break 19288 } 19289 tuple := v_0.Args[0] 19290 val := v_0.Args[1] 19291 v.reset(OpAMD64ADDQ) 19292 v.AddArg(val) 19293 v0 := b.NewValue0(v.Line, OpSelect0, t) 19294 v0.AddArg(tuple) 19295 v.AddArg(v0) 19296 return true 19297 } 19298 return false 19299 } 19300 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool { 19301 b := v.Block 19302 _ = b 19303 // match: (Select1 (AddTupleFirst32 tuple _ )) 19304 // cond: 19305 // result: (Select1 tuple) 19306 for { 19307 v_0 := v.Args[0] 19308 if v_0.Op != OpAMD64AddTupleFirst32 { 19309 break 19310 } 19311 tuple := v_0.Args[0] 19312 v.reset(OpSelect1) 19313 v.AddArg(tuple) 19314 return true 19315 } 19316 // match: (Select1 (AddTupleFirst64 tuple _ )) 19317 // cond: 19318 // result: (Select1 tuple) 19319 for { 19320 v_0 := v.Args[0] 19321 if v_0.Op != OpAMD64AddTupleFirst64 { 19322 break 19323 } 19324 tuple := v_0.Args[0] 19325 v.reset(OpSelect1) 19326 v.AddArg(tuple) 19327 return true 19328 } 19329 return false 19330 } 19331 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { 19332 b := v.Block 19333 _ = b 19334 // match: (SignExt16to32 x) 19335 // cond: 19336 // result: (MOVWQSX x) 19337 for { 19338 x := v.Args[0] 19339 v.reset(OpAMD64MOVWQSX) 19340 v.AddArg(x) 19341 return true 19342 } 19343 } 19344 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { 19345 b := v.Block 19346 _ = b 19347 // match: (SignExt16to64 x) 19348 // cond: 19349 // result: (MOVWQSX x) 19350 for { 19351 x := v.Args[0] 19352 v.reset(OpAMD64MOVWQSX) 19353 v.AddArg(x) 19354 return true 19355 } 19356 } 19357 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { 19358 b := v.Block 19359 _ = b 19360 // match: (SignExt32to64 x) 19361 // cond: 19362 // result: (MOVLQSX x) 19363 for { 19364 x := v.Args[0] 19365 v.reset(OpAMD64MOVLQSX) 19366 v.AddArg(x) 19367 return true 19368 } 19369 } 19370 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { 19371 b := v.Block 19372 _ = b 19373 // match: (SignExt8to16 x) 19374 // cond: 19375 // result: (MOVBQSX x) 19376 for { 19377 x := v.Args[0] 19378 v.reset(OpAMD64MOVBQSX) 19379 v.AddArg(x) 19380 return true 19381 } 19382 } 19383 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { 19384 b := v.Block 19385 _ = b 19386 // match: (SignExt8to32 x) 19387 // cond: 19388 // result: (MOVBQSX x) 19389 for { 19390 x := v.Args[0] 19391 v.reset(OpAMD64MOVBQSX) 19392 v.AddArg(x) 19393 return true 19394 } 19395 } 19396 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { 19397 b := v.Block 19398 _ = b 19399 // match: (SignExt8to64 x) 19400 // cond: 19401 // result: (MOVBQSX x) 19402 for { 19403 x := v.Args[0] 19404 v.reset(OpAMD64MOVBQSX) 19405 v.AddArg(x) 19406 return true 19407 } 19408 } 19409 func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool { 19410 b := v.Block 19411 _ = b 19412 // match: (Slicemask <t> x) 19413 // cond: 19414 // result: (XORQconst [-1] (SARQconst <t> (SUBQconst <t> x [1]) [63])) 19415 for { 19416 t := v.Type 19417 x := v.Args[0] 19418 v.reset(OpAMD64XORQconst) 19419 v.AuxInt = -1 19420 v0 := b.NewValue0(v.Line, OpAMD64SARQconst, t) 19421 v0.AuxInt = 63 19422 v1 := b.NewValue0(v.Line, OpAMD64SUBQconst, t) 19423 v1.AuxInt = 1 19424 v1.AddArg(x) 19425 v0.AddArg(v1) 19426 v.AddArg(v0) 19427 return true 19428 } 19429 } 19430 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { 19431 b := v.Block 19432 _ = b 19433 // match: (Sqrt x) 19434 // cond: 19435 // result: (SQRTSD x) 19436 for { 19437 x := v.Args[0] 19438 v.reset(OpAMD64SQRTSD) 19439 v.AddArg(x) 19440 return true 19441 } 19442 } 19443 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { 19444 b := v.Block 19445 _ = b 19446 // match: (StaticCall [argwid] {target} mem) 19447 // cond: 19448 // result: (CALLstatic [argwid] {target} mem) 19449 for { 19450 argwid := v.AuxInt 19451 target := v.Aux 19452 mem := v.Args[0] 19453 v.reset(OpAMD64CALLstatic) 19454 v.AuxInt = argwid 19455 v.Aux = target 19456 v.AddArg(mem) 19457 return true 19458 } 19459 } 19460 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { 19461 b := v.Block 19462 _ = b 19463 // match: (Store [8] ptr val mem) 19464 // cond: is64BitFloat(val.Type) 19465 // result: (MOVSDstore ptr val mem) 19466 for { 19467 if v.AuxInt != 8 { 19468 break 19469 } 19470 ptr := v.Args[0] 19471 val := v.Args[1] 19472 mem := v.Args[2] 19473 if !(is64BitFloat(val.Type)) { 19474 break 19475 } 19476 v.reset(OpAMD64MOVSDstore) 19477 v.AddArg(ptr) 19478 v.AddArg(val) 19479 v.AddArg(mem) 19480 return true 19481 } 19482 // match: (Store [4] ptr val mem) 19483 // cond: is32BitFloat(val.Type) 19484 // result: (MOVSSstore ptr val mem) 19485 for { 19486 if v.AuxInt != 4 { 19487 break 19488 } 19489 ptr := v.Args[0] 19490 val := v.Args[1] 19491 mem := v.Args[2] 19492 if !(is32BitFloat(val.Type)) { 19493 break 19494 } 19495 v.reset(OpAMD64MOVSSstore) 19496 v.AddArg(ptr) 19497 v.AddArg(val) 19498 v.AddArg(mem) 19499 return true 19500 } 19501 // match: (Store [8] ptr val mem) 19502 // cond: 19503 // result: (MOVQstore ptr val mem) 19504 for { 19505 if v.AuxInt != 8 { 19506 break 19507 } 19508 ptr := v.Args[0] 19509 val := v.Args[1] 19510 mem := v.Args[2] 19511 v.reset(OpAMD64MOVQstore) 19512 v.AddArg(ptr) 19513 v.AddArg(val) 19514 v.AddArg(mem) 19515 return true 19516 } 19517 // match: (Store [4] ptr val mem) 19518 // cond: 19519 // result: (MOVLstore ptr val mem) 19520 for { 19521 if v.AuxInt != 4 { 19522 break 19523 } 19524 ptr := v.Args[0] 19525 val := v.Args[1] 19526 mem := v.Args[2] 19527 v.reset(OpAMD64MOVLstore) 19528 v.AddArg(ptr) 19529 v.AddArg(val) 19530 v.AddArg(mem) 19531 return true 19532 } 19533 // match: (Store [2] ptr val mem) 19534 // cond: 19535 // result: (MOVWstore ptr val mem) 19536 for { 19537 if v.AuxInt != 2 { 19538 break 19539 } 19540 ptr := v.Args[0] 19541 val := v.Args[1] 19542 mem := v.Args[2] 19543 v.reset(OpAMD64MOVWstore) 19544 v.AddArg(ptr) 19545 v.AddArg(val) 19546 v.AddArg(mem) 19547 return true 19548 } 19549 // match: (Store [1] ptr val mem) 19550 // cond: 19551 // result: (MOVBstore ptr val mem) 19552 for { 19553 if v.AuxInt != 1 { 19554 break 19555 } 19556 ptr := v.Args[0] 19557 val := v.Args[1] 19558 mem := v.Args[2] 19559 v.reset(OpAMD64MOVBstore) 19560 v.AddArg(ptr) 19561 v.AddArg(val) 19562 v.AddArg(mem) 19563 return true 19564 } 19565 return false 19566 } 19567 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { 19568 b := v.Block 19569 _ = b 19570 // match: (Sub16 x y) 19571 // cond: 19572 // result: (SUBL x y) 19573 for { 19574 x := v.Args[0] 19575 y := v.Args[1] 19576 v.reset(OpAMD64SUBL) 19577 v.AddArg(x) 19578 v.AddArg(y) 19579 return true 19580 } 19581 } 19582 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { 19583 b := v.Block 19584 _ = b 19585 // match: (Sub32 x y) 19586 // cond: 19587 // result: (SUBL x y) 19588 for { 19589 x := v.Args[0] 19590 y := v.Args[1] 19591 v.reset(OpAMD64SUBL) 19592 v.AddArg(x) 19593 v.AddArg(y) 19594 return true 19595 } 19596 } 19597 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { 19598 b := v.Block 19599 _ = b 19600 // match: (Sub32F x y) 19601 // cond: 19602 // result: (SUBSS x y) 19603 for { 19604 x := v.Args[0] 19605 y := v.Args[1] 19606 v.reset(OpAMD64SUBSS) 19607 v.AddArg(x) 19608 v.AddArg(y) 19609 return true 19610 } 19611 } 19612 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { 19613 b := v.Block 19614 _ = b 19615 // match: (Sub64 x y) 19616 // cond: 19617 // result: (SUBQ x y) 19618 for { 19619 x := v.Args[0] 19620 y := v.Args[1] 19621 v.reset(OpAMD64SUBQ) 19622 v.AddArg(x) 19623 v.AddArg(y) 19624 return true 19625 } 19626 } 19627 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { 19628 b := v.Block 19629 _ = b 19630 // match: (Sub64F x y) 19631 // cond: 19632 // result: (SUBSD x y) 19633 for { 19634 x := v.Args[0] 19635 y := v.Args[1] 19636 v.reset(OpAMD64SUBSD) 19637 v.AddArg(x) 19638 v.AddArg(y) 19639 return true 19640 } 19641 } 19642 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { 19643 b := v.Block 19644 _ = b 19645 // match: (Sub8 x y) 19646 // cond: 19647 // result: (SUBL x y) 19648 for { 19649 x := v.Args[0] 19650 y := v.Args[1] 19651 v.reset(OpAMD64SUBL) 19652 v.AddArg(x) 19653 v.AddArg(y) 19654 return true 19655 } 19656 } 19657 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { 19658 b := v.Block 19659 _ = b 19660 // match: (SubPtr x y) 19661 // cond: config.PtrSize == 8 19662 // result: (SUBQ x y) 19663 for { 19664 x := v.Args[0] 19665 y := v.Args[1] 19666 if !(config.PtrSize == 8) { 19667 break 19668 } 19669 v.reset(OpAMD64SUBQ) 19670 v.AddArg(x) 19671 v.AddArg(y) 19672 return true 19673 } 19674 // match: (SubPtr x y) 19675 // cond: config.PtrSize == 4 19676 // result: (SUBL x y) 19677 for { 19678 x := v.Args[0] 19679 y := v.Args[1] 19680 if !(config.PtrSize == 4) { 19681 break 19682 } 19683 v.reset(OpAMD64SUBL) 19684 v.AddArg(x) 19685 v.AddArg(y) 19686 return true 19687 } 19688 return false 19689 } 19690 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { 19691 b := v.Block 19692 _ = b 19693 // match: (Trunc16to8 x) 19694 // cond: 19695 // result: x 19696 for { 19697 x := v.Args[0] 19698 v.reset(OpCopy) 19699 v.Type = x.Type 19700 v.AddArg(x) 19701 return true 19702 } 19703 } 19704 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { 19705 b := v.Block 19706 _ = b 19707 // match: (Trunc32to16 x) 19708 // cond: 19709 // result: x 19710 for { 19711 x := v.Args[0] 19712 v.reset(OpCopy) 19713 v.Type = x.Type 19714 v.AddArg(x) 19715 return true 19716 } 19717 } 19718 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { 19719 b := v.Block 19720 _ = b 19721 // match: (Trunc32to8 x) 19722 // cond: 19723 // result: x 19724 for { 19725 x := v.Args[0] 19726 v.reset(OpCopy) 19727 v.Type = x.Type 19728 v.AddArg(x) 19729 return true 19730 } 19731 } 19732 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { 19733 b := v.Block 19734 _ = b 19735 // match: (Trunc64to16 x) 19736 // cond: 19737 // result: x 19738 for { 19739 x := v.Args[0] 19740 v.reset(OpCopy) 19741 v.Type = x.Type 19742 v.AddArg(x) 19743 return true 19744 } 19745 } 19746 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { 19747 b := v.Block 19748 _ = b 19749 // match: (Trunc64to32 x) 19750 // cond: 19751 // result: x 19752 for { 19753 x := v.Args[0] 19754 v.reset(OpCopy) 19755 v.Type = x.Type 19756 v.AddArg(x) 19757 return true 19758 } 19759 } 19760 func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { 19761 b := v.Block 19762 _ = b 19763 // match: (Trunc64to8 x) 19764 // cond: 19765 // result: x 19766 for { 19767 x := v.Args[0] 19768 v.reset(OpCopy) 19769 v.Type = x.Type 19770 v.AddArg(x) 19771 return true 19772 } 19773 } 19774 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { 19775 b := v.Block 19776 _ = b 19777 // match: (Xor16 x y) 19778 // cond: 19779 // result: (XORL x y) 19780 for { 19781 x := v.Args[0] 19782 y := v.Args[1] 19783 v.reset(OpAMD64XORL) 19784 v.AddArg(x) 19785 v.AddArg(y) 19786 return true 19787 } 19788 } 19789 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { 19790 b := v.Block 19791 _ = b 19792 // match: (Xor32 x y) 19793 // cond: 19794 // result: (XORL x y) 19795 for { 19796 x := v.Args[0] 19797 y := v.Args[1] 19798 v.reset(OpAMD64XORL) 19799 v.AddArg(x) 19800 v.AddArg(y) 19801 return true 19802 } 19803 } 19804 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { 19805 b := v.Block 19806 _ = b 19807 // match: (Xor64 x y) 19808 // cond: 19809 // result: (XORQ x y) 19810 for { 19811 x := v.Args[0] 19812 y := v.Args[1] 19813 v.reset(OpAMD64XORQ) 19814 v.AddArg(x) 19815 v.AddArg(y) 19816 return true 19817 } 19818 } 19819 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { 19820 b := v.Block 19821 _ = b 19822 // match: (Xor8 x y) 19823 // cond: 19824 // result: (XORL x y) 19825 for { 19826 x := v.Args[0] 19827 y := v.Args[1] 19828 v.reset(OpAMD64XORL) 19829 v.AddArg(x) 19830 v.AddArg(y) 19831 return true 19832 } 19833 } 19834 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool { 19835 b := v.Block 19836 _ = b 19837 // match: (Zero [s] _ mem) 19838 // cond: SizeAndAlign(s).Size() == 0 19839 // result: mem 19840 for { 19841 s := v.AuxInt 19842 mem := v.Args[1] 19843 if !(SizeAndAlign(s).Size() == 0) { 19844 break 19845 } 19846 v.reset(OpCopy) 19847 v.Type = mem.Type 19848 v.AddArg(mem) 19849 return true 19850 } 19851 // match: (Zero [s] destptr mem) 19852 // cond: SizeAndAlign(s).Size() == 1 19853 // result: (MOVBstoreconst [0] destptr mem) 19854 for { 19855 s := v.AuxInt 19856 destptr := v.Args[0] 19857 mem := v.Args[1] 19858 if !(SizeAndAlign(s).Size() == 1) { 19859 break 19860 } 19861 v.reset(OpAMD64MOVBstoreconst) 19862 v.AuxInt = 0 19863 v.AddArg(destptr) 19864 v.AddArg(mem) 19865 return true 19866 } 19867 // match: (Zero [s] destptr mem) 19868 // cond: SizeAndAlign(s).Size() == 2 19869 // result: (MOVWstoreconst [0] destptr mem) 19870 for { 19871 s := v.AuxInt 19872 destptr := v.Args[0] 19873 mem := v.Args[1] 19874 if !(SizeAndAlign(s).Size() == 2) { 19875 break 19876 } 19877 v.reset(OpAMD64MOVWstoreconst) 19878 v.AuxInt = 0 19879 v.AddArg(destptr) 19880 v.AddArg(mem) 19881 return true 19882 } 19883 // match: (Zero [s] destptr mem) 19884 // cond: SizeAndAlign(s).Size() == 4 19885 // result: (MOVLstoreconst [0] destptr mem) 19886 for { 19887 s := v.AuxInt 19888 destptr := v.Args[0] 19889 mem := v.Args[1] 19890 if !(SizeAndAlign(s).Size() == 4) { 19891 break 19892 } 19893 v.reset(OpAMD64MOVLstoreconst) 19894 v.AuxInt = 0 19895 v.AddArg(destptr) 19896 v.AddArg(mem) 19897 return true 19898 } 19899 // match: (Zero [s] destptr mem) 19900 // cond: SizeAndAlign(s).Size() == 8 19901 // result: (MOVQstoreconst [0] destptr mem) 19902 for { 19903 s := v.AuxInt 19904 destptr := v.Args[0] 19905 mem := v.Args[1] 19906 if !(SizeAndAlign(s).Size() == 8) { 19907 break 19908 } 19909 v.reset(OpAMD64MOVQstoreconst) 19910 v.AuxInt = 0 19911 v.AddArg(destptr) 19912 v.AddArg(mem) 19913 return true 19914 } 19915 // match: (Zero [s] destptr mem) 19916 // cond: SizeAndAlign(s).Size() == 3 19917 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 19918 for { 19919 s := v.AuxInt 19920 destptr := v.Args[0] 19921 mem := v.Args[1] 19922 if !(SizeAndAlign(s).Size() == 3) { 19923 break 19924 } 19925 v.reset(OpAMD64MOVBstoreconst) 19926 v.AuxInt = makeValAndOff(0, 2) 19927 v.AddArg(destptr) 19928 v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem) 19929 v0.AuxInt = 0 19930 v0.AddArg(destptr) 19931 v0.AddArg(mem) 19932 v.AddArg(v0) 19933 return true 19934 } 19935 // match: (Zero [s] destptr mem) 19936 // cond: SizeAndAlign(s).Size() == 5 19937 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 19938 for { 19939 s := v.AuxInt 19940 destptr := v.Args[0] 19941 mem := v.Args[1] 19942 if !(SizeAndAlign(s).Size() == 5) { 19943 break 19944 } 19945 v.reset(OpAMD64MOVBstoreconst) 19946 v.AuxInt = makeValAndOff(0, 4) 19947 v.AddArg(destptr) 19948 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 19949 v0.AuxInt = 0 19950 v0.AddArg(destptr) 19951 v0.AddArg(mem) 19952 v.AddArg(v0) 19953 return true 19954 } 19955 // match: (Zero [s] destptr mem) 19956 // cond: SizeAndAlign(s).Size() == 6 19957 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 19958 for { 19959 s := v.AuxInt 19960 destptr := v.Args[0] 19961 mem := v.Args[1] 19962 if !(SizeAndAlign(s).Size() == 6) { 19963 break 19964 } 19965 v.reset(OpAMD64MOVWstoreconst) 19966 v.AuxInt = makeValAndOff(0, 4) 19967 v.AddArg(destptr) 19968 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 19969 v0.AuxInt = 0 19970 v0.AddArg(destptr) 19971 v0.AddArg(mem) 19972 v.AddArg(v0) 19973 return true 19974 } 19975 // match: (Zero [s] destptr mem) 19976 // cond: SizeAndAlign(s).Size() == 7 19977 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 19978 for { 19979 s := v.AuxInt 19980 destptr := v.Args[0] 19981 mem := v.Args[1] 19982 if !(SizeAndAlign(s).Size() == 7) { 19983 break 19984 } 19985 v.reset(OpAMD64MOVLstoreconst) 19986 v.AuxInt = makeValAndOff(0, 3) 19987 v.AddArg(destptr) 19988 v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) 19989 v0.AuxInt = 0 19990 v0.AddArg(destptr) 19991 v0.AddArg(mem) 19992 v.AddArg(v0) 19993 return true 19994 } 19995 // match: (Zero [s] destptr mem) 19996 // cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 19997 // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem)) 19998 for { 19999 s := v.AuxInt 20000 destptr := v.Args[0] 20001 mem := v.Args[1] 20002 if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) { 20003 break 20004 } 20005 v.reset(OpZero) 20006 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8 20007 v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type) 20008 v0.AuxInt = SizeAndAlign(s).Size() % 8 20009 v0.AddArg(destptr) 20010 v.AddArg(v0) 20011 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20012 v1.AuxInt = 0 20013 v1.AddArg(destptr) 20014 v1.AddArg(mem) 20015 v.AddArg(v1) 20016 return true 20017 } 20018 // match: (Zero [s] destptr mem) 20019 // cond: SizeAndAlign(s).Size() == 16 20020 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 20021 for { 20022 s := v.AuxInt 20023 destptr := v.Args[0] 20024 mem := v.Args[1] 20025 if !(SizeAndAlign(s).Size() == 16) { 20026 break 20027 } 20028 v.reset(OpAMD64MOVQstoreconst) 20029 v.AuxInt = makeValAndOff(0, 8) 20030 v.AddArg(destptr) 20031 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20032 v0.AuxInt = 0 20033 v0.AddArg(destptr) 20034 v0.AddArg(mem) 20035 v.AddArg(v0) 20036 return true 20037 } 20038 // match: (Zero [s] destptr mem) 20039 // cond: SizeAndAlign(s).Size() == 24 20040 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 20041 for { 20042 s := v.AuxInt 20043 destptr := v.Args[0] 20044 mem := v.Args[1] 20045 if !(SizeAndAlign(s).Size() == 24) { 20046 break 20047 } 20048 v.reset(OpAMD64MOVQstoreconst) 20049 v.AuxInt = makeValAndOff(0, 16) 20050 v.AddArg(destptr) 20051 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20052 v0.AuxInt = makeValAndOff(0, 8) 20053 v0.AddArg(destptr) 20054 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20055 v1.AuxInt = 0 20056 v1.AddArg(destptr) 20057 v1.AddArg(mem) 20058 v0.AddArg(v1) 20059 v.AddArg(v0) 20060 return true 20061 } 20062 // match: (Zero [s] destptr mem) 20063 // cond: SizeAndAlign(s).Size() == 32 20064 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 20065 for { 20066 s := v.AuxInt 20067 destptr := v.Args[0] 20068 mem := v.Args[1] 20069 if !(SizeAndAlign(s).Size() == 32) { 20070 break 20071 } 20072 v.reset(OpAMD64MOVQstoreconst) 20073 v.AuxInt = makeValAndOff(0, 24) 20074 v.AddArg(destptr) 20075 v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20076 v0.AuxInt = makeValAndOff(0, 16) 20077 v0.AddArg(destptr) 20078 v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20079 v1.AuxInt = makeValAndOff(0, 8) 20080 v1.AddArg(destptr) 20081 v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) 20082 v2.AuxInt = 0 20083 v2.AddArg(destptr) 20084 v2.AddArg(mem) 20085 v1.AddArg(v2) 20086 v0.AddArg(v1) 20087 v.AddArg(v0) 20088 return true 20089 } 20090 // match: (Zero [s] destptr mem) 20091 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice 20092 // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 20093 for { 20094 s := v.AuxInt 20095 destptr := v.Args[0] 20096 mem := v.Args[1] 20097 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) { 20098 break 20099 } 20100 v.reset(OpZero) 20101 v.AuxInt = SizeAndAlign(s).Size() - 8 20102 v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type) 20103 v0.AuxInt = 8 20104 v0.AddArg(destptr) 20105 v.AddArg(v0) 20106 v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) 20107 v1.AddArg(destptr) 20108 v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 20109 v2.AuxInt = 0 20110 v1.AddArg(v2) 20111 v1.AddArg(mem) 20112 v.AddArg(v1) 20113 return true 20114 } 20115 // match: (Zero [s] destptr mem) 20116 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 20117 // result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem) 20118 for { 20119 s := v.AuxInt 20120 destptr := v.Args[0] 20121 mem := v.Args[1] 20122 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 20123 break 20124 } 20125 v.reset(OpAMD64DUFFZERO) 20126 v.AuxInt = SizeAndAlign(s).Size() 20127 v.AddArg(destptr) 20128 v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128) 20129 v0.AuxInt = 0 20130 v.AddArg(v0) 20131 v.AddArg(mem) 20132 return true 20133 } 20134 // match: (Zero [s] destptr mem) 20135 // cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0 20136 // result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem) 20137 for { 20138 s := v.AuxInt 20139 destptr := v.Args[0] 20140 mem := v.Args[1] 20141 if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) { 20142 break 20143 } 20144 v.reset(OpAMD64REPSTOSQ) 20145 v.AddArg(destptr) 20146 v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 20147 v0.AuxInt = SizeAndAlign(s).Size() / 8 20148 v.AddArg(v0) 20149 v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) 20150 v1.AuxInt = 0 20151 v.AddArg(v1) 20152 v.AddArg(mem) 20153 return true 20154 } 20155 return false 20156 } 20157 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool { 20158 b := v.Block 20159 _ = b 20160 // match: (ZeroExt16to32 x) 20161 // cond: 20162 // result: (MOVWQZX x) 20163 for { 20164 x := v.Args[0] 20165 v.reset(OpAMD64MOVWQZX) 20166 v.AddArg(x) 20167 return true 20168 } 20169 } 20170 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool { 20171 b := v.Block 20172 _ = b 20173 // match: (ZeroExt16to64 x) 20174 // cond: 20175 // result: (MOVWQZX x) 20176 for { 20177 x := v.Args[0] 20178 v.reset(OpAMD64MOVWQZX) 20179 v.AddArg(x) 20180 return true 20181 } 20182 } 20183 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool { 20184 b := v.Block 20185 _ = b 20186 // match: (ZeroExt32to64 x) 20187 // cond: 20188 // result: (MOVLQZX x) 20189 for { 20190 x := v.Args[0] 20191 v.reset(OpAMD64MOVLQZX) 20192 v.AddArg(x) 20193 return true 20194 } 20195 } 20196 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool { 20197 b := v.Block 20198 _ = b 20199 // match: (ZeroExt8to16 x) 20200 // cond: 20201 // result: (MOVBQZX x) 20202 for { 20203 x := v.Args[0] 20204 v.reset(OpAMD64MOVBQZX) 20205 v.AddArg(x) 20206 return true 20207 } 20208 } 20209 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool { 20210 b := v.Block 20211 _ = b 20212 // match: (ZeroExt8to32 x) 20213 // cond: 20214 // result: (MOVBQZX x) 20215 for { 20216 x := v.Args[0] 20217 v.reset(OpAMD64MOVBQZX) 20218 v.AddArg(x) 20219 return true 20220 } 20221 } 20222 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool { 20223 b := v.Block 20224 _ = b 20225 // match: (ZeroExt8to64 x) 20226 // cond: 20227 // result: (MOVBQZX x) 20228 for { 20229 x := v.Args[0] 20230 v.reset(OpAMD64MOVBQZX) 20231 v.AddArg(x) 20232 return true 20233 } 20234 } 20235 func rewriteBlockAMD64(b *Block, config *Config) bool { 20236 switch b.Kind { 20237 case BlockAMD64EQ: 20238 // match: (EQ (InvertFlags cmp) yes no) 20239 // cond: 20240 // result: (EQ cmp yes no) 20241 for { 20242 v := b.Control 20243 if v.Op != OpAMD64InvertFlags { 20244 break 20245 } 20246 cmp := v.Args[0] 20247 yes := b.Succs[0] 20248 no := b.Succs[1] 20249 b.Kind = BlockAMD64EQ 20250 b.SetControl(cmp) 20251 _ = yes 20252 _ = no 20253 return true 20254 } 20255 // match: (EQ (FlagEQ) yes no) 20256 // cond: 20257 // result: (First nil yes no) 20258 for { 20259 v := b.Control 20260 if v.Op != OpAMD64FlagEQ { 20261 break 20262 } 20263 yes := b.Succs[0] 20264 no := b.Succs[1] 20265 b.Kind = BlockFirst 20266 b.SetControl(nil) 20267 _ = yes 20268 _ = no 20269 return true 20270 } 20271 // match: (EQ (FlagLT_ULT) yes no) 20272 // cond: 20273 // result: (First nil no yes) 20274 for { 20275 v := b.Control 20276 if v.Op != OpAMD64FlagLT_ULT { 20277 break 20278 } 20279 yes := b.Succs[0] 20280 no := b.Succs[1] 20281 b.Kind = BlockFirst 20282 b.SetControl(nil) 20283 b.swapSuccessors() 20284 _ = no 20285 _ = yes 20286 return true 20287 } 20288 // match: (EQ (FlagLT_UGT) yes no) 20289 // cond: 20290 // result: (First nil no yes) 20291 for { 20292 v := b.Control 20293 if v.Op != OpAMD64FlagLT_UGT { 20294 break 20295 } 20296 yes := b.Succs[0] 20297 no := b.Succs[1] 20298 b.Kind = BlockFirst 20299 b.SetControl(nil) 20300 b.swapSuccessors() 20301 _ = no 20302 _ = yes 20303 return true 20304 } 20305 // match: (EQ (FlagGT_ULT) yes no) 20306 // cond: 20307 // result: (First nil no yes) 20308 for { 20309 v := b.Control 20310 if v.Op != OpAMD64FlagGT_ULT { 20311 break 20312 } 20313 yes := b.Succs[0] 20314 no := b.Succs[1] 20315 b.Kind = BlockFirst 20316 b.SetControl(nil) 20317 b.swapSuccessors() 20318 _ = no 20319 _ = yes 20320 return true 20321 } 20322 // match: (EQ (FlagGT_UGT) yes no) 20323 // cond: 20324 // result: (First nil no yes) 20325 for { 20326 v := b.Control 20327 if v.Op != OpAMD64FlagGT_UGT { 20328 break 20329 } 20330 yes := b.Succs[0] 20331 no := b.Succs[1] 20332 b.Kind = BlockFirst 20333 b.SetControl(nil) 20334 b.swapSuccessors() 20335 _ = no 20336 _ = yes 20337 return true 20338 } 20339 case BlockAMD64GE: 20340 // match: (GE (InvertFlags cmp) yes no) 20341 // cond: 20342 // result: (LE cmp yes no) 20343 for { 20344 v := b.Control 20345 if v.Op != OpAMD64InvertFlags { 20346 break 20347 } 20348 cmp := v.Args[0] 20349 yes := b.Succs[0] 20350 no := b.Succs[1] 20351 b.Kind = BlockAMD64LE 20352 b.SetControl(cmp) 20353 _ = yes 20354 _ = no 20355 return true 20356 } 20357 // match: (GE (FlagEQ) yes no) 20358 // cond: 20359 // result: (First nil yes no) 20360 for { 20361 v := b.Control 20362 if v.Op != OpAMD64FlagEQ { 20363 break 20364 } 20365 yes := b.Succs[0] 20366 no := b.Succs[1] 20367 b.Kind = BlockFirst 20368 b.SetControl(nil) 20369 _ = yes 20370 _ = no 20371 return true 20372 } 20373 // match: (GE (FlagLT_ULT) yes no) 20374 // cond: 20375 // result: (First nil no yes) 20376 for { 20377 v := b.Control 20378 if v.Op != OpAMD64FlagLT_ULT { 20379 break 20380 } 20381 yes := b.Succs[0] 20382 no := b.Succs[1] 20383 b.Kind = BlockFirst 20384 b.SetControl(nil) 20385 b.swapSuccessors() 20386 _ = no 20387 _ = yes 20388 return true 20389 } 20390 // match: (GE (FlagLT_UGT) yes no) 20391 // cond: 20392 // result: (First nil no yes) 20393 for { 20394 v := b.Control 20395 if v.Op != OpAMD64FlagLT_UGT { 20396 break 20397 } 20398 yes := b.Succs[0] 20399 no := b.Succs[1] 20400 b.Kind = BlockFirst 20401 b.SetControl(nil) 20402 b.swapSuccessors() 20403 _ = no 20404 _ = yes 20405 return true 20406 } 20407 // match: (GE (FlagGT_ULT) yes no) 20408 // cond: 20409 // result: (First nil yes no) 20410 for { 20411 v := b.Control 20412 if v.Op != OpAMD64FlagGT_ULT { 20413 break 20414 } 20415 yes := b.Succs[0] 20416 no := b.Succs[1] 20417 b.Kind = BlockFirst 20418 b.SetControl(nil) 20419 _ = yes 20420 _ = no 20421 return true 20422 } 20423 // match: (GE (FlagGT_UGT) yes no) 20424 // cond: 20425 // result: (First nil yes no) 20426 for { 20427 v := b.Control 20428 if v.Op != OpAMD64FlagGT_UGT { 20429 break 20430 } 20431 yes := b.Succs[0] 20432 no := b.Succs[1] 20433 b.Kind = BlockFirst 20434 b.SetControl(nil) 20435 _ = yes 20436 _ = no 20437 return true 20438 } 20439 case BlockAMD64GT: 20440 // match: (GT (InvertFlags cmp) yes no) 20441 // cond: 20442 // result: (LT cmp yes no) 20443 for { 20444 v := b.Control 20445 if v.Op != OpAMD64InvertFlags { 20446 break 20447 } 20448 cmp := v.Args[0] 20449 yes := b.Succs[0] 20450 no := b.Succs[1] 20451 b.Kind = BlockAMD64LT 20452 b.SetControl(cmp) 20453 _ = yes 20454 _ = no 20455 return true 20456 } 20457 // match: (GT (FlagEQ) yes no) 20458 // cond: 20459 // result: (First nil no yes) 20460 for { 20461 v := b.Control 20462 if v.Op != OpAMD64FlagEQ { 20463 break 20464 } 20465 yes := b.Succs[0] 20466 no := b.Succs[1] 20467 b.Kind = BlockFirst 20468 b.SetControl(nil) 20469 b.swapSuccessors() 20470 _ = no 20471 _ = yes 20472 return true 20473 } 20474 // match: (GT (FlagLT_ULT) yes no) 20475 // cond: 20476 // result: (First nil no yes) 20477 for { 20478 v := b.Control 20479 if v.Op != OpAMD64FlagLT_ULT { 20480 break 20481 } 20482 yes := b.Succs[0] 20483 no := b.Succs[1] 20484 b.Kind = BlockFirst 20485 b.SetControl(nil) 20486 b.swapSuccessors() 20487 _ = no 20488 _ = yes 20489 return true 20490 } 20491 // match: (GT (FlagLT_UGT) yes no) 20492 // cond: 20493 // result: (First nil no yes) 20494 for { 20495 v := b.Control 20496 if v.Op != OpAMD64FlagLT_UGT { 20497 break 20498 } 20499 yes := b.Succs[0] 20500 no := b.Succs[1] 20501 b.Kind = BlockFirst 20502 b.SetControl(nil) 20503 b.swapSuccessors() 20504 _ = no 20505 _ = yes 20506 return true 20507 } 20508 // match: (GT (FlagGT_ULT) yes no) 20509 // cond: 20510 // result: (First nil yes no) 20511 for { 20512 v := b.Control 20513 if v.Op != OpAMD64FlagGT_ULT { 20514 break 20515 } 20516 yes := b.Succs[0] 20517 no := b.Succs[1] 20518 b.Kind = BlockFirst 20519 b.SetControl(nil) 20520 _ = yes 20521 _ = no 20522 return true 20523 } 20524 // match: (GT (FlagGT_UGT) yes no) 20525 // cond: 20526 // result: (First nil yes no) 20527 for { 20528 v := b.Control 20529 if v.Op != OpAMD64FlagGT_UGT { 20530 break 20531 } 20532 yes := b.Succs[0] 20533 no := b.Succs[1] 20534 b.Kind = BlockFirst 20535 b.SetControl(nil) 20536 _ = yes 20537 _ = no 20538 return true 20539 } 20540 case BlockIf: 20541 // match: (If (SETL cmp) yes no) 20542 // cond: 20543 // result: (LT cmp yes no) 20544 for { 20545 v := b.Control 20546 if v.Op != OpAMD64SETL { 20547 break 20548 } 20549 cmp := v.Args[0] 20550 yes := b.Succs[0] 20551 no := b.Succs[1] 20552 b.Kind = BlockAMD64LT 20553 b.SetControl(cmp) 20554 _ = yes 20555 _ = no 20556 return true 20557 } 20558 // match: (If (SETLE cmp) yes no) 20559 // cond: 20560 // result: (LE cmp yes no) 20561 for { 20562 v := b.Control 20563 if v.Op != OpAMD64SETLE { 20564 break 20565 } 20566 cmp := v.Args[0] 20567 yes := b.Succs[0] 20568 no := b.Succs[1] 20569 b.Kind = BlockAMD64LE 20570 b.SetControl(cmp) 20571 _ = yes 20572 _ = no 20573 return true 20574 } 20575 // match: (If (SETG cmp) yes no) 20576 // cond: 20577 // result: (GT cmp yes no) 20578 for { 20579 v := b.Control 20580 if v.Op != OpAMD64SETG { 20581 break 20582 } 20583 cmp := v.Args[0] 20584 yes := b.Succs[0] 20585 no := b.Succs[1] 20586 b.Kind = BlockAMD64GT 20587 b.SetControl(cmp) 20588 _ = yes 20589 _ = no 20590 return true 20591 } 20592 // match: (If (SETGE cmp) yes no) 20593 // cond: 20594 // result: (GE cmp yes no) 20595 for { 20596 v := b.Control 20597 if v.Op != OpAMD64SETGE { 20598 break 20599 } 20600 cmp := v.Args[0] 20601 yes := b.Succs[0] 20602 no := b.Succs[1] 20603 b.Kind = BlockAMD64GE 20604 b.SetControl(cmp) 20605 _ = yes 20606 _ = no 20607 return true 20608 } 20609 // match: (If (SETEQ cmp) yes no) 20610 // cond: 20611 // result: (EQ cmp yes no) 20612 for { 20613 v := b.Control 20614 if v.Op != OpAMD64SETEQ { 20615 break 20616 } 20617 cmp := v.Args[0] 20618 yes := b.Succs[0] 20619 no := b.Succs[1] 20620 b.Kind = BlockAMD64EQ 20621 b.SetControl(cmp) 20622 _ = yes 20623 _ = no 20624 return true 20625 } 20626 // match: (If (SETNE cmp) yes no) 20627 // cond: 20628 // result: (NE cmp yes no) 20629 for { 20630 v := b.Control 20631 if v.Op != OpAMD64SETNE { 20632 break 20633 } 20634 cmp := v.Args[0] 20635 yes := b.Succs[0] 20636 no := b.Succs[1] 20637 b.Kind = BlockAMD64NE 20638 b.SetControl(cmp) 20639 _ = yes 20640 _ = no 20641 return true 20642 } 20643 // match: (If (SETB cmp) yes no) 20644 // cond: 20645 // result: (ULT cmp yes no) 20646 for { 20647 v := b.Control 20648 if v.Op != OpAMD64SETB { 20649 break 20650 } 20651 cmp := v.Args[0] 20652 yes := b.Succs[0] 20653 no := b.Succs[1] 20654 b.Kind = BlockAMD64ULT 20655 b.SetControl(cmp) 20656 _ = yes 20657 _ = no 20658 return true 20659 } 20660 // match: (If (SETBE cmp) yes no) 20661 // cond: 20662 // result: (ULE cmp yes no) 20663 for { 20664 v := b.Control 20665 if v.Op != OpAMD64SETBE { 20666 break 20667 } 20668 cmp := v.Args[0] 20669 yes := b.Succs[0] 20670 no := b.Succs[1] 20671 b.Kind = BlockAMD64ULE 20672 b.SetControl(cmp) 20673 _ = yes 20674 _ = no 20675 return true 20676 } 20677 // match: (If (SETA cmp) yes no) 20678 // cond: 20679 // result: (UGT cmp yes no) 20680 for { 20681 v := b.Control 20682 if v.Op != OpAMD64SETA { 20683 break 20684 } 20685 cmp := v.Args[0] 20686 yes := b.Succs[0] 20687 no := b.Succs[1] 20688 b.Kind = BlockAMD64UGT 20689 b.SetControl(cmp) 20690 _ = yes 20691 _ = no 20692 return true 20693 } 20694 // match: (If (SETAE cmp) yes no) 20695 // cond: 20696 // result: (UGE cmp yes no) 20697 for { 20698 v := b.Control 20699 if v.Op != OpAMD64SETAE { 20700 break 20701 } 20702 cmp := v.Args[0] 20703 yes := b.Succs[0] 20704 no := b.Succs[1] 20705 b.Kind = BlockAMD64UGE 20706 b.SetControl(cmp) 20707 _ = yes 20708 _ = no 20709 return true 20710 } 20711 // match: (If (SETGF cmp) yes no) 20712 // cond: 20713 // result: (UGT cmp yes no) 20714 for { 20715 v := b.Control 20716 if v.Op != OpAMD64SETGF { 20717 break 20718 } 20719 cmp := v.Args[0] 20720 yes := b.Succs[0] 20721 no := b.Succs[1] 20722 b.Kind = BlockAMD64UGT 20723 b.SetControl(cmp) 20724 _ = yes 20725 _ = no 20726 return true 20727 } 20728 // match: (If (SETGEF cmp) yes no) 20729 // cond: 20730 // result: (UGE cmp yes no) 20731 for { 20732 v := b.Control 20733 if v.Op != OpAMD64SETGEF { 20734 break 20735 } 20736 cmp := v.Args[0] 20737 yes := b.Succs[0] 20738 no := b.Succs[1] 20739 b.Kind = BlockAMD64UGE 20740 b.SetControl(cmp) 20741 _ = yes 20742 _ = no 20743 return true 20744 } 20745 // match: (If (SETEQF cmp) yes no) 20746 // cond: 20747 // result: (EQF cmp yes no) 20748 for { 20749 v := b.Control 20750 if v.Op != OpAMD64SETEQF { 20751 break 20752 } 20753 cmp := v.Args[0] 20754 yes := b.Succs[0] 20755 no := b.Succs[1] 20756 b.Kind = BlockAMD64EQF 20757 b.SetControl(cmp) 20758 _ = yes 20759 _ = no 20760 return true 20761 } 20762 // match: (If (SETNEF cmp) yes no) 20763 // cond: 20764 // result: (NEF cmp yes no) 20765 for { 20766 v := b.Control 20767 if v.Op != OpAMD64SETNEF { 20768 break 20769 } 20770 cmp := v.Args[0] 20771 yes := b.Succs[0] 20772 no := b.Succs[1] 20773 b.Kind = BlockAMD64NEF 20774 b.SetControl(cmp) 20775 _ = yes 20776 _ = no 20777 return true 20778 } 20779 // match: (If cond yes no) 20780 // cond: 20781 // result: (NE (TESTB cond cond) yes no) 20782 for { 20783 v := b.Control 20784 _ = v 20785 cond := b.Control 20786 yes := b.Succs[0] 20787 no := b.Succs[1] 20788 b.Kind = BlockAMD64NE 20789 v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags) 20790 v0.AddArg(cond) 20791 v0.AddArg(cond) 20792 b.SetControl(v0) 20793 _ = yes 20794 _ = no 20795 return true 20796 } 20797 case BlockAMD64LE: 20798 // match: (LE (InvertFlags cmp) yes no) 20799 // cond: 20800 // result: (GE cmp yes no) 20801 for { 20802 v := b.Control 20803 if v.Op != OpAMD64InvertFlags { 20804 break 20805 } 20806 cmp := v.Args[0] 20807 yes := b.Succs[0] 20808 no := b.Succs[1] 20809 b.Kind = BlockAMD64GE 20810 b.SetControl(cmp) 20811 _ = yes 20812 _ = no 20813 return true 20814 } 20815 // match: (LE (FlagEQ) yes no) 20816 // cond: 20817 // result: (First nil yes no) 20818 for { 20819 v := b.Control 20820 if v.Op != OpAMD64FlagEQ { 20821 break 20822 } 20823 yes := b.Succs[0] 20824 no := b.Succs[1] 20825 b.Kind = BlockFirst 20826 b.SetControl(nil) 20827 _ = yes 20828 _ = no 20829 return true 20830 } 20831 // match: (LE (FlagLT_ULT) yes no) 20832 // cond: 20833 // result: (First nil yes no) 20834 for { 20835 v := b.Control 20836 if v.Op != OpAMD64FlagLT_ULT { 20837 break 20838 } 20839 yes := b.Succs[0] 20840 no := b.Succs[1] 20841 b.Kind = BlockFirst 20842 b.SetControl(nil) 20843 _ = yes 20844 _ = no 20845 return true 20846 } 20847 // match: (LE (FlagLT_UGT) yes no) 20848 // cond: 20849 // result: (First nil yes no) 20850 for { 20851 v := b.Control 20852 if v.Op != OpAMD64FlagLT_UGT { 20853 break 20854 } 20855 yes := b.Succs[0] 20856 no := b.Succs[1] 20857 b.Kind = BlockFirst 20858 b.SetControl(nil) 20859 _ = yes 20860 _ = no 20861 return true 20862 } 20863 // match: (LE (FlagGT_ULT) yes no) 20864 // cond: 20865 // result: (First nil no yes) 20866 for { 20867 v := b.Control 20868 if v.Op != OpAMD64FlagGT_ULT { 20869 break 20870 } 20871 yes := b.Succs[0] 20872 no := b.Succs[1] 20873 b.Kind = BlockFirst 20874 b.SetControl(nil) 20875 b.swapSuccessors() 20876 _ = no 20877 _ = yes 20878 return true 20879 } 20880 // match: (LE (FlagGT_UGT) yes no) 20881 // cond: 20882 // result: (First nil no yes) 20883 for { 20884 v := b.Control 20885 if v.Op != OpAMD64FlagGT_UGT { 20886 break 20887 } 20888 yes := b.Succs[0] 20889 no := b.Succs[1] 20890 b.Kind = BlockFirst 20891 b.SetControl(nil) 20892 b.swapSuccessors() 20893 _ = no 20894 _ = yes 20895 return true 20896 } 20897 case BlockAMD64LT: 20898 // match: (LT (InvertFlags cmp) yes no) 20899 // cond: 20900 // result: (GT cmp yes no) 20901 for { 20902 v := b.Control 20903 if v.Op != OpAMD64InvertFlags { 20904 break 20905 } 20906 cmp := v.Args[0] 20907 yes := b.Succs[0] 20908 no := b.Succs[1] 20909 b.Kind = BlockAMD64GT 20910 b.SetControl(cmp) 20911 _ = yes 20912 _ = no 20913 return true 20914 } 20915 // match: (LT (FlagEQ) yes no) 20916 // cond: 20917 // result: (First nil no yes) 20918 for { 20919 v := b.Control 20920 if v.Op != OpAMD64FlagEQ { 20921 break 20922 } 20923 yes := b.Succs[0] 20924 no := b.Succs[1] 20925 b.Kind = BlockFirst 20926 b.SetControl(nil) 20927 b.swapSuccessors() 20928 _ = no 20929 _ = yes 20930 return true 20931 } 20932 // match: (LT (FlagLT_ULT) yes no) 20933 // cond: 20934 // result: (First nil yes no) 20935 for { 20936 v := b.Control 20937 if v.Op != OpAMD64FlagLT_ULT { 20938 break 20939 } 20940 yes := b.Succs[0] 20941 no := b.Succs[1] 20942 b.Kind = BlockFirst 20943 b.SetControl(nil) 20944 _ = yes 20945 _ = no 20946 return true 20947 } 20948 // match: (LT (FlagLT_UGT) yes no) 20949 // cond: 20950 // result: (First nil yes no) 20951 for { 20952 v := b.Control 20953 if v.Op != OpAMD64FlagLT_UGT { 20954 break 20955 } 20956 yes := b.Succs[0] 20957 no := b.Succs[1] 20958 b.Kind = BlockFirst 20959 b.SetControl(nil) 20960 _ = yes 20961 _ = no 20962 return true 20963 } 20964 // match: (LT (FlagGT_ULT) yes no) 20965 // cond: 20966 // result: (First nil no yes) 20967 for { 20968 v := b.Control 20969 if v.Op != OpAMD64FlagGT_ULT { 20970 break 20971 } 20972 yes := b.Succs[0] 20973 no := b.Succs[1] 20974 b.Kind = BlockFirst 20975 b.SetControl(nil) 20976 b.swapSuccessors() 20977 _ = no 20978 _ = yes 20979 return true 20980 } 20981 // match: (LT (FlagGT_UGT) yes no) 20982 // cond: 20983 // result: (First nil no yes) 20984 for { 20985 v := b.Control 20986 if v.Op != OpAMD64FlagGT_UGT { 20987 break 20988 } 20989 yes := b.Succs[0] 20990 no := b.Succs[1] 20991 b.Kind = BlockFirst 20992 b.SetControl(nil) 20993 b.swapSuccessors() 20994 _ = no 20995 _ = yes 20996 return true 20997 } 20998 case BlockAMD64NE: 20999 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 21000 // cond: 21001 // result: (LT cmp yes no) 21002 for { 21003 v := b.Control 21004 if v.Op != OpAMD64TESTB { 21005 break 21006 } 21007 v_0 := v.Args[0] 21008 if v_0.Op != OpAMD64SETL { 21009 break 21010 } 21011 cmp := v_0.Args[0] 21012 v_1 := v.Args[1] 21013 if v_1.Op != OpAMD64SETL { 21014 break 21015 } 21016 if cmp != v_1.Args[0] { 21017 break 21018 } 21019 yes := b.Succs[0] 21020 no := b.Succs[1] 21021 b.Kind = BlockAMD64LT 21022 b.SetControl(cmp) 21023 _ = yes 21024 _ = no 21025 return true 21026 } 21027 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 21028 // cond: 21029 // result: (LE cmp yes no) 21030 for { 21031 v := b.Control 21032 if v.Op != OpAMD64TESTB { 21033 break 21034 } 21035 v_0 := v.Args[0] 21036 if v_0.Op != OpAMD64SETLE { 21037 break 21038 } 21039 cmp := v_0.Args[0] 21040 v_1 := v.Args[1] 21041 if v_1.Op != OpAMD64SETLE { 21042 break 21043 } 21044 if cmp != v_1.Args[0] { 21045 break 21046 } 21047 yes := b.Succs[0] 21048 no := b.Succs[1] 21049 b.Kind = BlockAMD64LE 21050 b.SetControl(cmp) 21051 _ = yes 21052 _ = no 21053 return true 21054 } 21055 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 21056 // cond: 21057 // result: (GT cmp yes no) 21058 for { 21059 v := b.Control 21060 if v.Op != OpAMD64TESTB { 21061 break 21062 } 21063 v_0 := v.Args[0] 21064 if v_0.Op != OpAMD64SETG { 21065 break 21066 } 21067 cmp := v_0.Args[0] 21068 v_1 := v.Args[1] 21069 if v_1.Op != OpAMD64SETG { 21070 break 21071 } 21072 if cmp != v_1.Args[0] { 21073 break 21074 } 21075 yes := b.Succs[0] 21076 no := b.Succs[1] 21077 b.Kind = BlockAMD64GT 21078 b.SetControl(cmp) 21079 _ = yes 21080 _ = no 21081 return true 21082 } 21083 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 21084 // cond: 21085 // result: (GE cmp yes no) 21086 for { 21087 v := b.Control 21088 if v.Op != OpAMD64TESTB { 21089 break 21090 } 21091 v_0 := v.Args[0] 21092 if v_0.Op != OpAMD64SETGE { 21093 break 21094 } 21095 cmp := v_0.Args[0] 21096 v_1 := v.Args[1] 21097 if v_1.Op != OpAMD64SETGE { 21098 break 21099 } 21100 if cmp != v_1.Args[0] { 21101 break 21102 } 21103 yes := b.Succs[0] 21104 no := b.Succs[1] 21105 b.Kind = BlockAMD64GE 21106 b.SetControl(cmp) 21107 _ = yes 21108 _ = no 21109 return true 21110 } 21111 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 21112 // cond: 21113 // result: (EQ cmp yes no) 21114 for { 21115 v := b.Control 21116 if v.Op != OpAMD64TESTB { 21117 break 21118 } 21119 v_0 := v.Args[0] 21120 if v_0.Op != OpAMD64SETEQ { 21121 break 21122 } 21123 cmp := v_0.Args[0] 21124 v_1 := v.Args[1] 21125 if v_1.Op != OpAMD64SETEQ { 21126 break 21127 } 21128 if cmp != v_1.Args[0] { 21129 break 21130 } 21131 yes := b.Succs[0] 21132 no := b.Succs[1] 21133 b.Kind = BlockAMD64EQ 21134 b.SetControl(cmp) 21135 _ = yes 21136 _ = no 21137 return true 21138 } 21139 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 21140 // cond: 21141 // result: (NE cmp yes no) 21142 for { 21143 v := b.Control 21144 if v.Op != OpAMD64TESTB { 21145 break 21146 } 21147 v_0 := v.Args[0] 21148 if v_0.Op != OpAMD64SETNE { 21149 break 21150 } 21151 cmp := v_0.Args[0] 21152 v_1 := v.Args[1] 21153 if v_1.Op != OpAMD64SETNE { 21154 break 21155 } 21156 if cmp != v_1.Args[0] { 21157 break 21158 } 21159 yes := b.Succs[0] 21160 no := b.Succs[1] 21161 b.Kind = BlockAMD64NE 21162 b.SetControl(cmp) 21163 _ = yes 21164 _ = no 21165 return true 21166 } 21167 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 21168 // cond: 21169 // result: (ULT cmp yes no) 21170 for { 21171 v := b.Control 21172 if v.Op != OpAMD64TESTB { 21173 break 21174 } 21175 v_0 := v.Args[0] 21176 if v_0.Op != OpAMD64SETB { 21177 break 21178 } 21179 cmp := v_0.Args[0] 21180 v_1 := v.Args[1] 21181 if v_1.Op != OpAMD64SETB { 21182 break 21183 } 21184 if cmp != v_1.Args[0] { 21185 break 21186 } 21187 yes := b.Succs[0] 21188 no := b.Succs[1] 21189 b.Kind = BlockAMD64ULT 21190 b.SetControl(cmp) 21191 _ = yes 21192 _ = no 21193 return true 21194 } 21195 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 21196 // cond: 21197 // result: (ULE cmp yes no) 21198 for { 21199 v := b.Control 21200 if v.Op != OpAMD64TESTB { 21201 break 21202 } 21203 v_0 := v.Args[0] 21204 if v_0.Op != OpAMD64SETBE { 21205 break 21206 } 21207 cmp := v_0.Args[0] 21208 v_1 := v.Args[1] 21209 if v_1.Op != OpAMD64SETBE { 21210 break 21211 } 21212 if cmp != v_1.Args[0] { 21213 break 21214 } 21215 yes := b.Succs[0] 21216 no := b.Succs[1] 21217 b.Kind = BlockAMD64ULE 21218 b.SetControl(cmp) 21219 _ = yes 21220 _ = no 21221 return true 21222 } 21223 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 21224 // cond: 21225 // result: (UGT cmp yes no) 21226 for { 21227 v := b.Control 21228 if v.Op != OpAMD64TESTB { 21229 break 21230 } 21231 v_0 := v.Args[0] 21232 if v_0.Op != OpAMD64SETA { 21233 break 21234 } 21235 cmp := v_0.Args[0] 21236 v_1 := v.Args[1] 21237 if v_1.Op != OpAMD64SETA { 21238 break 21239 } 21240 if cmp != v_1.Args[0] { 21241 break 21242 } 21243 yes := b.Succs[0] 21244 no := b.Succs[1] 21245 b.Kind = BlockAMD64UGT 21246 b.SetControl(cmp) 21247 _ = yes 21248 _ = no 21249 return true 21250 } 21251 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 21252 // cond: 21253 // result: (UGE cmp yes no) 21254 for { 21255 v := b.Control 21256 if v.Op != OpAMD64TESTB { 21257 break 21258 } 21259 v_0 := v.Args[0] 21260 if v_0.Op != OpAMD64SETAE { 21261 break 21262 } 21263 cmp := v_0.Args[0] 21264 v_1 := v.Args[1] 21265 if v_1.Op != OpAMD64SETAE { 21266 break 21267 } 21268 if cmp != v_1.Args[0] { 21269 break 21270 } 21271 yes := b.Succs[0] 21272 no := b.Succs[1] 21273 b.Kind = BlockAMD64UGE 21274 b.SetControl(cmp) 21275 _ = yes 21276 _ = no 21277 return true 21278 } 21279 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 21280 // cond: 21281 // result: (UGT cmp yes no) 21282 for { 21283 v := b.Control 21284 if v.Op != OpAMD64TESTB { 21285 break 21286 } 21287 v_0 := v.Args[0] 21288 if v_0.Op != OpAMD64SETGF { 21289 break 21290 } 21291 cmp := v_0.Args[0] 21292 v_1 := v.Args[1] 21293 if v_1.Op != OpAMD64SETGF { 21294 break 21295 } 21296 if cmp != v_1.Args[0] { 21297 break 21298 } 21299 yes := b.Succs[0] 21300 no := b.Succs[1] 21301 b.Kind = BlockAMD64UGT 21302 b.SetControl(cmp) 21303 _ = yes 21304 _ = no 21305 return true 21306 } 21307 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 21308 // cond: 21309 // result: (UGE cmp yes no) 21310 for { 21311 v := b.Control 21312 if v.Op != OpAMD64TESTB { 21313 break 21314 } 21315 v_0 := v.Args[0] 21316 if v_0.Op != OpAMD64SETGEF { 21317 break 21318 } 21319 cmp := v_0.Args[0] 21320 v_1 := v.Args[1] 21321 if v_1.Op != OpAMD64SETGEF { 21322 break 21323 } 21324 if cmp != v_1.Args[0] { 21325 break 21326 } 21327 yes := b.Succs[0] 21328 no := b.Succs[1] 21329 b.Kind = BlockAMD64UGE 21330 b.SetControl(cmp) 21331 _ = yes 21332 _ = no 21333 return true 21334 } 21335 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 21336 // cond: 21337 // result: (EQF cmp yes no) 21338 for { 21339 v := b.Control 21340 if v.Op != OpAMD64TESTB { 21341 break 21342 } 21343 v_0 := v.Args[0] 21344 if v_0.Op != OpAMD64SETEQF { 21345 break 21346 } 21347 cmp := v_0.Args[0] 21348 v_1 := v.Args[1] 21349 if v_1.Op != OpAMD64SETEQF { 21350 break 21351 } 21352 if cmp != v_1.Args[0] { 21353 break 21354 } 21355 yes := b.Succs[0] 21356 no := b.Succs[1] 21357 b.Kind = BlockAMD64EQF 21358 b.SetControl(cmp) 21359 _ = yes 21360 _ = no 21361 return true 21362 } 21363 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 21364 // cond: 21365 // result: (NEF cmp yes no) 21366 for { 21367 v := b.Control 21368 if v.Op != OpAMD64TESTB { 21369 break 21370 } 21371 v_0 := v.Args[0] 21372 if v_0.Op != OpAMD64SETNEF { 21373 break 21374 } 21375 cmp := v_0.Args[0] 21376 v_1 := v.Args[1] 21377 if v_1.Op != OpAMD64SETNEF { 21378 break 21379 } 21380 if cmp != v_1.Args[0] { 21381 break 21382 } 21383 yes := b.Succs[0] 21384 no := b.Succs[1] 21385 b.Kind = BlockAMD64NEF 21386 b.SetControl(cmp) 21387 _ = yes 21388 _ = no 21389 return true 21390 } 21391 // match: (NE (InvertFlags cmp) yes no) 21392 // cond: 21393 // result: (NE cmp yes no) 21394 for { 21395 v := b.Control 21396 if v.Op != OpAMD64InvertFlags { 21397 break 21398 } 21399 cmp := v.Args[0] 21400 yes := b.Succs[0] 21401 no := b.Succs[1] 21402 b.Kind = BlockAMD64NE 21403 b.SetControl(cmp) 21404 _ = yes 21405 _ = no 21406 return true 21407 } 21408 // match: (NE (FlagEQ) yes no) 21409 // cond: 21410 // result: (First nil no yes) 21411 for { 21412 v := b.Control 21413 if v.Op != OpAMD64FlagEQ { 21414 break 21415 } 21416 yes := b.Succs[0] 21417 no := b.Succs[1] 21418 b.Kind = BlockFirst 21419 b.SetControl(nil) 21420 b.swapSuccessors() 21421 _ = no 21422 _ = yes 21423 return true 21424 } 21425 // match: (NE (FlagLT_ULT) yes no) 21426 // cond: 21427 // result: (First nil yes no) 21428 for { 21429 v := b.Control 21430 if v.Op != OpAMD64FlagLT_ULT { 21431 break 21432 } 21433 yes := b.Succs[0] 21434 no := b.Succs[1] 21435 b.Kind = BlockFirst 21436 b.SetControl(nil) 21437 _ = yes 21438 _ = no 21439 return true 21440 } 21441 // match: (NE (FlagLT_UGT) yes no) 21442 // cond: 21443 // result: (First nil yes no) 21444 for { 21445 v := b.Control 21446 if v.Op != OpAMD64FlagLT_UGT { 21447 break 21448 } 21449 yes := b.Succs[0] 21450 no := b.Succs[1] 21451 b.Kind = BlockFirst 21452 b.SetControl(nil) 21453 _ = yes 21454 _ = no 21455 return true 21456 } 21457 // match: (NE (FlagGT_ULT) yes no) 21458 // cond: 21459 // result: (First nil yes no) 21460 for { 21461 v := b.Control 21462 if v.Op != OpAMD64FlagGT_ULT { 21463 break 21464 } 21465 yes := b.Succs[0] 21466 no := b.Succs[1] 21467 b.Kind = BlockFirst 21468 b.SetControl(nil) 21469 _ = yes 21470 _ = no 21471 return true 21472 } 21473 // match: (NE (FlagGT_UGT) yes no) 21474 // cond: 21475 // result: (First nil yes no) 21476 for { 21477 v := b.Control 21478 if v.Op != OpAMD64FlagGT_UGT { 21479 break 21480 } 21481 yes := b.Succs[0] 21482 no := b.Succs[1] 21483 b.Kind = BlockFirst 21484 b.SetControl(nil) 21485 _ = yes 21486 _ = no 21487 return true 21488 } 21489 case BlockAMD64UGE: 21490 // match: (UGE (InvertFlags cmp) yes no) 21491 // cond: 21492 // result: (ULE cmp yes no) 21493 for { 21494 v := b.Control 21495 if v.Op != OpAMD64InvertFlags { 21496 break 21497 } 21498 cmp := v.Args[0] 21499 yes := b.Succs[0] 21500 no := b.Succs[1] 21501 b.Kind = BlockAMD64ULE 21502 b.SetControl(cmp) 21503 _ = yes 21504 _ = no 21505 return true 21506 } 21507 // match: (UGE (FlagEQ) yes no) 21508 // cond: 21509 // result: (First nil yes no) 21510 for { 21511 v := b.Control 21512 if v.Op != OpAMD64FlagEQ { 21513 break 21514 } 21515 yes := b.Succs[0] 21516 no := b.Succs[1] 21517 b.Kind = BlockFirst 21518 b.SetControl(nil) 21519 _ = yes 21520 _ = no 21521 return true 21522 } 21523 // match: (UGE (FlagLT_ULT) yes no) 21524 // cond: 21525 // result: (First nil no yes) 21526 for { 21527 v := b.Control 21528 if v.Op != OpAMD64FlagLT_ULT { 21529 break 21530 } 21531 yes := b.Succs[0] 21532 no := b.Succs[1] 21533 b.Kind = BlockFirst 21534 b.SetControl(nil) 21535 b.swapSuccessors() 21536 _ = no 21537 _ = yes 21538 return true 21539 } 21540 // match: (UGE (FlagLT_UGT) yes no) 21541 // cond: 21542 // result: (First nil yes no) 21543 for { 21544 v := b.Control 21545 if v.Op != OpAMD64FlagLT_UGT { 21546 break 21547 } 21548 yes := b.Succs[0] 21549 no := b.Succs[1] 21550 b.Kind = BlockFirst 21551 b.SetControl(nil) 21552 _ = yes 21553 _ = no 21554 return true 21555 } 21556 // match: (UGE (FlagGT_ULT) yes no) 21557 // cond: 21558 // result: (First nil no yes) 21559 for { 21560 v := b.Control 21561 if v.Op != OpAMD64FlagGT_ULT { 21562 break 21563 } 21564 yes := b.Succs[0] 21565 no := b.Succs[1] 21566 b.Kind = BlockFirst 21567 b.SetControl(nil) 21568 b.swapSuccessors() 21569 _ = no 21570 _ = yes 21571 return true 21572 } 21573 // match: (UGE (FlagGT_UGT) yes no) 21574 // cond: 21575 // result: (First nil yes no) 21576 for { 21577 v := b.Control 21578 if v.Op != OpAMD64FlagGT_UGT { 21579 break 21580 } 21581 yes := b.Succs[0] 21582 no := b.Succs[1] 21583 b.Kind = BlockFirst 21584 b.SetControl(nil) 21585 _ = yes 21586 _ = no 21587 return true 21588 } 21589 case BlockAMD64UGT: 21590 // match: (UGT (InvertFlags cmp) yes no) 21591 // cond: 21592 // result: (ULT cmp yes no) 21593 for { 21594 v := b.Control 21595 if v.Op != OpAMD64InvertFlags { 21596 break 21597 } 21598 cmp := v.Args[0] 21599 yes := b.Succs[0] 21600 no := b.Succs[1] 21601 b.Kind = BlockAMD64ULT 21602 b.SetControl(cmp) 21603 _ = yes 21604 _ = no 21605 return true 21606 } 21607 // match: (UGT (FlagEQ) yes no) 21608 // cond: 21609 // result: (First nil no yes) 21610 for { 21611 v := b.Control 21612 if v.Op != OpAMD64FlagEQ { 21613 break 21614 } 21615 yes := b.Succs[0] 21616 no := b.Succs[1] 21617 b.Kind = BlockFirst 21618 b.SetControl(nil) 21619 b.swapSuccessors() 21620 _ = no 21621 _ = yes 21622 return true 21623 } 21624 // match: (UGT (FlagLT_ULT) yes no) 21625 // cond: 21626 // result: (First nil no yes) 21627 for { 21628 v := b.Control 21629 if v.Op != OpAMD64FlagLT_ULT { 21630 break 21631 } 21632 yes := b.Succs[0] 21633 no := b.Succs[1] 21634 b.Kind = BlockFirst 21635 b.SetControl(nil) 21636 b.swapSuccessors() 21637 _ = no 21638 _ = yes 21639 return true 21640 } 21641 // match: (UGT (FlagLT_UGT) yes no) 21642 // cond: 21643 // result: (First nil yes no) 21644 for { 21645 v := b.Control 21646 if v.Op != OpAMD64FlagLT_UGT { 21647 break 21648 } 21649 yes := b.Succs[0] 21650 no := b.Succs[1] 21651 b.Kind = BlockFirst 21652 b.SetControl(nil) 21653 _ = yes 21654 _ = no 21655 return true 21656 } 21657 // match: (UGT (FlagGT_ULT) yes no) 21658 // cond: 21659 // result: (First nil no yes) 21660 for { 21661 v := b.Control 21662 if v.Op != OpAMD64FlagGT_ULT { 21663 break 21664 } 21665 yes := b.Succs[0] 21666 no := b.Succs[1] 21667 b.Kind = BlockFirst 21668 b.SetControl(nil) 21669 b.swapSuccessors() 21670 _ = no 21671 _ = yes 21672 return true 21673 } 21674 // match: (UGT (FlagGT_UGT) yes no) 21675 // cond: 21676 // result: (First nil yes no) 21677 for { 21678 v := b.Control 21679 if v.Op != OpAMD64FlagGT_UGT { 21680 break 21681 } 21682 yes := b.Succs[0] 21683 no := b.Succs[1] 21684 b.Kind = BlockFirst 21685 b.SetControl(nil) 21686 _ = yes 21687 _ = no 21688 return true 21689 } 21690 case BlockAMD64ULE: 21691 // match: (ULE (InvertFlags cmp) yes no) 21692 // cond: 21693 // result: (UGE cmp yes no) 21694 for { 21695 v := b.Control 21696 if v.Op != OpAMD64InvertFlags { 21697 break 21698 } 21699 cmp := v.Args[0] 21700 yes := b.Succs[0] 21701 no := b.Succs[1] 21702 b.Kind = BlockAMD64UGE 21703 b.SetControl(cmp) 21704 _ = yes 21705 _ = no 21706 return true 21707 } 21708 // match: (ULE (FlagEQ) yes no) 21709 // cond: 21710 // result: (First nil yes no) 21711 for { 21712 v := b.Control 21713 if v.Op != OpAMD64FlagEQ { 21714 break 21715 } 21716 yes := b.Succs[0] 21717 no := b.Succs[1] 21718 b.Kind = BlockFirst 21719 b.SetControl(nil) 21720 _ = yes 21721 _ = no 21722 return true 21723 } 21724 // match: (ULE (FlagLT_ULT) yes no) 21725 // cond: 21726 // result: (First nil yes no) 21727 for { 21728 v := b.Control 21729 if v.Op != OpAMD64FlagLT_ULT { 21730 break 21731 } 21732 yes := b.Succs[0] 21733 no := b.Succs[1] 21734 b.Kind = BlockFirst 21735 b.SetControl(nil) 21736 _ = yes 21737 _ = no 21738 return true 21739 } 21740 // match: (ULE (FlagLT_UGT) yes no) 21741 // cond: 21742 // result: (First nil no yes) 21743 for { 21744 v := b.Control 21745 if v.Op != OpAMD64FlagLT_UGT { 21746 break 21747 } 21748 yes := b.Succs[0] 21749 no := b.Succs[1] 21750 b.Kind = BlockFirst 21751 b.SetControl(nil) 21752 b.swapSuccessors() 21753 _ = no 21754 _ = yes 21755 return true 21756 } 21757 // match: (ULE (FlagGT_ULT) yes no) 21758 // cond: 21759 // result: (First nil yes no) 21760 for { 21761 v := b.Control 21762 if v.Op != OpAMD64FlagGT_ULT { 21763 break 21764 } 21765 yes := b.Succs[0] 21766 no := b.Succs[1] 21767 b.Kind = BlockFirst 21768 b.SetControl(nil) 21769 _ = yes 21770 _ = no 21771 return true 21772 } 21773 // match: (ULE (FlagGT_UGT) yes no) 21774 // cond: 21775 // result: (First nil no yes) 21776 for { 21777 v := b.Control 21778 if v.Op != OpAMD64FlagGT_UGT { 21779 break 21780 } 21781 yes := b.Succs[0] 21782 no := b.Succs[1] 21783 b.Kind = BlockFirst 21784 b.SetControl(nil) 21785 b.swapSuccessors() 21786 _ = no 21787 _ = yes 21788 return true 21789 } 21790 case BlockAMD64ULT: 21791 // match: (ULT (InvertFlags cmp) yes no) 21792 // cond: 21793 // result: (UGT cmp yes no) 21794 for { 21795 v := b.Control 21796 if v.Op != OpAMD64InvertFlags { 21797 break 21798 } 21799 cmp := v.Args[0] 21800 yes := b.Succs[0] 21801 no := b.Succs[1] 21802 b.Kind = BlockAMD64UGT 21803 b.SetControl(cmp) 21804 _ = yes 21805 _ = no 21806 return true 21807 } 21808 // match: (ULT (FlagEQ) yes no) 21809 // cond: 21810 // result: (First nil no yes) 21811 for { 21812 v := b.Control 21813 if v.Op != OpAMD64FlagEQ { 21814 break 21815 } 21816 yes := b.Succs[0] 21817 no := b.Succs[1] 21818 b.Kind = BlockFirst 21819 b.SetControl(nil) 21820 b.swapSuccessors() 21821 _ = no 21822 _ = yes 21823 return true 21824 } 21825 // match: (ULT (FlagLT_ULT) yes no) 21826 // cond: 21827 // result: (First nil yes no) 21828 for { 21829 v := b.Control 21830 if v.Op != OpAMD64FlagLT_ULT { 21831 break 21832 } 21833 yes := b.Succs[0] 21834 no := b.Succs[1] 21835 b.Kind = BlockFirst 21836 b.SetControl(nil) 21837 _ = yes 21838 _ = no 21839 return true 21840 } 21841 // match: (ULT (FlagLT_UGT) yes no) 21842 // cond: 21843 // result: (First nil no yes) 21844 for { 21845 v := b.Control 21846 if v.Op != OpAMD64FlagLT_UGT { 21847 break 21848 } 21849 yes := b.Succs[0] 21850 no := b.Succs[1] 21851 b.Kind = BlockFirst 21852 b.SetControl(nil) 21853 b.swapSuccessors() 21854 _ = no 21855 _ = yes 21856 return true 21857 } 21858 // match: (ULT (FlagGT_ULT) yes no) 21859 // cond: 21860 // result: (First nil yes no) 21861 for { 21862 v := b.Control 21863 if v.Op != OpAMD64FlagGT_ULT { 21864 break 21865 } 21866 yes := b.Succs[0] 21867 no := b.Succs[1] 21868 b.Kind = BlockFirst 21869 b.SetControl(nil) 21870 _ = yes 21871 _ = no 21872 return true 21873 } 21874 // match: (ULT (FlagGT_UGT) yes no) 21875 // cond: 21876 // result: (First nil no yes) 21877 for { 21878 v := b.Control 21879 if v.Op != OpAMD64FlagGT_UGT { 21880 break 21881 } 21882 yes := b.Succs[0] 21883 no := b.Succs[1] 21884 b.Kind = BlockFirst 21885 b.SetControl(nil) 21886 b.swapSuccessors() 21887 _ = no 21888 _ = yes 21889 return true 21890 } 21891 } 21892 return false 21893 }